/* * arch/powerpc/kernel/mpic.c * * Driver for interrupt controllers following the OpenPIC standard, the * common implementation being IBM's MPIC. This driver also can deal * with various broken implementations of this HW. * * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. * Copyright 2010-2012 Freescale Semiconductor, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details.
*/
/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	/* Write an all-ones vector/priority mask, then read it back.  On a
	 * correctly wired controller we get the value back unchanged; on the
	 * broken parts the read path byte-swaps, so the readback matches the
	 * little-endian image of the mask instead.
	 */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}
/* Test if an interrupt is sourced from HyperTransport (used on broken U3s) * to force the edge setting on the MPIC and do the ack workaround.
*/ staticinlineint mpic_is_ht_interrupt(struct mpic *mpic, unsignedint source)
{ if (source >= 128 || !mpic->fixups) return 0; return mpic->fixups[source].base != NULL;
}
#ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW,
* set if this fixup was enabled, clear otherwise */
mpic->save_data[source].fixup_data = tmp | 1; #endif
}
#ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW,
* set if this fixup was enabled, clear otherwise */
mpic->save_data[source].fixup_data = tmp & ~1; #endif
}
/* Map U3 config space. We assume all IO-APICs are on the primary bus * so we only need to map 64kB.
*/
cfgspace = ioremap(0xf2000000, 0x10000);
BUG_ON(cfgspace == NULL);
/* Now we scan all slots. We do a very quick scan, we read the header * type, vendor ID and device ID only, that's plenty enough
*/ for (devfn = 0; devfn < 0x100; devfn++) {
u8 __iomem *devbase = cfgspace + (devfn << 8);
u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
u32 l = readl(devbase + PCI_VENDOR_ID);
u16 s;
DBG("devfn %x, l: %x\n", devfn, l);
/* If no device, skip */ if (l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000) goto next; /* Check if is supports capability lists */
s = readw(devbase + PCI_STATUS); if (!(s & PCI_STATUS_CAP_LIST)) goto next;
/* Find an mpic associated with a given linux interrupt */ staticstruct mpic *mpic_find(unsignedint irq)
{ if (irq < NR_IRQS_LEGACY) return NULL;
return irq_get_chip_data(irq);
}
/* Determine if the linux irq is an IPI */ staticunsignedint mpic_is_ipi(struct mpic *mpic, unsignedint src)
{ return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
}
/* Determine if the linux irq is a timer */ staticunsignedint mpic_is_tm(struct mpic *mpic, unsignedint src)
{ return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
}
/* Convert a cpu mask from logical to physical cpu numbers. */ staticinline u32 mpic_physmask(u32 cpumask)
{ int i;
u32 mask = 0;
for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask;
}
#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return irq_get_chip_data(irq);
}
/* Get the mpic structure from the irq data */
static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
~MPIC_VECPRI_MASK); /* make sure mask gets to controller before we return to user */ do { if (!loops--) {
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src); break;
}
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}
/* make sure mask gets to controller before we return to user */ do { if (!loops--) {
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src); break;
}
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, d->irq); #endif /* We always EOI on end_irq() even for edge interrupts since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway
*/
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, d->irq); #endif /* We always EOI on end_irq() even for edge interrupts since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway
*/
if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
} #endif/* !CONFIG_MPIC_U3_HT_IRQS */
/* * IPIs are marked IRQ_PER_CPU. This has the side effect of * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from * applying to them. We EOI them late to avoid re-entering.
*/
mpic_eoi(mpic);
}
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
/* If the MPIC was reset, then all vectors have already been * initialized. Otherwise, a per source lazy initialization * is done here.
*/ if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) { int cpu;
preempt_disable();
cpu = mpic_processor_id(mpic);
preempt_enable();
*out_hwirq = intspec[0]; if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { /* * Freescale MPIC with extended intspec: * First two cells are as usual. Third specifies * an "interrupt type". Fourth is type-specific data. * * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
*/ switch (intspec[2]) { case 0: break; case 1: if (!(mpic->flags & MPIC_FSL_HAS_EIMR)) break;
if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs)) return -EINVAL;
*out_hwirq = mpic->err_int_vecs[intspec[3]];
break; case 2: if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) return -EINVAL;
*out_hwirq = mpic->ipi_vecs[intspec[0]]; break; case 3: if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) return -EINVAL;
/* Apple invented a new race of encoding on machines with * an HT APIC. They encode, among others, the index within * the HT APIC. We don't care about it here since thankfully, * it appears that they have the APIC already properly * configured, and thus our current fixup code that reads the * APIC config works fine. However, we still need to mask out * bits in the specifier to make sure we only get bit 0 which * is the level/edge bit (the only sense bit exposed by Apple), * as their bit 1 means something else.
*/ if (machine_is(powermac))
mask = 0x1;
*out_flags = map_mpic_senses[intspec[1] & mask];
} else
*out_flags = IRQ_TYPE_NONE;
DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);
return 0;
}
/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ staticvoid mpic_cascade(struct irq_desc *desc)
{ struct irq_chip *chip = irq_desc_get_chip(desc); struct mpic *mpic = irq_desc_get_handler_data(desc); unsignedint virq;
BUG_ON(!(mpic->flags & MPIC_SECONDARY));
virq = mpic_get_one_irq(mpic); if (virq)
generic_handle_irq(virq);
/* * If we were not passed a device-tree node, then perform the default * search for standardized a standardized OpenPIC.
*/ if (node) {
node = of_node_get(node);
} else {
node = of_find_matching_node(NULL, mpic_device_id); if (!node) return NULL;
}
/* Pick the physical address from the device tree if unspecified */ if (!phys_addr) { /* Check if it is DCR-based */ if (of_property_read_bool(node, "dcr-reg")) {
flags |= MPIC_USES_DCR;
} else { struct resource r; if (of_address_to_resource(node, 0, &r)) goto err_of_node_put;
phys_addr = r.start;
}
}
/* Read extra device-tree properties into the flags variable */ if (of_property_read_bool(node, "big-endian"))
flags |= MPIC_BIG_ENDIAN; if (of_property_read_bool(node, "pic-no-reset"))
flags |= MPIC_NO_RESET; if (of_property_read_bool(node, "single-cpu-affinity"))
flags |= MPIC_SINGLE_DEST_CPU; if (of_device_is_compatible(node, "fsl,mpic")) {
flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
mpic_irq_chip.flags |= IRQCHIP_SKIP_SET_WAKE;
mpic_tm_chip.flags |= IRQCHIP_SKIP_SET_WAKE;
}
/* Look for protected sources */
psrc = of_get_property(mpic->node, "protected-sources", &psize); if (psrc) { /* Allocate a bitmap with one bit per interrupt */
mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL);
BUG_ON(mpic->protected == NULL); for (i = 0; i < psize/sizeof(u32); i++) { if (psrc[i] > intvec_top) continue;
__set_bit(psrc[i], mpic->protected);
}
}
/* default register type */ if (mpic->flags & MPIC_BIG_ENDIAN)
mpic->reg_type = mpic_access_mmio_be; else
mpic->reg_type = mpic_access_mmio_le;
/* * An MPIC with a "dcr-reg" property must be accessed that way, but * only if the kernel includes DCR support.
*/ #ifdef CONFIG_PPC_DCR if (mpic->flags & MPIC_USES_DCR)
mpic->reg_type = mpic_access_dcr; #else
BUG_ON(mpic->flags & MPIC_USES_DCR); #endif
/* Map the global registers */
mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
if (mpic->flags & MPIC_FSL) { int ret;
/* * Yes, Freescale really did put global registers in the * magic per-cpu area -- and they don't even show up in the * non-magic per-cpu copies that this driver normally uses.
*/
mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
MPIC_CPU_THISBASE, 0x1000);
fsl_version = fsl_mpic_get_version(mpic);
/* Error interrupt mask register (EIMR) is required for * handling individual device error interrupts. EIMR * was added in MPIC version 4.1. * * Over here we reserve vector number space for error * interrupt vectors. This space is stolen from the * global vector number space, as in case of ipis * and timer interrupts. * * Available vector space = intvec_top - 13, where 13 * is the number of vectors which have been consumed by * ipis, timer interrupts and spurious.
*/ if (fsl_version >= 0x401) {
ret = mpic_setup_error_int(mpic, intvec_top - 13); if (ret) return NULL;
}
}
/* * EPR is only available starting with v4.0. To support * platforms that don't know the MPIC version at compile-time, * such as qemu-e500, turn off coreint if this MPIC doesn't * support it. Note that we never enable it if it wasn't * requested in the first place. * * This is done outside the MPIC_FSL check, so that we * also disable coreint if the MPIC node doesn't have * an "fsl,mpic" compatible at all. This will be the case * with device trees generated by older versions of QEMU. * fsl_version will be zero if MPIC_FSL is not set.
*/ if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT))
ppc_md.get_irq = mpic_get_irq;
/* Reset */
/* When using a device-node, reset requests are only honored if the MPIC * is allowed to reset.
*/ if (!(mpic->flags & MPIC_NO_RESET)) {
printk(KERN_DEBUG "mpic: Resetting\n");
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_RESET); while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
& MPIC_GREG_GCONF_RESET)
mb();
}
if (mpic->flags & MPIC_ENABLE_MCK)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_MCK);
/* * The MPIC driver will crash if there are more cores than we * can initialize, so we may as well catch that problem here.
*/
BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
/* Map the per-CPU registers */
for_each_possible_cpu(i) { unsignedint cpu = get_hard_smp_processor_id(i);
mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
0x1000);
}
/* * Read feature register. For non-ISU MPICs, num sources as well. On * ISU MPICs, sources are counted as ISUs are added
*/
greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
/* * By default, the last source number comes from the MPIC, but the * device-tree and board support code can override it on buggy hw. * If we get passed an isu_size (multi-isu MPIC) then we use that * as a default instead of the value read from the HW.
*/
last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT; if (isu_size)
last_irq = isu_size * MPIC_MAX_ISU - 1;
of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq); if (irq_count)
last_irq = irq_count - 1;
/* Initialize main ISU if none provided */ if (!isu_size) {
isu_size = last_irq + 1;
mpic->num_sources = isu_size;
mpic_map(mpic, mpic->paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE),
MPIC_INFO(IRQ_STRIDE) * isu_size);
}
/* * FIXME: The code leaks the MPIC object and mappings here; this * is very unlikely to fail but it ought to be fixed anyways.
*/ if (mpic->irqhost == NULL) return NULL;
/* Display version */ switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1:
vers = "1.0"; break; case 2:
vers = "1.2"; break; case 3:
vers = "1.3"; break; default:
vers = ""; break;
}
printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," " max %d CPUs\n",
name, vers, (unsignedlonglong)mpic->paddr, num_possible_cpus());
printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
mpic->next = mpics;
mpics = mpic;
if (!(mpic->flags & MPIC_SECONDARY)) {
mpic_primary = mpic;
irq_set_default_domain(mpic->irqhost);
}
void __init mpic_init(struct mpic *mpic)
{ int i, cpu; int num_timers = 4;
BUG_ON(mpic->num_sources == 0);
printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
if (mpic->flags & MPIC_FSL) {
u32 version = fsl_mpic_get_version(mpic);
/* * Timer group B is present at the latest in MPIC 3.1 (e.g. * mpc8536). It is not present in MPIC 2.0 (e.g. mpc8544). * I don't know about the status of intermediate versions (or * whether they even exist).
*/ if (version >= 0x0301)
num_timers = 8;
}
/* Initialize timers to our reserved vectors and mask them for now */ for (i = 0; i < num_timers; i++) { unsignedint offset = mpic_tm_offset(mpic, i);
/* Initialize IPIs to our reserved vectors and mark them disabled for now */
mpic_test_broken_ipi(mpic); for (i = 0; i < 4; i++) {
mpic_ipi_write(i,
MPIC_VECPRI_MASK |
(10 << MPIC_VECPRI_PRIORITY_SHIFT) |
(mpic->ipi_vecs[0] + i));
}
/* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags); if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
mpic_scan_ht_pics(mpic);
mpic_u3msi_init(mpic);
}
mpic_pasemi_msi_init(mpic);
cpu = mpic_processor_id(mpic);
if (!(mpic->flags & MPIC_NO_RESET)) { for (i = 0; i < mpic->num_sources; i++) { /* start with vector = source number, and masked */
u32 vecpri = MPIC_VECPRI_MASK | i |
(8 << MPIC_VECPRI_PRIORITY_SHIFT);
/* Disable 8259 passthrough, if supported */ if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_8259_PTHROU_DIS);
if (mpic->flags & MPIC_NO_BIAS)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_NO_BIAS);
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
#ifdef CONFIG_PM /* allocate memory to save mpic state */
mpic->save_data = kmalloc_array(mpic->num_sources, sizeof(*mpic->save_data),
GFP_KERNEL);
BUG_ON(mpic->save_data == NULL); #endif
/* Check if this MPIC is chained from a parent interrupt controller */ if (mpic->flags & MPIC_SECONDARY) { int virq = irq_of_parse_and_map(mpic->node, 0); if (virq) {
printk(KERN_INFO "%pOF: hooking up to IRQ %d\n",
mpic->node, virq);
irq_set_handler_data(virq, mpic);
irq_set_chained_handler(virq, &mpic_cascade);
}
}
/* let the mpic know we want intrs. default affinity is 0xffffffff * until changed via /proc. That's how it's done on x86. If we want * it differently, then we should make sure we also change the default * values of irq_desc[].affinity in irq.c.
*/ if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) { for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
}
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
/* let the mpic know we don't want intrs. */ for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); /* We need to EOI the IPI since not all platforms reset the MPIC * on boot and new interrupts wouldn't get delivered otherwise.
*/
mpic_eoi(mpic);
raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
staticunsignedint _mpic_get_one_irq(struct mpic *mpic, int reg)
{
u32 src;
void mpic_reset_core(int cpu)
{ struct mpic *mpic = mpic_primary;
u32 pir; int cpuid = get_hard_smp_processor_id(cpu); int i;
/* Set target bit for core reset */
pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
pir |= (1 << cpuid);
mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
/* Restore target bit after reset complete */
pir &= ~(1 << cpuid);
mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
/* Perform 15 EOI on each reset core to clear pending interrupts.
* This is required for FSL CoreNet based devices */ if (mpic->flags & MPIC_FSL) { for (i = 0; i < 15; i++) {
_mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid],
MPIC_CPU_EOI, 0);
}
}
} #endif/* CONFIG_SMP */
#ifdef CONFIG_PM
/* Save one MPIC's per-source vector/priority and destination registers
 * into mpic->save_data so they can be restored on resume.
 */
static void mpic_suspend_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic->save_data[i].vecprio =
			mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
		mpic->save_data[i].dest =
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
	}
}
while (mpic) {
mpic_suspend_one(mpic);
mpic = mpic->next;
}
return 0;
}
staticvoid mpic_resume_one(struct mpic *mpic)
{ int i;
for (i = 0; i < mpic->num_sources; i++) {
mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
mpic->save_data[i].vecprio);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic->save_data[i].dest);
#ifdef CONFIG_MPIC_U3_HT_IRQS if (mpic->fixups) { struct mpic_irq_fixup *fixup = &mpic->fixups[i];
if (fixup->base) { /* we use the lowest bit in an inverted meaning */ if ((mpic->save_data[i].fixup_data & 1) == 0) continue;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
SSL
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.