// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */
/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:   u32 * MAX_IRQS
 *   SW_SET:    u32 * (MAX_IRQS / 32)
 *   SW_CLR:    u32 * (MAX_IRQS / 32)
 *   MASK_SET:  u32 * (MAX_IRQS / 32)
 *   MASK_CLR:  u32 * (MAX_IRQS / 32)
 *   HW_STATE:  u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */
/* * FIQ hwirq index definitions: FIQ sources use the DT binding defines * directly, except that timers are special. At the irqchip level, the * two timer types are represented by their access method: _EL0 registers * or _EL02 registers. In the DT binding, the timers are represented * by their purpose (HV or guest). This mapping is for when the kernel is * running at EL2 (with VHE). When the kernel is running at EL1, the * mapping differs and aic_irq_domain_translate() performs the remapping.
*/
/*
 * FIQ hwirq numbers, allocated after all true hardware IRQs (see the
 * header comment above for the EL1 vs EL2 timer remapping rationale).
 */
enum fiq_hwirq { /* Must be ordered as in apple-aic.h */
	/* VHE (EL2) view: the _EL0 register timers are the HV timers. */
	AIC_TMR_EL0_PHYS = AIC_TMR_HV_PHYS,
	AIC_TMR_EL0_VIRT = AIC_TMR_HV_VIRT,
	/* The _EL02 register timers are the guest timers. */
	AIC_TMR_EL02_PHYS = AIC_TMR_GUEST_PHYS,
	AIC_TMR_EL02_VIRT = AIC_TMR_GUEST_VIRT,
	/* Per-core PMU FIQs: efficiency and performance cores. */
	AIC_CPU_PMU_Effi = AIC_CPU_PMU_E,
	AIC_CPU_PMU_Perf = AIC_CPU_PMU_P, /* No need for this to be discovered from DT */
	/* vGIC maintenance interrupt (handled out of the event loop). */
	AIC_VGIC_MI,
	AIC_NR_FIQ
};
/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);
/* NOTE(review): fragment — the enclosing function head is not visible here. */
/* Per-die register block offset for this hwirq's die. */
u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
u32 irq = AIC_HWIRQ_IRQ(hwirq);
/* Writing the IRQ's bit into MASK_CLR clears its mask bit (unmasks it). */
aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}
/* End-of-interrupt for hw IRQs: re-enable the line after handling. */
static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}
/*
 * NOTE(review): fragment — the enclosing handler and loop condition are not
 * visible here. This reads one event word from the AIC event register and
 * decodes it, then polices stray vGIC maintenance interrupts.
 */
do { /* * We cannot use a relaxed read here, as reads from DMA buffers * need to be ordered after the IRQ fires.
*/
event = readl(ic->event + ic->info.event);
/* The event word encodes both the event type and the IRQ number. */
type = FIELD_GET(AIC_EVENT_TYPE, event);
irq = FIELD_GET(AIC_EVENT_NUM, event);
/* * vGIC maintenance interrupts end up here too, so we need to check * for them separately. It should however only trigger when NV is * in use, and be cleared when coming back from the handler.
*/ if (is_kernel_in_hyp_mode() &&
(read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
/* Still pending after the handler ran: disable to avoid an IRQ storm. */
if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2))) {
pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
}
staticint aic_irq_set_type(struct irq_data *d, unsignedint type)
{ /* * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't * have a way to find out the type of any given IRQ, so just allow both.
*/ return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}
/* End-of-interrupt for FIQ sources tracked via the per-CPU software mask. */
static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}
/*
 * FIQ entry point: poll every possible FIQ source and dispatch the ones
 * that are pending. Logic is unchanged; this only repairs the mangled
 * "staticvoid" keyword and restores kernel formatting.
 */
static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */
	if (static_branch_likely(&use_fast_ipi) &&
	    (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
		aic_handle_ipi(regs);

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		/* Guest timers only matter if their FIQ delivery is enabled. */
		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
		int irq;

		/* Route the PMC FIQ based on which core cluster this CPU is in. */
		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (static_branch_likely(&use_fast_ipi) &&
	    (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}
/*
 * NOTE(review): fragment — the enclosing translate function's head is not
 * visible here. A 4-cell fwspec carries a leading die index; the 3-cell
 * form defaults to die 0 (presumably set up above — verify against head).
 */
if (fwspec->param_count == 4) {
die = args[0];
args++;
}
/* Validate and encode the hwirq number per interrupt class. */
switch (fwspec->param[0]) { case AIC_IRQ: if (die >= ic->nr_die) return -EINVAL; if (args[0] >= ic->nr_irq) return -EINVAL;
*hwirq = AIC_IRQ_HWIRQ(die, args[0]); break; case AIC_FIQ: if (die != 0) return -EINVAL; if (args[0] >= AIC_NR_FIQ) return -EINVAL;
*hwirq = AIC_FIQ_HWIRQ(args[0]);
/* * In EL1 the non-redirected registers are the guest's, * not EL2's, so remap the hwirqs to match.
*/ if (!is_kernel_in_hyp_mode()) { switch (args[0]) { case AIC_TMR_GUEST_PHYS:
*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS); break; case AIC_TMR_GUEST_VIRT:
*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT); break; case AIC_TMR_HV_PHYS: case AIC_TMR_HV_VIRT: return -ENOENT; default: break;
}
} break; default: return -EINVAL;
}
/* Last cell carries the trigger type flags. */
*type = args[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
staticint aic_irq_domain_alloc(struct irq_domain *domain, unsignedint virq, unsignedint nr_irqs, void *arg)
{ unsignedint type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = arg;
irq_hw_number_t hwirq; int i, ret;
ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type); if (ret) return ret;
for (i = 0; i < nr_irqs; i++) {
ret = aic_irq_domain_map(domain, virq + i, hwirq + i); if (ret) return ret;
}
/* Per-CPU IPI handler: ack the hardware IPI, then demux the vIPIs. */
static void aic_handle_ipi(struct pt_regs *regs)
{
	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	ipi_mux_process();

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
/*
 * NOTE(review): fragment — the enclosing per-CPU init function's head is
 * not visible here. AICv1-only IPI register setup.
 */
if (aic_irqc->info.version == 1) { /* * Make sure the kernel's idea of logical CPU order is the same as AIC's * If we ever end up with a mismatch here, we will have to introduce * a mapping table similar to what other irqchip drivers do.
*/
WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
/* * Always keep IPIs unmasked at the hardware level (except auto-masking * by AIC during processing). We manage masks at the vIPI level. * These registers only exist on AICv1, AICv2 always uses fast IPIs.
*/
/* Ack any stale IPIs first, then program the desired mask state. */
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); if (static_branch_likely(&use_fast_ipi)) {
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
} else {
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
}
/* Initialize the local mask state */
__this_cpu_write(aic_fiq_unmasked, 0);
/*
 * NOTE(review): the fragment below appears to belong to a different
 * function (controller probe/init — head not visible): it walks every
 * die, masks all hw IRQs (MASK_SET), clears any pending software
 * triggers (SW_CLR), and, where a target-CPU register exists, points
 * every IRQ at CPU 1's bit — presumably CPU 0; verify against the
 * register layout.
 */
off = 0; for (die = 0; die < irqc->nr_die; die++) { for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX); for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX); if (irqc->info.target_cpu) for (i = 0; i < irqc->nr_irq; i++)
aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
off += irqc->info.die_stride;
}
/* AIC2 capability/config register read (fragment truncated below). */
if (irqc->info.version == 2) {
u32 config = aic_ic_read(irqc, AIC2_CONFIG);
/*
 * NOTE(review): unrelated non-C boilerplate text (German website footer,
 * an extraction artifact) was appended here and has been removed; the
 * original source continues beyond this truncation point.
 */