/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */
/*
 * The 8 most significant bits of the intsn identify the interrupt major
 * block.  Each major block might use its own interrupt domain.  Thus 256
 * domains are needed.
 */
#define MAX_CIU3_DOMAINS 256
/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64 ciu3_addr;		/* CIU3 base address (added to register offsets for CSR access) */
	int node;		/* NUMA node this CIU3 serves */
	/* One irq_domain per interrupt major block (intsn >> 12). */
	struct irq_domain *domain[MAX_CIU3_DOMAINS];
	/* Per-major-block intsn -> hw-number translation callbacks. */
	octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
};
/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];
/* Configuration attached to a CIU interrupt domain. */
struct octeon_irq_ciu_domain_data {
	int num_sum;		/* number of sum registers (2 or 3). */
};
/* Map a (CIU line, bit) pair to its Linux irq number; 0 means unmapped. */
static __read_mostly int octeon_irq_ciu_to_irq[8][64];
/* Per-irq chip data shared by the CIU, CIU2 and CIU3 irq_chip variants. */
struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu;	/* Next CPU expected to take this irq */
	int ciu_node;		/* NUMA node number of the CIU */
};
/* * We don't need to disable IRQs to make these atomic since * they are already disabled earlier in the low level * interrupt code.
*/
clear_c0_status(0x100 << bit); /* The two user interrupts must be cleared manually. */ if (bit < 2)
clear_c0_cause(0x100 << bit);
}
/* * We don't need to disable IRQs to make these atomic since * they are already disabled earlier in the low level * interrupt code.
*/
set_c0_status(0x100 << cd->bit);
}
raw_spin_lock_irqsave(lock, flags);
__set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq.
*/
wmb(); if (cd->line == 0)
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(lock, flags);
}
}
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/* * Enable the irq in the sum2 registers.
*/ staticvoid octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
u64 mask; int cpu = next_cpu_for_irq(data); int index = octeon_coreid_for_cpu(cpu); struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
/* * Disable the irq in the sum2 registers.
*/ staticvoid octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
u64 mask; int cpu = next_cpu_for_irq(data); int index = octeon_coreid_for_cpu(cpu); struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
staticvoid octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
u64 mask; int cpu = next_cpu_for_irq(data); int index = octeon_coreid_for_cpu(cpu); struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		/* Keep the software mirror in sync with the hardware enable. */
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) { int index = cvmx_get_core_num() * 2;
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
} else { int index = cvmx_get_core_num() * 2 + 1;
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
}
/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		/* Line 1 bits live in the per-chip SUM1 register. */
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
/*
 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}
/*
 * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->gpio_line);
cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
#ifdef CONFIG_SMP
/*
 * Move any irq targeting the CPU that is going offline to another CPU
 * in its affinity set (or the first online CPU if none remain).
 */
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	const struct cpumask *mask = irq_data_get_affinity_mask(data);

	/* Nothing to do if this irq doesn't target the offlining CPU. */
	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}
/* * For non-v2 CIU, we will allow only single CPU affinity. * This removes the need to do locking in the .ack/.eoi * functions.
*/ if (cpumask_weight(dest) != 1) return -EINVAL;
if (!enable_one) return 0;
for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu);
/* * Set affinity for the irq for chips that have the EN*_W1{S,C} * registers.
*/ staticint octeon_irq_ciu_set_affinity_v2(struct irq_data *data, conststruct cpumask *dest, bool force)
{ int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
u64 mask; struct octeon_ciu_chip_data *cd;
if (!enable_one) return 0;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << cd->bit;
/* Startup callback shared by edge-triggered irqs. */
static unsigned int edge_startup(struct irq_data *data)
{
	/* ack any pending edge-irq at startup, so there is
	 * an _edge_ to fire on when the event reappears.
	 */
	data->chip->irq_ack(data);
	data->chip->irq_enable(data);
	return 0;
}
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
/* * Watchdog interrupts are special. They are associated with a single * core, so we hardwire the affinity to that core.
*/ staticvoid octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{ int coreid = data->irq - OCTEON_IRQ_WDOG0; int cpu = octeon_cpu_for_coreid(coreid);
line = (hw + gpiod->base_hwirq) >> 6;
bit = (hw + gpiod->base_hwirq) & 63; if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL;
/* * Default to handle_level_irq. If the DT contains a different * trigger type, it will call the irq_set_type callback and * the handler gets updated.
*/
r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
octeon_irq_gpio_chip, handle_level_irq); return r;
}
/* Per-CPU CIU initialization: reset mirrors and mask all interrupts. */
static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
staticvoid octeon_irq_init_ciu2_percpu(void)
{
u64 regx, ipx; int coreid = cvmx_get_core_num();
u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
/* * Disable All CIU2 Interrupts. The ones we need will be * enabled later. Read the SUM register so we know the write * completed. * * There are 9 registers and 3 IPX levels with strides 0x1000 * and 0x200 respectively. Use loops to clear them.
*/ for (regx = 0; regx <= 0x8000; regx += 0x1000) { for (ipx = 0; ipx <= 0x400; ipx += 0x200)
cvmx_write_csr(base + regx + ipx, 0);
}
/* CIU_0 */ for (i = 0; i < 16; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); if (r) goto err;
}
r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1); if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0"); goto err;
}
r = octeon_irq_set_ciu_mapping(
OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); if (r) goto err;
r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1); if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1"); goto err;
}
r = octeon_irq_set_ciu_mapping(
OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); if (r) goto err;
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); if (r) goto err;
} for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); if (r) goto err;
}
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); if (r) goto err;
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); if (r) goto err;
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); if (r) goto err;
}
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); if (r) goto err;
r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1); if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx"); goto err;
} /* CIU_1 */ for (i = 0; i < 16; i++) {
r = octeon_irq_set_ciu_mapping(
i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
handle_level_irq); if (r) goto err;
}
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2); if (octeon_irq_use_ip4)
set_c0_status(STATUSF_IP4); else
clear_c0_status(STATUSF_IP4);
gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); if (gpiod) { /* gpio domain host_data is the base hwirq number. */
gpiod->base_hwirq = base_hwirq;
irq_domain_create_linear(of_fwnode_handle(gpio_node), 16,
&octeon_irq_domain_gpio_ops, gpiod);
} else {
pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); return -ENOMEM;
}
/* * Clear the OF_POPULATED flag that was set by of_irq_init() * so that all GPIO devices will be probed.
*/
of_node_clear_flag(gpio_node, OF_POPULATED);
return 0;
} /* * Watchdog interrupts are special. They are associated with a single * core, so we hardwire the affinity to that core.
*/ staticvoid octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
u64 mask;
u64 en_addr; int coreid = data->irq - OCTEON_IRQ_WDOG0; struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
bit = fls64(src) - 1;
irq = octeon_irq_ciu_to_irq[line][bit]; if (unlikely(!irq)) goto spurious;
do_IRQ(irq); goto out;
spurious:
spurious_interrupt();
out: /* CN68XX pass 1.x has an errata that accessing the ACK registers
can stop interrupts from propagating */ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); else
cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id)); return;
}
/* Dispatch a pending CIU2 mailbox interrupt (top 4 bits of IP3 sum). */
static void octeon_irq_ciu2_mbox(void)
{
	int line;
	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
/* CUI2 */ for (i = 0; i < 64; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); if (r) goto err;
}
for (i = 0; i < 32; i++) {
r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
&octeon_irq_chip_ciu2_wd, handle_level_irq); if (r) goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); if (r) goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); if (r) goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); if (r) goto err;
}
switch (type) { case 0: /* unofficial value, but we might as well let it work. */ case 4: /* official value for level triggering. */
*out_type = IRQ_TYPE_LEVEL_HIGH; break; case 1: /* official value for edge triggering. */
*out_type = IRQ_TYPE_EDGE_RISING; break; default: /* Nothing else is acceptable. */ return -EINVAL;
}
isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq)); if (!isc.s.imp) return -EINVAL;
switch (type) { case 4: /* official value for level triggering. */
*out_type = IRQ_TYPE_LEVEL_HIGH; break; case 0: /* unofficial value, but we might as well let it work. */ case 1: /* official value for edge triggering. */
*out_type = IRQ_TYPE_EDGE_RISING; break; default: /* Nothing else is acceptable. */ return -EINVAL;
}
*out_hwirq = hwirq;
return 0;
}
void octeon_irq_ciu3_enable(struct irq_data *data)
{ int cpu; union cvmx_ciu3_iscx_ctl isc_ctl; union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_ctl_addr;
if (likely(dest_pp_int.s.intr)) {
irq_hw_number_t intsn = dest_pp_int.s.intsn;
irq_hw_number_t hw; struct irq_domain *domain; /* Get the domain to use from the major block */ int block = intsn >> 12; int ret;
/*
 * NOTE(review): the following disclaimer is website boilerplate (German)
 * that was accidentally appended to this source file; it is not part of
 * the driver and should be removed.  Translation: "The information on
 * this web page was compiled carefully and to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Note: the colored syntax highlighting and
 * the measurement are still experimental."
 */