/* set the affinity hint */ if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
dev_err(&dpio_dev->dev, "irq_set_affinity failed irq %d cpu %d\n",
irq->virq, cpu);
return 0;
}
/*
 * Probe callback for a DPIO (data-path I/O) object on the fsl-mc bus.
 *
 * Visible portion of the probe sequence: allocate the per-device private
 * data, pick an unused CPU for the interrupt affinity hint, program the
 * stashing destination for that CPU's cluster, and map the portal's
 * cache-enabled (CENA) register region.
 *
 * Returns 0 on success or a negative errno on failure (error paths jump
 * to cleanup labels defined later in the function).
 *
 * NOTE(review): this chunk ends before the function's tail (MC portal
 * allocation, dpio_open/enable, service registration and the error
 * labels err_priv_alloc/err_allocate_irqs live outside this view).
 */
static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
{
	struct dpio_attr dpio_attrs;
	struct dpaa2_io_desc desc;
	struct dpio_priv *priv;
	int err = -ENOMEM;
	struct device *dev = &dpio_dev->dev;
	int possible_next_cpu;
	int sdest;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto err_priv_alloc;

	/* get the cpu to use for the affinity hint */
	possible_next_cpu = cpumask_first(cpus_unused_mask);
	if (possible_next_cpu >= nr_cpu_ids) {
		/* every CPU already owns a DPIO; one DPIO per CPU max */
		dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
		err = -ERANGE;
		goto err_allocate_irqs;
	}
	desc.cpu = possible_next_cpu;
	/* claim the CPU so the next probe picks a different one */
	cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);

	/*
	 * Program the cache-stashing destination for this CPU's cluster;
	 * a negative sdest means stashing is not applicable on this SoC.
	 */
	sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
	if (sdest >= 0) {
		err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
						    dpio_dev->mc_handle,
						    sdest);
		if (err)
			/* non-fatal: continue without stashing */
			dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
				desc.cpu);
	}

	if (dpio_dev->obj_desc.region_count < 3) {
		/* No support for DDR backed portals, use classic mapping */
		/*
		 * Set the CENA regs to be the cache inhibited area of the
		 * portal to avoid coherency issues if a user migrates to
		 * another core.
		 */
		desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
					       resource_size(&dpio_dev->regions[1]),
					       MEMREMAP_WC);
	} else {
		/* DDR backed portal: map region 2 write-back cacheable */
		desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
					       resource_size(&dpio_dev->regions[2]),
					       MEMREMAP_WB);
	}
/*
 * Tear down interrupts for a given DPIO object: detach the registered
 * IRQ handlers, then release the IRQs back to the fsl-mc bus.
 */
static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
{
	unregister_dpio_irq_handlers(dpio_dev);
	fsl_mc_free_irqs(dpio_dev);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.