// SPDX-License-Identifier: GPL-2.0+
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/flsdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */
/*
 * NOTE(review): sh_dmae_lock protects writers of the sh_dmae_devices list;
 * readers (e.g. the NMI notifier below) walk the list under RCU
 * (list_for_each_entry_rcu), so list updates must pair the spinlock with
 * RCU-safe list primitives.
 */
/* * Used for write-side mutual exclusion for the global device list, * read-side synchronization by way of RCU, and per-controller data.
*/ static DEFINE_SPINLOCK(sh_dmae_lock); static LIST_HEAD(sh_dmae_devices);
/* * Different DMAC implementations provide different ways to clear DMA channels: * (1) none - no CHCLR registers are available * (2) one CHCLR register per channel - 0 has to be written to it to clear * channel buffers * (3) one CHCLR per several channels - 1 has to be written to the bit, * corresponding to the specific channel to reset it
*/ staticvoid channel_clear(struct sh_dmae_chan *sh_dc)
/*
 * NOTE(review): this region appears corrupted/truncated.  channel_clear()
 * computes the CHCLR value for one channel (bitwise mode writes a 1 to the
 * channel's bit, otherwise 0 is written), but the register write that should
 * follow the computation of "val" is missing, and the function's closing
 * brace never appears.  The "if (chclr_present)" loop below looks like an
 * interior fragment of a controller-reset routine that iterates all channels
 * and clears each one — TODO: restore the missing statements from the
 * upstream source before building.
 */
{ struct sh_dmae_device *shdev = to_sh_dev(sh_dc); conststruct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
sh_dc->shdma_chan.id;
u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
/* Fragment of a different function: clear every populated channel. */
if (shdev->pdata->chclr_present) { int i; for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (sh_chan)
channel_clear(sh_chan);
}
}
/*
 * NOTE(review): corrupted/fused region.  dmae_set_chcr() starts here but its
 * body (the CHCR register write and "return 0") is missing.  From the busy
 * check onward the code references "slave_id", which is not declared in this
 * scope — the remainder appears to be the interior of a separate channel
 * slave-setup function that programs DMARS and CHCR from the cached slave
 * config, falling back to dmae_init() for non-slave use.  TODO: restore the
 * two functions from upstream before building.
 */
staticint dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{ /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ if (dmae_is_busy(sh_chan)) return -EBUSY;
/* Fragment of a different function: slave-mode channel setup. */
int ret = 0; if (slave_id >= 0) { conststruct sh_dmae_slave_config *cfg =
sh_chan->config;
/* Program the DMA request source (MID/RID), then the channel control reg. */
ret = dmae_set_dmars(sh_chan, cfg->mid_rid); if (ret < 0) goto END;
ret = dmae_set_chcr(sh_chan, cfg->chcr); if (ret < 0) goto END;
} else {
/* No slave configured: reinitialise the channel for memcpy use. */
dmae_init(sh_chan);
}
END: return ret;
}
/* * Find a slave channel configuration from the controller list by either a slave * ID in the non-DT case, or by a MID/RID value in the DT case
*/ staticconststruct sh_dmae_slave_config *dmae_find_slave( struct sh_dmae_chan *sh_chan, int match)
{ struct sh_dmae_device *shdev = to_sh_dev(sh_chan); conststruct sh_dmae_pdata *pdata = shdev->pdata; conststruct sh_dmae_slave_config *cfg; int i;
if (!sh_chan->shdma_chan.dev->of_node) { if (match >= SH_DMA_SLAVE_NUMBER) return NULL;
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) if (cfg->slave_id == match) return cfg;
} else { for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) if (cfg->mid_rid == match) {
sh_chan->shdma_chan.slave_id = i; return cfg;
}
}
staticbool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{ /* Fast path out if NMIF is not asserted for this controller */ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) returnfalse;
return sh_dmae_reset(shdev);
}
staticint sh_dmae_nmi_handler(struct notifier_block *self, unsignedlong cmd, void *data)
{ struct sh_dmae_device *shdev; int ret = NOTIFY_DONE; bool triggered;
/* * Only concern ourselves with NMI events. * * Normally we would check the die chain value, but as this needs * to be architecture independent, check for NMI context instead.
*/ if (!in_nmi()) return NOTIFY_DONE;
rcu_read_lock();
list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { /* * Only stop if one of the controllers has NMIF asserted, * we do not want to interfere with regular address error * handling or NMI events that don't concern the DMACs.
*/
triggered = sh_dmae_nmi_notify(shdev); if (triggered == true)
ret = NOTIFY_OK;
}
rcu_read_unlock();
/*
 * NOTE(review): orphaned fragment — the function header is missing.  Judging
 * by the comment and the returned field, this is presumably the tail of a
 * slave-address accessor that returns the cached DMA slave address for an
 * exclusively-configured channel; TODO confirm against the upstream source
 * and restore the missing signature.
 */
/* * Implicit BUG_ON(!sh_chan->config) * This is an exclusive slave DMA operation, may only be called after a * successful slave configuration.
*/ return sh_chan->slave_addr;
}
/*
 * NOTE(review): interior of a platform-device probe function.  Its signature,
 * local variable declarations (pdata, dmars, errirq_res, shdev, dma_dev, i,
 * irq_cnt, chan_irq[], chan_flag[], irq_cap, err) and the error-label
 * epilogue (chan_probe_err:, edmadevreg:) are not visible in this chunk —
 * the code below cannot compile on its own and must be re-joined with the
 * missing prologue/epilogue.
 */
/* Platform data comes from the DT match table when probed via DT,
 * otherwise from the board's platform data. */
if (pdev->dev.of_node)
pdata = of_device_get_match_data(&pdev->dev); else
pdata = dev_get_platdata(&pdev->dev);
/* get platform data */ if (!pdata || !pdata->channel_num) return -ENODEV;
/* DMARS area is optional */
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* * IRQ resources: * 1. there always must be at least one IRQ IO-resource. On SH4 it is * the error IRQ, in which case it is the only IRQ in this resource: * start == end. If it is the only IRQ resource, all channels also * use the same IRQ. * 2. DMA channel IRQ resources can be specified one per resource or in * ranges (start != end) * 3. iff all events (channels and, optionally, error) on this * controller use the same IRQ, only one IRQ resource can be * specified, otherwise there must be one IRQ per channel, even if * some of them are equal * 4. if all IRQs on this controller are equal or if some specific IRQs * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be * requested with the IRQF_SHARED flag
*/
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!errirq_res) return -ENODEV;
/* Device-managed allocation: freed automatically on probe failure/remove. */
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
GFP_KERNEL); if (!shdev) return -ENOMEM;
dma_dev = &shdev->shdma_dev.dma_dev;
/* Map the channel register block (resource 0) and, if present, DMARS. */
shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(shdev->chan_reg)) return PTR_ERR(shdev->chan_reg); if (dmars) {
shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars); if (IS_ERR(shdev->dmars)) return PTR_ERR(shdev->dmars);
}
/* Create DMA Channel */ for (i = 0; i < irq_cnt; i++) {
err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err;
}
/* Warn (but continue) if the platform declared more channels than the
 * driver supports; only the first SH_DMAE_MAX_CHANNELS were registered. */
if (irq_cap)
dev_notice(&pdev->dev, "Attempting to register %d DMA " "channels when a maximum of %d are supported.\n",
pdata->channel_num, SH_DMAE_MAX_CHANNELS);
pm_runtime_put(&pdev->dev);
err = dma_async_device_register(&shdev->shdma_dev.dma_dev); if (err < 0) goto edmadevreg;
/*
 * NOTE(review): the following text is extraneous non-code content (a German
 * website disclaimer) that was accidentally appended to this source file; it
 * is fenced inside a comment so the file can compile.  Translation: "The
 * information on this website was carefully compiled to the best of our
 * knowledge.  However, neither completeness, correctness, nor quality of the
 * provided information is guaranteed.  Note: the colored syntax rendering
 * and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */