// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers take considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait. However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */
/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz * that was used before, and should cover MDIO bus speed down to 3200 * Hz.
*/ #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 *
 * Returns 0 once the hard ACS ready condition is observed, or
 * -ETIMEDOUT (with a WARN) if it is still not ready after
 * HARD_ACS_RDY_POLL_NS has elapsed.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	/* Absolute deadline for the poll loop */
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	/* Busy-spin until the access completes or the deadline passes */
	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;
	return 0;
}
/* * temac_indirect_in32 - Indirect register read access. This function * must be called without lp->indirect_lock being held.
*/
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{ unsignedlong flags; int val;
/*
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 *
 * Returns the register value, or (u32)-ETIMEDOUT if either wait for
 * the indirect-access hardware times out.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete. We really
	 * should not see timeouts, and could even end up causing
	 * problem for following indirect access, so let's make a bit
	 * of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}
/*
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;	/* fixed: source had corrupted "unsignedlong" */

	/* Serialize with all other users of the indirect register
	 * interface, then delegate to the _locked variant.
	 */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/*
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 *
 * On timeout the write is silently dropped (beyond the WARN noise).
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here. And if it happens, we actually end up silently
	 * ignoring the write request. Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register: data first, then the
	 * control word with the write-enable bit set.
	 */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here. And if it happens, we continue before the write has
	 * completed. Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
/*
 * temac_dma_in32_* - Memory mapped DMA read, these function expects a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	/* DCR word index -> byte offset (x4); big-endian register read */
	return ioread32be(lp->sdma_regs + (reg << 2));
}
/*
 * temac_dma_out32_* - Memory mapped DMA write (comment previously said
 * "read" — copy/paste error), these function expects a register input
 * that is based on DCR word addresses which are then converted to
 * memory mapped byte addresses. To be assigned to lp->dma_out32.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	/* fixed: source had corrupted "staticvoid" in the definition above.
	 * DCR word index -> byte offset (x4); big-endian register write.
	 */
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}
/* DMA register access functions can be DCR based or memory mapped. * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both * memory mapped.
*/ #ifdef CONFIG_PPC_DCR
/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions.
 *
 * Returns 0 on success (DCR resource found and mapped), -1 when the
 * device tree carries no DCR resource.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	/* fixed: source had corrupted "staticint" / "unsignedint" */
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}
#else
/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86.
 *
 * Always returns -1 so the caller falls back to memory-mapped DMA
 * register access.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	/* fixed: source had corrupted "staticint" in the definition above */
	return -1;
}
lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num, sizeof(*lp->rx_skb), GFP_KERNEL); if (!lp->rx_skb) goto out;
/* allocate the tx and rx ring buffer descriptors. */ /* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) goto out;
/* set up unicast MAC address filter set its mac address */
spin_lock_irqsave(lp->indirect_lock, flags);
temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
(ndev->dev_addr[1] << 8) |
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24)); /* There are reserved bits in EUAW1 * so don't affect them Set MAC bits [47:32] in EUAW1
*/
temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/* Clear all or remaining/unused address table entries */ while (i < MULTICAST_CAM_TABLE_NUM) {
temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
i++;
}
/* Enable address filter block if currently disabled */ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
& XTE_AFM_EPPRM_MASK) {
temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
promisc_mode_disabled = true;
}
spin_unlock_irqrestore(lp->indirect_lock, flags);
if (promisc_mode_disabled)
dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
staticstruct temac_option { int flg;
u32 opt;
u32 reg;
u32 m_or;
u32 m_and;
} temac_options[] = { /* Turn on jumbo packet support for both Rx and Tx */
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXJMBO_MASK,
},
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXJMBO_MASK,
}, /* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXVLAN_MASK,
}, /* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXFCS_MASK,
}, /* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXFCS_MASK,
}, /* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXLT_MASK,
}, /* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
.m_or = XTE_FCC_RXFLO_MASK,
}, /* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
.m_or = XTE_FCC_TXFLO_MASK,
}, /* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
.m_or = XTE_AFM_EPPRM_MASK,
}, /* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
.m_or = XTE_TXC_TXEN_MASK,
}, /* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
.m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
switch (phy->speed) { case SPEED_1000:
mii_speed |= XTE_EMCFG_LINKSPD_1000; break; case SPEED_100:
mii_speed |= XTE_EMCFG_LINKSPD_100; break; case SPEED_10:
mii_speed |= XTE_EMCFG_LINKSPD_10; break;
}
/* Write new speed setting out to TEMAC */
temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
spin_unlock_irqrestore(lp->indirect_lock, flags);
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) { /* Make sure that the other fields are read after bd is * released by dma
*/
rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p); if (skb)
dev_consume_skb_irq(skb);
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
/* Process all received buffers, passing them on network * stack. After this, the buffer descriptors will be in an * un-allocated stage, where no skb is allocated for it, and * they are therefore not available for TEMAC/DMA.
*/ do { struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; unsignedint bdstat = be32_to_cpu(bd->app0); int length;
/* While this should not normally happen, we can end * here when GFP_ATOMIC allocations fail, and we * therefore have un-allocated buffers.
*/ if (!skb) break;
/* Loop over all completed buffer descriptors */ if (!(bdstat & STS_CTRL_APP0_CMPLT)) break;
dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); /* The buffer is not valid for DMA anymore */
bd->phys = 0;
bd->len = 0;
/* if we're doing rx csum offload, set it up */ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) { /* Convert from device endianness (be32) to cpu * endianness, and if necessary swap the bytes * (back) for proper IP checksum byte order * (be16).
*/
skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb); /* The skb buffer is now owned by network stack above */
lp->rx_skb[lp->rx_bd_ci] = NULL;
rx_bd = lp->rx_bd_ci; if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
/* DMA operations will halt when the last buffer descriptor is * processed (ie. the one pointed to by RX_TAILDESC_PTR). * When that happens, no more interrupt events will be * generated. No IRQ_COAL or IRQ_DLY, and not even an * IRQ_ERR. To avoid stalling, we schedule a delayed work * when there is a potential risk of that happening. The work * will call this function, and thus re-schedule itself until * enough buffers are available again.
*/ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
schedule_delayed_work(&lp->restart_work, HZ / 1000);
/* Allocate new buffers for those buffer descriptors that were * passed to network stack. Note that GFP_ATOMIC allocations * can fail (e.g. when a larger burst of GFP_ATOMIC * allocations occurs), so while we try to allocate all * buffers in the same interrupt where they were processed, we * continue with what we could get in case of allocation * failure. Allocation of remaining buffers will be retried * in following calls.
*/ while (1) { struct sk_buff *skb; struct cdmac_bd *bd;
dma_addr_t skb_dma_addr;
/* Move tail pointer when buffers have been allocated */ if (update_tail) {
lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
}
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 *
 * NOTE(review): the body of this function was truncated in the source
 * chunk after the local-variable declarations; per the scheduling site
 * (schedule_delayed_work(&lp->restart_work, ...) in the receive path,
 * which documents that "The work will call this function" to retry
 * buffer allocation), it re-runs receive processing. Confirm against
 * the upstream driver.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	/* fixed: source had corrupted "staticvoid" in the definition above */
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0; if (temac_np) {
p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL); if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL); if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
} elseif (pdata) { if (pdata->txcsum)
lp->temac_features |= TEMAC_FEATURE_TX_CSUM; if (pdata->rxcsum)
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
} if (lp->temac_features & TEMAC_FEATURE_TX_CSUM) /* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
/* Defaults for IRQ delay/coalescing setup. These are * configuration values, so does not belong in device-tree.
*/
lp->coalesce_delay_tx = 0x10;
lp->coalesce_count_tx = 0x22;
lp->coalesce_delay_rx = 0xff;
lp->coalesce_count_rx = 0x07;
/* Setup LocalLink DMA */ if (temac_np) { /* Find the DMA node, map the DMA registers, and * decode the DMA IRQs.
*/
dma_np = of_parse_phandle(temac_np, "llink-connected", 0); if (!dma_np) {
dev_err(&pdev->dev, "could not find DMA node\n"); return -ENODEV;
}
/* Setup the DMA register accesses, could be DCR or * memory mapped.
*/ if (temac_dcr_setup(lp, pdev, dma_np)) { /* no DCR in the device tree, try non-DCR */
lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
NULL); if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev, "unable to map DMA registers\n");
of_node_put(dma_np); return PTR_ERR(lp->sdma_regs);
} if (of_property_read_bool(dma_np, "little-endian")) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
lp->dma_in = temac_dma_in32_be;
lp->dma_out = temac_dma_out32_be;
}
dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
}
/* Get DMA RX and TX interrupts */
lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
} elseif (pdata) { /* 2nd memory resource specifies DMA registers */
lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev, "could not map DMA registers\n"); return PTR_ERR(lp->sdma_regs);
} if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
lp->dma_in = temac_dma_in32_be;
lp->dma_out = temac_dma_out32_be;
}
/* Get DMA RX and TX interrupts */
lp->rx_irq = platform_get_irq(pdev, 0);
lp->tx_irq = platform_get_irq(pdev, 1);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.