/* Arbitrary values, I am not sure the HW has limits */ #define MAX_RX_QUEUE_ENTRIES 1024 #define MAX_TX_QUEUE_ENTRIES 1024 #define MIN_RX_QUEUE_ENTRIES 32 #define MIN_TX_QUEUE_ENTRIES 32
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(priv->netdev, mac);
dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
} else {
eth_hw_addr_random(priv->netdev);
dev_info(priv->dev, "Generated random MAC address %pM\n",
priv->netdev->dev_addr);
}
return 0;
}
staticint ftgmac100_set_mac_addr(struct net_device *dev, void *p)
{ int ret;
ret = eth_prepare_mac_addr_change(dev, p); if (ret < 0) return ret;
/* Configure descriptor sizes and increase burst sizes according * to values in Aspeed SDK. The FIFO arbitration is enabled and * the thresholds set based on the recommended values in the * AST2400 specification.
*/
iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */
FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */
FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */
FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */
FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */
priv->base + FTGMAC100_OFFSET_DBLAC);
/* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt * mitigation doesn't seem to provide any benefit with NAPI so leave * it at that.
*/
iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
FTGMAC100_ITC_TXINT_THR(1),
priv->base + FTGMAC100_OFFSET_ITC);
/* Grab descriptor status */
status = le32_to_cpu(rxdes->rxdes0);
/* Do we have a packet ? */ if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) returnfalse;
/* Order subsequent reads with the test for the ready bit */
dma_rmb();
/* We don't cope with fragmented RX packets */ if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
!(status & FTGMAC100_RXDES0_LRS))) goto drop;
/* Grab received size and csum vlan field in the descriptor */
size = status & FTGMAC100_RXDES0_VDBC;
csum_vlan = le32_to_cpu(rxdes->rxdes1);
/* Any error (other than csum offload) flagged ? */ if (unlikely(status & RXDES0_ANY_ERROR)) { /* Correct for incorrect flagging of runt packets * with vlan tags... Just accept a runt packet that * has been flagged as vlan and whose size is at * least 60 bytes.
*/ if ((status & FTGMAC100_RXDES0_RUNT) &&
(csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
(size >= 60))
status &= ~FTGMAC100_RXDES0_RUNT;
/* Any error still in there ? */ if (status & RXDES0_ANY_ERROR) {
ftgmac100_rx_packet_error(priv, status); goto drop;
}
}
/* If the packet had no skb (failed to allocate earlier) * then try to allocate one and skip
*/
skb = priv->rx_skbs[pointer]; if (!unlikely(skb)) {
ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); goto drop;
}
if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
netdev->stats.multicast++;
/* If the HW found checksum errors, bounce it to software. * * If we didn't, we need to see if the packet was recognized * by HW as one of the supported checksummed protocols before * we accept the HW test results.
*/ if (netdev->features & NETIF_F_RXCSUM) {
u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
FTGMAC100_RXDES1_IP_CHKSUM_ERR; if ((csum_vlan & err_bits) ||
!(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
skb->ip_summed = CHECKSUM_NONE; else
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* Transfer received size to skb */
skb_put(skb, size);
/* Extract vlan tag */ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
csum_vlan & 0xffff);
/* Tear down DMA mapping, do necessary cache management */
map = le32_to_cpu(rxdes->rxdes3);
#ifdefined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) /* When we don't have an iommu, we can save cycles by not * invalidating the cache for the part of the packet that * wasn't received.
*/
dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); #else
dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); #endif
static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
	/* Number of free slots in the TX ring.
	 *
	 * One slot is deliberately kept unused so a completely full
	 * ring never looks identical to an empty one, which also keeps
	 * the test in ftgmac100_tx_buf_cleanable() simple.
	 *
	 * tx_q_entries is a power of two, so masking performs the
	 * ring-index wrap-around.
	 */
	u32 ring_mask = priv->tx_q_entries - 1;
	u32 delta = priv->tx_clean_pointer - priv->tx_pointer;

	return (delta - 1) & ring_mask;
}
/* Add VLAN tag */ if (skb_vlan_tag_present(skb)) {
csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
}
/* Get header len */
len = skb_headlen(skb);
/* Map the packet head */
map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->dev, map)) { if (net_ratelimit())
netdev_err(netdev, "map tx packet head failed\n"); goto drop;
}
/* Grab the next free tx descriptor */
pointer = priv->tx_pointer;
txdes = first = &priv->txdes[pointer];
/* Setup it up with the packet head. Don't write the head to the * ring just yet
*/
priv->tx_skbs[pointer] = skb;
f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
f_ctl_stat |= FTGMAC100_TXDES0_FTS; if (nfrags == 0)
f_ctl_stat |= FTGMAC100_TXDES0_LTS;
txdes->txdes3 = cpu_to_le32(map);
txdes->txdes1 = cpu_to_le32(csum_vlan);
/* Next descriptor */
pointer = ftgmac100_next_tx_pointer(priv, pointer);
/* Add the fragments */ for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
/* Map it */
map = skb_frag_dma_map(priv->dev, frag, 0, len,
DMA_TO_DEVICE); if (dma_mapping_error(priv->dev, map)) goto dma_err;
/* Next one */
pointer = ftgmac100_next_tx_pointer(priv, pointer);
}
/* Order the previous packet and descriptor udpates * before setting the OWN bit on the first descriptor.
*/
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
/* Ensure the descriptor config is visible before setting the tx * pointer.
*/
smp_wmb();
/* Update next TX pointer */
priv->tx_pointer = pointer;
/* If there isn't enough room for all the fragments of a new packet * in the TX ring, stop the queue. The sequence below is race free * vs. a concurrent restart in ftgmac100_poll()
*/ if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
netif_stop_queue(netdev); /* Order the queue stop with the test below */
smp_mb(); if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
netif_wake_queue(netdev);
}
/* Poke transmitter to read the updated TX descriptors */
iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
return NETDEV_TX_OK;
dma_err: if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
/* This cannot be reached if we successfully mapped the * last fragment, so we know ftgmac100_free_tx_packet() * hasn't freed the skb yet.
*/
drop: /* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
staticvoid ftgmac100_free_buffers(struct ftgmac100 *priv)
{ int i;
/* Free all RX buffers */ for (i = 0; i < priv->rx_q_entries; i++) { struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; struct sk_buff *skb = priv->rx_skbs[i];
dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) return;
/* Initialize RX ring */ for (i = 0; i < priv->rx_q_entries; i++) {
rxdes = &priv->rxdes[i];
rxdes->rxdes0 = 0;
rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
} /* Mark the end of the ring */
rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES)) return;
/* Initialize TX ring */ for (i = 0; i < priv->tx_q_entries; i++) {
txdes = &priv->txdes[i];
txdes->txdes0 = 0;
}
txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
/* Allocate and map an RX buffer for every descriptor in the RX ring.
 *
 * Returns 0 on success or -ENOMEM if any single buffer allocation
 * fails (buffers already allocated are left in place; the caller is
 * expected to tear the ring down on error).
 */
static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
	int i;

	for (i = 0; i < priv->rx_q_entries; i++) {
		struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];

		/* GFP_KERNEL is fine here: this runs from open/init
		 * context, not from the NAPI/IRQ refill path.
		 */
		if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
			return -ENOMEM;
	}
	return 0;
}
staticint ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{ struct net_device *netdev = bus->priv; struct ftgmac100 *priv = netdev_priv(netdev); unsignedint phycr; int i;
/* Handle TX completions */ if (ftgmac100_tx_buf_cleanable(priv))
ftgmac100_tx_complete(priv);
/* Handle RX packets */ do {
more = ftgmac100_rx_packet(priv, &work_done);
} while (more && work_done < budget);
/* The interrupt is telling us to kick the MAC back to life * after an RX overflow
*/ if (unlikely(priv->need_mac_restart)) {
ftgmac100_start_hw(priv);
priv->need_mac_restart = false;
/* As long as we are waiting for transmit packets to be * completed we keep NAPI going
*/ if (ftgmac100_tx_buf_cleanable(priv))
work_done = budget;
if (work_done < budget) { /* We are about to re-enable all interrupts. However * the HW has been latching RX/TX packet interrupts while * they were masked. So we clear them first, then we need * to re-check if there's something to process
*/
iowrite32(FTGMAC100_INT_RXTX,
priv->base + FTGMAC100_OFFSET_ISR);
/* Push the above (and provides a barrier vs. subsequent * reads of the descriptor).
*/
ioread32(priv->base + FTGMAC100_OFFSET_ISR);
/* Check RX and TX descriptors for more work to do */ if (ftgmac100_check_rx(priv) ||
ftgmac100_tx_buf_cleanable(priv)) return budget;
/* deschedule NAPI */
napi_complete(napi);
/* enable all interrupts */
iowrite32(FTGMAC100_INT_ALL,
priv->base + FTGMAC100_OFFSET_IER);
}
/* Lock the world */
rtnl_lock(); if (netdev->phydev)
mutex_lock(&netdev->phydev->lock); if (priv->mii_bus)
mutex_lock(&priv->mii_bus->mdio_lock);
/* Check if the interface is still up */ if (!netif_running(netdev)) goto bail;
/* Stop the network stack */
netif_trans_update(netdev);
napi_disable(&priv->napi);
netif_tx_disable(netdev);
/* Stop and reset the MAC */
ftgmac100_stop_hw(priv);
err = ftgmac100_reset_and_config_mac(priv); if (err) { /* Not much we can do ... it might come back... */
netdev_err(netdev, "attempting to continue...\n");
}
/* Free all rx and tx buffers */
ftgmac100_free_buffers(priv);
/* Setup everything again and restart chip */
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
bail: if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock); if (netdev->phydev)
mutex_unlock(&netdev->phydev->lock);
rtnl_unlock();
}
/* We store "no link" as speed 0 */ if (!phydev->link)
new_speed = 0; else
new_speed = phydev->speed;
/* Grab pause settings from PHY if configured to do so */ if (priv->aneg_pause) {
rx_pause = tx_pause = phydev->pause; if (phydev->asym_pause)
tx_pause = !rx_pause;
} else {
rx_pause = priv->rx_pause;
tx_pause = priv->tx_pause;
}
/* Link hasn't changed, do nothing */ if (phydev->speed == priv->cur_speed &&
phydev->duplex == priv->cur_duplex &&
rx_pause == priv->rx_pause &&
tx_pause == priv->tx_pause) return;
/* Print status if we have a link or we had one and just lost it, * don't print otherwise.
*/ if (new_speed || priv->cur_speed)
phy_print_status(phydev);
/* Link is down, do nothing else */ if (!new_speed) return;
/* Disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
/* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock * order consistent to prevent dead lock.
*/ if (netdev->phydev)
mutex_unlock(&netdev->phydev->lock);
ftgmac100_reset(priv);
if (netdev->phydev)
mutex_lock(&netdev->phydev->lock);
/* Default to RGMII. It's a gigabit part after all */
err = of_get_phy_mode(np, &phy_intf); if (err)
phy_intf = PHY_INTERFACE_MODE_RGMII;
/* Aspeed only supports these. I don't know about other IP * block vendors so I'm going to just let them through for * now. Note that this is only a warning if for some obscure * reason the DT really means to lie about it or it's a newer * part we don't know about. * * On the Aspeed SoC there are additionally straps and SCU * control bits that could tell us what the interface is * (or allow us to configure it while the IP block is held * in reset). For now I chose to keep this driver away from * those SoC specific bits and assume the device-tree is * right and the SCU has been configured properly by pinmux * or the firmware.
*/ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
netdev_warn(netdev, "Unsupported PHY mode %s !\n",
phy_modes(phy_intf));
}
phydev = phy_find_first(priv->mii_bus); if (!phydev) {
netdev_info(netdev, "%s: no PHY found\n", netdev->name); return -ENODEV;
}
/* Allocate ring buffers */
err = ftgmac100_alloc_rings(priv); if (err) {
netdev_err(netdev, "Failed to allocate descriptors\n"); return err;
}
/* When using NC-SI we force the speed to 100Mbit/s full duplex, * * Otherwise we leave it set to 0 (no link), the link * message from the PHY layer will handle setting it up to * something else if needed.
*/ if (priv->use_ncsi) {
priv->cur_duplex = DUPLEX_FULL;
priv->cur_speed = SPEED_100;
} else {
priv->cur_duplex = 0;
priv->cur_speed = 0;
}
/* Reset the hardware */
err = ftgmac100_reset_and_config_mac(priv); if (err) goto err_hw;
/* Initialize NAPI */
netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Start things up */
err = ftgmac100_init_all(priv, false); if (err) {
netdev_err(netdev, "Failed to allocate packet buffers\n"); goto err_alloc;
}
if (netdev->phydev) { /* If we have a PHY, start polling */
phy_start(netdev->phydev);
} if (priv->use_ncsi) { /* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
/* Start the NCSI device */
err = ncsi_start_dev(priv->ndev); if (err) goto err_ncsi;
}
/* Note about the reset task: We are called with the rtnl lock * held, so we are synchronized against the core of the reset * task. We must not try to synchronously cancel it otherwise * we can deadlock. But since it will test for netif_running() * which has already been cleared by the net core, we don't * anything special to do.
*/
/* disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
netif_napi_del(&priv->napi); if (netdev->phydev)
phy_stop(netdev->phydev); if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) return -EIO;
if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
of_device_is_compatible(np, "aspeed,ast2500-mac")) { /* The AST2600 has a separate MDIO controller */
/* For the AST2400 and AST2500 this driver only supports the * old MDIO interface
*/
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
}
/* Aspeed specifies a 100MHz clock is required for up to * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz * is sufficient
*/
rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
FTGMAC_100MHZ); if (rc) goto cleanup_clk;
/* RCLK is for RMII, typically used for NCSI. Optional because it's not * necessary if it's the AST2400 MAC, or the MAC is configured for * RGMII, or the controller is not an ASPEED-based controller.
*/
priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
rc = clk_prepare_enable(priv->rclk); if (!rc) return 0;
/* Support "mdio"/"phy" child nodes for ast2400/2500 with * an embedded MDIO controller. Automatically scan the DTS for * available PHYs and register them.
*/ if (of_get_property(np, "phy-handle", NULL) &&
(of_device_is_compatible(np, "aspeed,ast2400-mac") ||
of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
err = ftgmac100_setup_mdio(netdev); if (err) goto err_setup_mdio;
}
phy = of_phy_get_and_connect(priv->netdev, np,
&ftgmac100_adjust_link); if (!phy) {
dev_err(&pdev->dev, "Failed to connect to phy\n");
err = -EINVAL; goto err_phy_connect;
}
/* Indicate that we support PAUSE frames (see comment in * Documentation/networking/phy.rst)
*/
phy_support_asym_pause(phy);
/* Display what we found */
phy_attached_info(phy);
} elseif (np && !ftgmac100_has_child_node(np, "mdio")) { /* Support legacy ASPEED devicetree descriptions that decribe a * MAC with an embedded MDIO controller but have no "mdio" * child node. Automatically scan the MDIO bus for available * PHYs.
*/
priv->use_ncsi = false;
err = ftgmac100_setup_mdio(netdev); if (err) goto err_setup_mdio;
/* There's a small chance the reset task will have been re-queued, * during stop, make sure it's gone before we free the structure.
*/
cancel_work_sync(&priv->reset_task);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.