static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off +
			 chan * priv->dma_chan_width);
}
static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off +
		   chan * priv->dma_chan_width);
}
/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;
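	/* The rest of this function was truncated in the dump. A plausible
	 * completion (the MIIDATA field shifts/masks are assumptions taken
	 * from the driver's register header): compose the read command,
	 * issue it through do_mdio_op() and fetch the 16-bit result. */
	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG) & 0xffff;
	return val;
}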
/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}
/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}
/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}
/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
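/* Illustrative sketch (not part of the dump): the two mii-core callbacks
 * above are typically hooked up through a struct mii_if_info embedded in
 * the driver's private data, roughly as follows (field values here are
 * assumptions):
 *
 *	priv->mii.dev = dev;
 *	priv->mii.mdio_read = bcm_enet_mdio_read_mii;
 *	priv->mii.mdio_write = bcm_enet_mdio_write_mii;
 *	priv->mii.phy_id_mask = 0x3f;
 *	priv->mii.reg_num_mask = 0x1f;
 */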
		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1,
					ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC,
					 priv->rx_chan);
	}
	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
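/* Sketch (assumption, not in the dump): the rx_timeout timer armed above
 * presumably fires a callback along these lines, retrying the refill under
 * the rx lock; the refill helper's name and signature are assumed. */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}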
		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}
		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}
		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}
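			/* likely continuation (truncated in the dump): copy
			 * the small frame into the fresh skb so the original
			 * rx buffer can be recycled in place; the rx buffer
			 * data offset field is an assumption */
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			skb_put(skb, len);
		}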
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;
	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev,
			"xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}
	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}

		data = skb_put_zero(skb, needed);
	}
	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
	      (dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
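/* Worked example (illustrative): for the MAC address 00:11:22:33:44:55 the
 * two writes above produce
 *	ENET_PML_REG(0) = 0x22334455                        (lower 4 octets)
 *	ENET_PMH_REG(0) = ENET_PMH_DATAVALID_MASK | 0x0011  (upper 2 octets)
 */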
/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;
	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers (per-address writes
		 * reconstructed from the packing used for the own mac
		 * address above) */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
		      (dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* zero out unused perfect match registers */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}
/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}
/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}
	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}
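		/* likely continuation (truncated in the dump): apply the
		 * resolved pause configuration through the helper above and
		 * remember the new state */
		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		priv->old_pause = phydev->pause;
		status_changed = 1;
	}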
	if (IS_ERR(phydev)) {
		dev_err(kdev, "could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phy_support_sym_pause(phydev);
	phy_set_max_speed(phydev, SPEED_100);
	phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
			  priv->pause_auto);
	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}
	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN,
				 priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN,
				 priv->tx_chan);
	}
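	/* Worked example (illustrative): with a ring of 64 rx descriptors,
	 * integer division gives a low threshold of 64 / 3 = 21 and a high
	 * threshold of (64 * 2) / 3 = 42 descriptors. */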
	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);
/* watch "mib counters about to overflow" interrupt */
enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
/* watch "packet transferred" interrupt in rx and tx */
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IR, priv->rx_chan);
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IR, priv->tx_chan);
/* make sure we enable napi before rx interrupt */
napi_enable(&priv->napi);
	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}
/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;
/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}
	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	/* only probe bus where we think the PHY is, because
	 * the mdio read operation returns 0 instead of 0xffff
	 * if a slave is not present on hw */
	bus->phy_mask = ~(1 << priv->phy_id);

	if (priv->has_phy_interrupt)
		bus->irq[priv->phy_id] = priv->phy_interrupt;

	ret = mdiobus_register(bus);
	if (ret) {
		dev_err(&pdev->dev, "unable to register mdio bus\n");
		goto out_free_mdio;
	}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}
	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));
		priv->sw_port_link[i] = 0;
	}
	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
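	/* Note (assumption): the 0x1ff mask presumably sets the per-port
	 * jumbo-enable bits for all nine switch ports (eight external ports
	 * plus the internal CPU/IMP port), with 9728 bytes as the maximum
	 * accepted frame size. */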
	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);
	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;
	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}
/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}
/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;
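	/* likely completion (truncated in the dump): resolve the phy's
	 * external/internal status, then forward to the low-level helper
	 * named in the comment above */
	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}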
/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;
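	/* likely completion (truncated in the dump), mirroring the read
	 * path above */
	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}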