/* * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux. * * Based on skelton.c by Donald Becker. * * This driver is a replacement of older and less maintained version. * This is a header of the older version: * -----<snip>----- * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ahennessy@mvista.com * Copyright (C) 2000-2001 Toshiba Corporation * static const char *version = * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n"; * -----<snip>----- * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * (C) Copyright TOSHIBA CORPORATION 2004-2005 * All Rights Reserved.
*/
/* Tuning parameters */ #define DMA_BURST_SIZE 32 #define TX_THRESHOLD 1024 /* used threshold with packet max byte for low pci transfer ability.*/ #define TX_THRESHOLD_MAX 1536 /* setting threshold max value when overrun error occurred this count. */ #define TX_THRESHOLD_KEEP_LIMIT 10
/* Information that need to be kept for each controller. */ struct tc35815_local { struct pci_dev *pci_dev;
struct net_device *dev; struct napi_struct napi;
/* statistics */ struct { int max_tx_qlen; int tx_ints; int rx_ints; int tx_underrun;
} lstats;
/* Tx control lock. This protects the transmit buffer ring * state along with the "tx full" state of the driver. This * means all netif_queue flow control actions are protected * by this lock as well.
*/
spinlock_t lock;
spinlock_t rx_lock;
struct mii_bus *mii_bus; int duplex; int speed; int link; struct work_struct restart_work;
/* * TX4939 PCFG.SPEEDn bit will be changed on * NETDEV_CHANGE event.
*/ /* * WORKAROUND: enable LostCrS only if half duplex * operation. * (TX4939 does not have EnLCarr)
*/ if (phydev->duplex == DUPLEX_HALF &&
lp->chiptype != TC35815_TX4939)
tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
&tr->Tx_Ctl);
phydev = phy_find_first(lp->mii_bus); if (!phydev) {
printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV;
}
/* attach the mac to the phy */
phydev = phy_connect(dev, phydev_name(phydev),
&tc_handle_link_change,
lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev);
}
#ifdef CONFIG_CPU_TX49XX /* * Find a platform_device providing a MAC address. The platform code * should provide a "tc35815-mac" device with a MAC address in its * platform_data.
*/ staticint tc35815_mac_match(struct device *dev, constvoid *data)
{ struct platform_device *plat_dev = to_platform_device(dev); conststruct pci_dev *pci_dev = data; unsignedint id = pci_dev->irq; return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
}
/* Buffer List (for Receive) */
lp->fbl_ptr = (struct FrFD *)fd_addr;
lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); /* * move all allocated skbs to head of rx_skbs[] array. * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in * tc35815_rx() had failed.
*/
lp->fbl_count = 0; for (i = 0; i < RX_BUF_NUM; i++) { if (lp->rx_skbs[i].skb) { if (i != lp->fbl_count) {
lp->rx_skbs[lp->fbl_count].skb =
lp->rx_skbs[i].skb;
lp->rx_skbs[lp->fbl_count].skb_dma =
lp->rx_skbs[i].skb_dma;
}
lp->fbl_count++;
}
} for (i = 0; i < RX_BUF_NUM; i++) { if (i >= lp->fbl_count) {
lp->fbl_ptr->bd[i].BuffData = 0;
lp->fbl_ptr->bd[i].BDCtl = 0; continue;
}
lp->fbl_ptr->bd[i].BuffData =
cpu_to_le32(lp->rx_skbs[i].skb_dma); /* BDID is index of FrFD.bd[] */
lp->fbl_ptr->bd[i].BDCtl =
cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
RX_BUF_SIZE);
}
printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
dev->name, tc_readl(&tr->Tx_Stat));
/* Try to restart the adaptor. */
tc35815_schedule_restart(dev);
dev->stats.tx_errors++;
}
/* * Open/initialize the controller. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong.
*/ staticint
tc35815_open(struct net_device *dev)
{ struct tc35815_local *lp = netdev_priv(dev);
/* * This is used if the interrupt line can turned off (shared). * See 3c503.c for an example of selecting the IRQ at config-time.
*/ if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
dev->name, dev)) return -EAGAIN;
tc35815_chip_reset(dev);
if (tc35815_init_queues(dev) != 0) {
free_irq(dev->irq, dev); return -EAGAIN;
}
napi_enable(&lp->napi);
/* Reset the hardware here. Don't forget to set the station address. */
spin_lock_irq(&lp->lock);
tc35815_chip_init(dev);
spin_unlock_irq(&lp->lock);
netif_carrier_off(dev); /* schedule a link state check */
phy_start(dev->phydev);
/* We are now ready to accept transmit requeusts from * the queueing layer of the networking.
*/
netif_start_queue(dev);
return 0;
}
/* This will only be invoked if your driver is _not_ in XOFF state. * What this means is that you need not check it, and that this * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times.
*/ static netdev_tx_t
tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{ struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; unsignedlong flags;
/* If some error occurs while trying to transmit this * packet, you should return '1' from this function. * In such a case you _may not_ do anything to the * SKB, it is still owned by the network queueing * layer when an error is returned. This means you * may not modify any SKB fields, you may not free * the SKB, etc.
*/
/* This is the most common case for modern hardware. * The spinlock protects this code from the TX complete * hardware interrupt handler. Queue flow control is * thus managed under this lock as well.
*/
spin_lock_irqsave(&lp->lock, flags);
/* failsafe... (handle txdone now if half of FDs are used) */ if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
TX_FD_NUM / 2)
tc35815_txdone(dev);
/* If we just used up the very last entry in the * TX ring on this device, tell the queueing * layer to send no more.
*/ if (tc35815_tx_full(dev)) { if (netif_msg_tx_queued(lp))
printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
netif_stop_queue(dev);
}
/* When the TX completion hw interrupt arrives, this * is when the transmit statistics are updated.
*/
/* We have a good packet(s), get it/them out of the buffers. */ staticint
tc35815_rx(struct net_device *dev, int limit)
{ struct tc35815_local *lp = netdev_priv(dev); unsignedint fdctl; int i; int received = 0;
while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); int pkt_len = fdctl & FD_FDLength_MASK; int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; #ifdef DEBUG struct RxFD *next_rfd; #endif #if (RX_CTL_CMD & Rx_StripCRC) == 0
pkt_len -= ETH_FCS_LEN; #endif
if (netif_msg_rx_status(lp))
dump_rxfd(lp->rfd_cur); if (status & Rx_Good) { struct sk_buff *skb; unsignedchar *data; int cur_bd;
if (--limit < 0) break;
BUG_ON(bd_count > 1);
cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
& BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; #ifdef DEBUG if (cur_bd >= RX_BUF_NUM) {
printk("%s: invalid BDID.\n", dev->name);
panic_queues(dev);
}
BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
(le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); if (!lp->rx_skbs[cur_bd].skb) {
printk("%s: NULL skb.\n", dev->name);
panic_queues(dev);
} #else
BUG_ON(cur_bd >= RX_BUF_NUM); #endif
skb = lp->rx_skbs[cur_bd].skb;
prefetch(skb->data);
lp->rx_skbs[cur_bd].skb = NULL;
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_skbs[cur_bd].skb_dma,
RX_BUF_SIZE, DMA_FROM_DEVICE); if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
data = skb_put(skb, pkt_len); if (netif_msg_pktdata(lp))
print_eth(data);
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
received++;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
} else {
dev->stats.rx_errors++; if (netif_msg_rx_err(lp))
dev_info(&dev->dev, "Rx error (status %x)\n",
status & Rx_Stat_Mask); /* WORKAROUND: LongErr and CRCErr means Overflow. */ if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
status &= ~(Rx_LongErr|Rx_CRCErr);
status |= Rx_Over;
} if (status & Rx_LongErr)
dev->stats.rx_length_errors++; if (status & Rx_Over)
dev->stats.rx_fifo_errors++; if (status & Rx_CRCErr)
dev->stats.rx_crc_errors++; if (status & Rx_Align)
dev->stats.rx_frame_errors++;
}
if (bd_count > 0) { /* put Free Buffer back to controller */ int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); unsignedchar id =
(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; #ifdef DEBUG if (id >= RX_BUF_NUM) {
printk("%s: invalid BDID.\n", dev->name);
panic_queues(dev);
} #else
BUG_ON(id >= RX_BUF_NUM); #endif /* free old buffers */
lp->fbl_count--; while (lp->fbl_count < RX_BUF_NUM)
{ unsignedchar curid =
(id + 1 + lp->fbl_count) % RX_BUF_NUM; struct BDesc *bd = &lp->fbl_ptr->bd[curid]; #ifdef DEBUG
bdctl = le32_to_cpu(bd->BDCtl); if (bdctl & BD_CownsBD) {
printk("%s: Freeing invalid BD.\n",
dev->name);
panic_queues(dev);
} #endif /* pass BD to controller */ if (!lp->rx_skbs[curid].skb) {
lp->rx_skbs[curid].skb =
alloc_rxbuf_skb(dev,
lp->pci_dev,
&lp->rx_skbs[curid].skb_dma); if (!lp->rx_skbs[curid].skb) break; /* try on next reception */
bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
} /* Note: BDLength was modified by chip. */
bd->BDCtl = cpu_to_le32(BD_CownsBD |
(curid << BD_RxBDID_SHIFT) |
RX_BUF_SIZE);
lp->fbl_count++;
}
}
/* put RxFD back to controller */ #ifdef DEBUG
next_rfd = fd_bus_to_virt(lp,
le32_to_cpu(lp->rfd_cur->fd.FDNext)); if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
printk("%s: RxFD FDNext invalid.\n", dev->name);
panic_queues(dev);
} #endif for (i = 0; i < (bd_count + 1) / 2 + 1; i++) { /* pass FD to controller */ #ifdef DEBUG
lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); #else
lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); #endif
lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
lp->rfd_cur++;
} if (lp->rfd_cur > lp->rfd_limit)
lp->rfd_cur = lp->rfd_base; #ifdef DEBUG if (lp->rfd_cur != next_rfd)
printk("rfd_cur = %p, next_rfd %p\n",
lp->rfd_cur, next_rfd); #endif
}
spin_lock(&lp->rx_lock);
status = tc_readl(&tr->Int_Src); do { /* BLEx, FDAEx will be cleared later */
tc_writel(status & ~(Int_BLEx | Int_FDAEx),
&tr->Int_Src); /* write to clear */
handled = tc35815_do_interrupt(dev, status, budget - received); if (status & (Int_BLEx | Int_FDAEx))
tc_writel(status & (Int_BLEx | Int_FDAEx),
&tr->Int_Src); if (handled >= 0) {
received += handled; if (received >= budget) break;
}
status = tc_readl(&tr->Int_Src);
} while (status);
spin_unlock(&lp->rx_lock);
/* count collisions */ if (status & Tx_ExColl)
dev->stats.collisions += 16; if (status & Tx_TxColl_MASK)
dev->stats.collisions += status & Tx_TxColl_MASK;
/* TX4939 does not have NCarr */ if (lp->chiptype == TC35815_TX4939)
status &= ~Tx_NCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ if (!lp->link || lp->duplex == DUPLEX_FULL)
status &= ~Tx_NCarr;
if (!(status & TX_STA_ERR)) { /* no error. */
dev->stats.tx_packets++; return;
}
lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
txfd = &lp->tfd_base[lp->tfd_end]; #ifdef DEBUG if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
printk("%s: TxFD FDNext invalid.\n", dev->name);
panic_queues(dev);
} #endif if (fdnext & FD_Next_EOL) { /* DMA Transmitter has been stopping... */ if (lp->tfd_end != lp->tfd_start) { struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr; int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; struct TxFD *txhead = &lp->tfd_base[head]; int qlen = (lp->tfd_start + TX_FD_NUM
- lp->tfd_end) % TX_FD_NUM;
#ifdef DEBUG if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
printk("%s: TxFD FDCtl invalid.\n", dev->name);
panic_queues(dev);
} #endif /* log max queue length */ if (lp->lstats.max_tx_qlen < qlen)
lp->lstats.max_tx_qlen = qlen;
/* start DMA Transmitter again */
txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); if (netif_msg_tx_queued(lp)) {
printk("%s: start TxFD on queue.\n",
dev->name);
dump_txfd(txfd);
}
tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
} break;
}
}
/* If we had stopped the queue due to a "tx full" * condition, and space has now been made available, * wake up the queue.
*/ if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
netif_wake_queue(dev);
}
/*
 * The inverse routine to tc35815_open(): stop the queue, NAPI and PHY,
 * flush any pending restart work, then reset the chip and release the
 * IRQ and descriptor queues.  Always returns 0.
 */
static int
tc35815_close(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);
	if (dev->phydev)
		phy_stop(dev->phydev);
	/* restart_work may re-arm the hardware; make sure it is finished */
	cancel_work_sync(&lp->restart_work);

	/* Flush the Tx and disable Rx here. */
	tc35815_chip_reset(dev);
	free_irq(dev->irq, dev);
	tc35815_free_queues(dev);

	return 0;
}
/* * Get the current statistics. * This may be called with the card open or closed.
*/ staticstruct net_device_stats *tc35815_get_stats(struct net_device *dev)
{ struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr; if (netif_running(dev)) /* Update the statistics from the device registers. */
dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
/* * Set or clear the multicast filter for this adaptor. * num_addrs == -1 Promiscuous mode, receive all packets * num_addrs == 0 Normal mode, clear multicast list * num_addrs > 0 Multicast mode, receive normal and MC packets, * and do best-effort filtering.
*/ staticvoid
tc35815_set_multicast_list(struct net_device *dev)
{ struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
if (dev->flags & IFF_PROMISC) { /* With some (all?) 100MHalf HUB, controller will hang * if we enabled promiscuous mode before linkup...
*/ struct tc35815_local *lp = netdev_priv(dev);
/* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ if (HAVE_DMA_RXALIGN(lp))
tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); else
tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
tc_writel(0, &tr->TxPollCtr); /* Batch mode */
tc_writel(TX_THRESHOLD, &tr->TxThrsh);
tc_writel(INT_EN_CMD, &tr->Int_En);
/* set queues */
tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
tc_writel((unsignedlong)lp->rfd_limit - (unsignedlong)lp->rfd_base,
&tr->FDA_Lim); /* * Activation method: * First, enable the MAC Transmitter and the DMA Receive circuits. * Then enable the DMA Transmitter and the MAC Receive circuits.
*/
tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */
tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
/* start MAC transmitter */ /* TX4939 does not have EnLCarr */ if (lp->chiptype == TC35815_TX4939)
txctl &= ~Tx_EnLCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL)
txctl &= ~Tx_EnLCarr;
tc_writel(txctl, &tr->Tx_Ctl);
}
/*
 * NOTE(review): residual text from the hosting webpage, not part of the
 * driver source (translated from German):
 * "The information on this website has been compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */