if (inform) {
ether_netdev = card->netdev[GELIC_PORT_ETHERNET_0]; if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP)
netif_carrier_on(ether_netdev); else
netif_carrier_off(ether_netdev);
}
}
/** * gelic_descr_get_status -- returns the status of a descriptor * @descr: descriptor to look at * * returns the status as in the hw_regs.dmac_cmd_status field of the descriptor
*/ staticenum gelic_descr_dma_status
gelic_descr_get_status(struct gelic_descr *descr)
{ return be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
GELIC_DESCR_DMA_STAT_MASK;
}
/*
 * gelic_card_set_link_mode - presumably configures the link mode @mode on
 * the card — TODO confirm against upstream; the body is not visible here.
 *
 * NOTE(review): this block appears truncated in this chunk — it ends right
 * after the local declarations and the rest of the function is missing.
 * The fused `staticint` token is also extraction damage.
 */
staticint gelic_card_set_link_mode(struct gelic_card *card, int mode)
{ int status;
u64 v1, v2;
/**
 * gelic_card_disable_txdmac - disables the transmit DMA controller
 * @card: card structure
 *
 * gelic_card_disable_txdmac terminates processing on the DMA controller
 * by turning off DMA and issuing a force end
 */
static void gelic_card_disable_txdmac(struct gelic_card *card)
{
	int status;

	/* this hvc blocks until the DMA in progress really stopped */
	status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
	if (status)
		dev_err(ctodev(card),
			"lv1_net_stop_tx_dma failed, status=%d\n", status);
}
/** * gelic_card_enable_rxdmac - enables the receive DMA controller * @card: card structure * * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN * in the GDADMACCNTR register
*/ staticvoid gelic_card_enable_rxdmac(struct gelic_card *card)
{ int status;
/*
 * NOTE(review): truncated — the rest of this function's body is missing
 * from this chunk; only the local declaration survived extraction.
 */
/**
 * gelic_card_disable_rxdmac - disables the receive DMA controller
 * @card: card structure
 *
 * gelic_card_disable_rxdmac terminates processing on the DMA controller
 * by turning off DMA and issuing a force end
 */
static void gelic_card_disable_rxdmac(struct gelic_card *card)
{
	int status;

	/* this hvc blocks until the DMA in progress really stopped */
	status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
	if (status)
		dev_err(ctodev(card),
			"lv1_net_stop_rx_dma failed, %d\n", status);
}
/**
 * gelic_descr_set_status -- sets the status of a descriptor
 * @descr: descriptor to change
 * @status: status to set in the descriptor
 *
 * changes the status to the specified value. Doesn't change other bits
 * in the status
 */
static void gelic_descr_set_status(struct gelic_descr *descr,
				   enum gelic_descr_dma_status status)
{
	/* replace only the status field, preserving the command bits */
	descr->hw_regs.dmac_cmd_status = cpu_to_be32(status |
		(be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
		 ~GELIC_DESCR_DMA_STAT_MASK));
	/*
	 * dma_cmd_status field is used to indicate whether the descriptor
	 * is valid or not.
	 * Usually caller of this function wants to inform that to the
	 * hardware, so we assure here the hardware sees the change.
	 */
	wmb();
}
/** * gelic_card_reset_chain - reset status of a descriptor chain * @card: card structure * @chain: address of chain * @start_descr: address of descriptor array * * Reset the status of dma descriptors to ready state * and re-initialize the hardware chain for later use
*/ staticvoid gelic_card_reset_chain(struct gelic_card *card, struct gelic_descr_chain *chain, struct gelic_descr *start_descr)
{ struct gelic_descr *descr;
/*
 * NOTE(review): truncated — the body that walks and resets the chain is
 * missing from this chunk; only the local declaration survived extraction.
 */
/** * gelic_card_init_chain - links descriptor chain * @card: card structure * @chain: address of chain * @start_descr: address of descriptor array * @no: number of descriptors * * we manage a circular list that mirrors the hardware structure, * except that the hardware uses bus addresses. * * returns 0 on success, <0 on failure
*/ staticint gelic_card_init_chain(struct gelic_card *card, struct gelic_descr_chain *chain, struct gelic_descr *start_descr, int no)
{ int i; struct gelic_descr *descr;
/*
 * NOTE(review): truncated — the first per-descriptor loop header (which
 * initialized `descr` from start_descr and performed whatever setup
 * preceded these assignments) is missing from this chunk; the next/prev
 * assignments below have lost their enclosing loop.
 */
descr->next = descr + 1;
descr->prev = descr - 1;
} /* make them as ring */
/* close the doubly linked list into a ring */
(descr - 1)->next = start_descr;
start_descr->prev = (descr - 1);
/* chain bus addr of hw descriptor */
descr = start_descr; for (i = 0; i < no; i++, descr++) {
descr->hw_regs.next_descr_addr =
cpu_to_be32(descr->next->link.cpu_addr);
}
/* do not chain last hw descriptor */
(descr - 1)->hw_regs.next_descr_addr = 0;
return 0;
}
/** * gelic_descr_prepare_rx - reinitializes a rx descriptor * @card: card structure * @descr: descriptor to re-init * * return 0 on success, <0 on failure * * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. * Activate the descriptor state-wise * * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/ staticint gelic_descr_prepare_rx(struct gelic_card *card, struct gelic_descr *descr)
{ staticconstunsignedint rx_skb_size =
ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
GELIC_NET_RXBUF_ALIGN - 1;
dma_addr_t cpu_addr; int offset;
/* a descriptor handed back for reuse must be free */
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
/*
 * NOTE(review): truncated — the skb allocation, alignment and DMA mapping
 * described in the header comment are missing from this chunk.
 */
/** * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains * @card: card structure * * fills all descriptors in the rx chain: allocates skbs * and iommu-maps them. * returns 0 on success, < 0 on failure
*/ staticint gelic_card_fill_rx_chain(struct gelic_card *card)
{ struct gelic_descr *descr = card->rx_chain.head; int ret;
/* walk the ring once, allocating an skb wherever one is missing */
do { if (!descr->skb) {
ret = gelic_descr_prepare_rx(card, descr); if (ret) goto rewind;
}
descr = descr->next;
} while (descr != card->rx_chain.head);
/*
 * NOTE(review): truncated — the success return and the `rewind:` error
 * path targeted by the goto above are missing from this chunk.
 */
/** * gelic_card_get_next_tx_descr - returns the next available tx descriptor * @card: device structure to get descriptor from * * returns the address of the next descriptor, or NULL if not available.
*/ staticstruct gelic_descr *
gelic_card_get_next_tx_descr(struct gelic_card *card)
{ if (!card->tx_chain.head) return NULL; /* see if the next descriptor is free */ if (card->tx_chain.tail != card->tx_chain.head->next &&
gelic_descr_get_status(card->tx_chain.head) ==
GELIC_DESCR_DMA_NOT_IN_USE) return card->tx_chain.head; else return NULL;
}
/** * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field * @descr: descriptor structure to fill out * @skb: packet to consider * * fills out the command and status field of the descriptor structure, * depending on hardware checksum settings. This function assumes a wmb() * has executed before.
*/ staticvoid gelic_descr_set_tx_cmdstat(struct gelic_descr *descr, struct sk_buff *skb)
{ if (skb->ip_summed != CHECKSUM_PARTIAL)
descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL); else { /* is packet ip?
* if yes: tcp? udp? */ if (skb->protocol == htons(ETH_P_IP)) { if (ip_hdr(skb)->protocol == IPPROTO_TCP)
descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
elseif (ip_hdr(skb)->protocol == IPPROTO_UDP)
descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL); else/* * the stack should checksum non-tcp and non-udp * packets on his own: NETIF_F_IP_CSUM
*/
descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
}
}
}
/** * gelic_net_xmit - transmits a frame over the device * @skb: packet to send out * @netdev: interface device structure * * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
*/
netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{ struct gelic_card *card = netdev_card(netdev); struct gelic_descr *descr; int result; unsignedlong flags;
spin_lock_irqsave(&card->tx_lock, flags);
/* reclaim descriptors the hardware has finished with */
gelic_card_release_tx_chain(card, 0);
descr = gelic_card_get_next_tx_descr(card); if (!descr) { /* * no more descriptors free
*/
gelic_card_stop_queues(card);
spin_unlock_irqrestore(&card->tx_lock, flags); return NETDEV_TX_BUSY;
}
result = gelic_descr_prepare_tx(card, descr, skb); if (result) { /* * DMA map failed. As chances are that failure * would continue, just release skb and return
*/
netdev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&card->tx_lock, flags); return NETDEV_TX_OK;
} /* * link this prepared descriptor to previous one * to achieve high performance
*/
descr->prev->hw_regs.next_descr_addr =
cpu_to_be32(descr->link.cpu_addr); /* * as hardware descriptor is modified in the above lines, * ensure that the hardware sees it
*/
wmb(); if (gelic_card_kick_txdma(card, descr)) { /* * kick failed. * release descriptor which was just prepared
*/
netdev->stats.tx_dropped++; /* don't trigger BUG_ON() in gelic_descr_release_tx */
descr->hw_regs.data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
gelic_descr_release_tx(card, descr); /* reset head */
card->tx_chain.head = descr; /* reset hw termination */
descr->prev->hw_regs.next_descr_addr = 0;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
}
/*
 * NOTE(review): truncated — the function tail (spin-lock release and the
 * final return, plus the closing brace) is missing from this chunk.
 */
/** * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on * @descr: descriptor to process * @card: card structure * @netdev: net_device structure to be passed packet * * iommu-unmaps the skb, fills out skb structure and passes the data to the * stack. The descriptor state is not changed.
*/ staticvoid gelic_net_pass_skb_up(struct gelic_descr *descr, struct gelic_card *card, struct net_device *netdev)
/*
 * NOTE(review): truncated at the start — the opening brace, the `skb`
 * local declaration (presumably taken from descr->skb) and the DMA unmap
 * mentioned in the header comment are missing from this chunk.
 */
skb_put(skb, be32_to_cpu(descr->hw_regs.valid_size)?
be32_to_cpu(descr->hw_regs.valid_size) :
be32_to_cpu(descr->hw_regs.result_size)); if (!descr->hw_regs.valid_size)
dev_info(ctodev(card), "buffer full %x %x %x\n",
be32_to_cpu(descr->hw_regs.result_size),
be32_to_cpu(descr->hw_regs.payload.size),
be32_to_cpu(descr->hw_regs.dmac_cmd_status));
descr->skb = NULL; /* * the card put 2 bytes vlan tag in front * of the ethernet frame
*/
skb_pull(skb, 2);
skb->protocol = eth_type_trans(skb, netdev);
/* pass skb up to stack */
netif_receive_skb(skb);
}
/** * gelic_card_decode_one_descr - processes an rx descriptor * @card: card structure * * returns 1 if a packet has been sent to the stack, otherwise 0 * * processes an rx descriptor by iommu-unmapping the data buffer and passing * the packet up to the stack
*/ staticint gelic_card_decode_one_descr(struct gelic_card *card)
{ enum gelic_descr_dma_status status; struct gelic_descr_chain *chain = &card->rx_chain; struct gelic_descr *descr = chain->head; struct net_device *netdev = NULL; int dmac_chain_ended;
status = gelic_descr_get_status(descr);
if (status == GELIC_DESCR_DMA_CARDOWNED) return 0;
/* netdevice select */ if (card->vlan_required) { unsignedint i;
u16 vid;
vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK; for (i = 0; i < GELIC_PORT_MAX; i++) { if (card->vlan[i].rx == vid) {
netdev = card->netdev[i]; break;
}
} if (GELIC_PORT_MAX <= i) {
pr_info("%s: unknown packet vid=%x\n", __func__, vid); goto refill;
}
} else
netdev = card->netdev[GELIC_PORT_ETHERNET_0];
if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
(status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
(status == GELIC_DESCR_DMA_FORCE_END)) {
dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
status);
netdev->stats.rx_dropped++; goto refill;
}
if (status == GELIC_DESCR_DMA_BUFFER_FULL) { /* * Buffer full would occur if and only if * the frame length was longer than the size of this * descriptor's buffer. If the frame length was equal * to or shorter than buffer'size, FRAME_END condition * would occur. * Anyway this frame was longer than the MTU, * just drop it.
*/
dev_info(ctodev(card), "overlength frame\n"); goto refill;
} /* * descriptors any other than FRAME_END here should * be treated as error.
*/ if (status != GELIC_DESCR_DMA_FRAME_END) {
dev_dbg(ctodev(card), "RX descriptor with state %x\n",
status); goto refill;
}
/* ok, we've got a packet in descr */
gelic_net_pass_skb_up(descr, card, netdev);
refill:
/* is the current descriptor terminated with next_descr == NULL? */
dmac_chain_ended =
be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
GELIC_DESCR_RX_DMA_CHAIN_END; /* * So that always DMAC can see the end * of the descriptor chain to avoid * from unwanted DMAC overrun.
*/
descr->hw_regs.next_descr_addr = 0;
/* change the descriptor state: */
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
/* * this call can fail, but for now, just leave this * descriptor without skb
*/
gelic_descr_prepare_rx(card, descr);
chain->tail = descr;
chain->head = descr->next;
/* * Set this descriptor the end of the chain.
*/
descr->prev->hw_regs.next_descr_addr =
cpu_to_be32(descr->link.cpu_addr);
/* * If dmac chain was met, DMAC stopped. * thus re-enable it
*/
if (dmac_chain_ended)
gelic_card_enable_rxdmac(card);
return 1;
}
/** * gelic_net_poll - NAPI poll function called by the stack to return packets * @napi: napi structure * @budget: number of packets we can pass to the stack at most * * returns the number of the processed packets *
*/ staticint gelic_net_poll(struct napi_struct *napi, int budget)
{ struct gelic_card *card = container_of(napi, struct gelic_card, napi); int packets_done = 0;
while (packets_done < budget) { if (!gelic_card_decode_one_descr(card)) break;
/*
 * NOTE(review): truncated — the loop body continuation, NAPI completion
 * handling and the return are missing from this chunk.
 */
/** * gelic_net_open - called upon ifconfig up * @netdev: interface device structure * * returns 0 on success, <0 on failure * * gelic_net_open allocates all the descriptors and memory needed for * operation, sets up multicast list and enables interrupts
*/ int gelic_net_open(struct net_device *netdev)
{ struct gelic_card *card = netdev_card(netdev);
/*
 * NOTE(review): truncated — everything described in the header comment
 * (descriptor setup, interrupt enable, etc.) is missing from this chunk.
 */
/** * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout * function (to be called not under interrupt status) * @work: work is context of tx timout task * * called as task when tx hangs, resets interface (if interface is up)
*/ staticvoid gelic_net_tx_timeout_task(struct work_struct *work)
{ struct gelic_card *card =
container_of(work, struct gelic_card, tx_timeout_task); struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
/*
 * NOTE(review): the lines below (vlan header-length / feature setup) do
 * not match this function's stated purpose and presumably belong to a
 * different function (netdev setup) — TODO verify against upstream; this
 * fragment appears corrupted by extraction.
 */
if (card->vlan_required) {
netdev->hard_header_len += VLAN_HLEN; /* * As vlan is internally used, * we can not receive vlan packets
*/
netdev->features |= NETIF_F_VLAN_CHALLENGED;
}
/*
 * NOTE(review): the following trailer is extraction junk — a German
 * web-page disclaimer, not driver source. English translation:
 * "The information on this web page was compiled carefully, to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */