/* Transmit Deinitialization
 *
 * This routine will free allocations done by mlxbf_gige_tx_init(),
 * namely the TX WQE array and the TX completion counter.
 *
 * NOTE(review): in this fragment only the per-entry SKB unmap/free loop
 * is visible; 'size' is declared but never used and the function's
 * closing brace is missing, which suggests the tail of this routine
 * (freeing the WQE array / completion counter) was lost in extraction —
 * confirm against the full source.
 */
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
{
	u64 *tx_wqe_addr;
	size_t size; int i;

	/* Walk the TX WQE ring; the first 64-bit word of each WQE holds
	 * the DMA address of that entry's TX buffer (see the xmit path,
	 * which stores tx_buf_dma there).
	 */
	tx_wqe_addr = priv->tx_wqe_base;
	for (i = 0; i < priv->tx_q_entries; i++) { if (priv->tx_skb[i]) {
		/* Unmap the buffer and release the still-pending SKB */
		dma_unmap_single(priv->dev, *tx_wqe_addr,
			priv->tx_skb[i]->len, DMA_TO_DEVICE);
		dev_kfree_skb(priv->tx_skb[i]);
		priv->tx_skb[i] = NULL;
	}
	/* Each TX WQE is two 64-bit words; step to the next entry */
	tx_wqe_addr += 2;
	}
/* Function that returns status of TX ring:
 *     0: TX ring is full, i.e. there are no
 *        available un-used entries in TX ring.
 *     non-null: TX ring is not full, i.e. there are
 *        some available entries in TX ring.
 *        The non-null value is a measure of
 *        how many TX entries are available, but
 *        it is not the exact number of available
 *        entries (see below).
 *
 * The algorithm makes the assumption that if
 * (prev_tx_ci == tx_pi) then the TX ring is empty.
 * An empty ring actually has (tx_q_entries-1)
 * entries, which allows the algorithm to differentiate
 * the case of an empty ring vs. a full ring.
 */
static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
{
	unsigned long flags;
	u16 avail;

	/* prev_tx_ci and tx_pi are updated from the completion and xmit
	 * paths; hold the driver lock so the pair is read consistently.
	 * NOTE(review): lock name assumed to be priv->lock — the 'flags'
	 * declaration in the original implies irqsave locking; confirm
	 * against the driver's struct definition.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (priv->prev_tx_ci == priv->tx_pi)
		avail = priv->tx_q_entries - 1;
	else
		avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
			  % priv->tx_q_entries) - 1;

	spin_unlock_irqrestore(&priv->lock, flags);

	return avail;
}
/* Function to advance the tx_wqe_next pointer to next TX WQE */ void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
{ /* Advance tx_wqe_next pointer */
priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
/* Check if 'next' pointer is beyond end of TX ring */ /* If so, set 'next' back to 'base' pointer of ring */ if (priv->tx_wqe_next == (priv->tx_wqe_base +
(priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
priv->tx_wqe_next = priv->tx_wqe_base;
}
/* NOTE(review): this span is the tail of the ndo_start_xmit handler;
 * the function signature and the computation of start_dma_page /
 * end_dma_page are not visible in this fragment — confirm against the
 * full source. Also, priv->tx_pi is written to the producer-index
 * register below but no increment of tx_pi is visible here; verify the
 * increment exists in the missing head of the function.
 */

/* Verify that payload pointer and data length of SKB to be
 * transmitted does not violate the hardware DMA limitation.
 */
if (start_dma_page != end_dma_page) {
	/* DMA operation would fail as-is, alloc new aligned SKB */
	tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
		&tx_buf_dma, DMA_TO_DEVICE);
	if (!tx_skb) {
		/* Free original skb, could not alloc new aligned SKB */
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Copy the payload into the freshly allocated, aligned SKB */
	skb_put_data(tx_skb, skb->data, skb->len);

	/* Free the original SKB */
	dev_kfree_skb(skb);
} else {
	/* Payload does not cross the DMA boundary; map it directly */
	tx_skb = skb;
	tx_buf_dma = dma_map_single(priv->dev, skb->data,
		skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_buf_dma)) {
		/* Mapping failed: drop the packet, count it, tell stack OK */
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
}

/* Get address of TX WQE */
tx_wqe_addr = priv->tx_wqe_next;

mlxbf_gige_update_tx_wqe_next(priv);

/* Put PA of buffer address into first 64-bit word of TX WQE */
*tx_wqe_addr = tx_buf_dma;

/* Set TX WQE pkt_len appropriately
 * NOTE: GigE silicon will automatically pad up to
 * minimum packet length if needed.
 */
word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;

/* Write entire 2nd word of TX WQE */
*(tx_wqe_addr + 1) = word2;

/* Kick the hardware only when the stack has no more packets queued
 * for this batch (xmit_more coalescing).
 */
if (!netdev_xmit_more()) {
	/* Create memory barrier before write to TX PI */
	wmb();
	writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
}

/* Check if the last TX entry was just used */
if (!mlxbf_gige_tx_buffs_avail(priv)) {
	/* TX ring is full, inform stack */
	netif_stop_queue(netdev);

	/* Since there is no separate "TX complete" interrupt, need
	 * to explicitly schedule NAPI poll. This will trigger logic
	 * which processes TX completions, and will hopefully drain
	 * the TX ring allowing the TX queue to be awakened.
	 */
	napi_schedule(&priv->napi);
}

return NETDEV_TX_OK;
}
/*
 * (Extraction residue from the hosting webpage — not part of the driver.)
 * Messung V0.5 — Dauer der Verarbeitung: 0.1 Sekunden (vorverarbeitet)
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */