static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider all queues that the net core sees as one */
	struct efx_nic *efx = txq1->efx;
	struct efx_tx_queue *txq2;
	unsigned int fill_level;

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	efx_for_each_channel_tx_queue(txq2, txq1->channel)
		txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
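/* For reference, a minimal sketch of the channel fill-level helper used above,
 * assuming it simply sums insert_count - old_read_count over the channel's TX
 * queues.  The name is marked as a sketch to avoid clashing with the real
 * helper, which lives in the driver's headers.
 */
static inline unsigned int
efx_channel_tx_old_fill_level_sketch(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	unsigned int fill_level = 0;

	/* insert_count and old_read_count are free-running counters, so the
	 * unsigned subtraction is wrap-safe and yields descriptors in flight.
	 */
	efx_for_each_channel_tx_queue(tx_queue, channel)
		fill_level += tx_queue->insert_count - tx_queue->old_read_count;

	return fill_level;
}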
#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	/* Write the aligned prefix straight to the PIO buffer, then stash
	 * any remainder in the copy buffer for a later, padded write.
	 */
	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */
	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;
/* Decide whether we can use TX PIO, ie. write packet data directly into
 * a buffer on the device.  This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty, including all queues for the channel.  This also ensures that
 * only one packet at a time can be using the PIO buffer.  If the xmit_more
 * flag is set then we don't use this - there'll be another packet along
 * shortly and we want to hold off the doorbell.
 */
static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_channel *channel = tx_queue->channel;

	efx_for_each_channel_tx_queue(tx_queue, channel)
		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
			return false;

	return true;
}
#endif /* EFX_USE_PIO */
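/* A sketch of the emptiness test assumed above.  This is an assumption, not
 * the definitive helper: the idea is that the driver keeps a tagged snapshot
 * of the write count (empty_read_count) taken when hardware last caught up,
 * and the queue counts as empty only if that snapshot still matches the
 * current write count.
 */
static inline bool efx_nic_tx_is_empty_sketch(struct efx_tx_queue *tx_queue,
					      unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	/* Zero means "no valid snapshot", i.e. not known to be empty */
	if (empty_read_count == 0)
		return false;

	/* Empty iff nothing was written since the snapshot (ignore valid tag) */
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}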
/* Send any pending traffic for a channel. xmit_more is shared across all
 * queues for a channel, so we must check all of them.
 */
static void efx_tx_send_pending(struct efx_channel *channel)
{
	struct efx_tx_queue *q;

	efx_for_each_channel_tx_queue(q, channel) {
		if (q->xmit_pending)
			efx_nic_push_buffers(q);
	}
}
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		switch (tx_queue->tso_version) {
		case 1:
			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
			break;
		case 2:
			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
			break;
		case 0: /* No TSO on this queue, SW fallback needed */
		default:
			rc = -EINVAL;
			break;
		}

		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_tx_may_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);
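	/* Mark this queue as having descriptors waiting for a doorbell push;
	 * efx_tx_send_pending() checks this flag across the channel's queues.
	 */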
	tx_queue->xmit_pending = true;

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
		efx_tx_send_pending(tx_queue->channel);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);
	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);

	return NETDEV_TX_OK;
}
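/* A hypothetical caller sketch illustrating the contract documented above:
 * the loopback selftest targets a specific TX queue and must hold the TX lock
 * while enqueuing.  The function name and error mapping are illustrative only.
 */
static int efx_loopback_enqueue_sketch(struct efx_tx_queue *tx_queue,
				       struct sk_buff *skb)
{
	struct net_device *net_dev = tx_queue->efx->net_dev;
	netdev_tx_t rc;

	netif_tx_lock_bh(net_dev);
	rc = __efx_enqueue_skb(tx_queue, skb);
	netif_tx_unlock_bh(net_dev);

	/* __efx_enqueue_skb() consumes the skb and normally returns NETDEV_TX_OK */
	return rc == NETDEV_TX_OK ? 0 : -EIO;
}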
/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i = 0;

	if (unlikely(n && !xdpfs))
		return -EINVAL;
	if (unlikely(!n))
		return 0;

	cpu = raw_smp_processor_id();
	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (!tx_queue->initialised)
		return -EINVAL;

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* If we're borrowing net stack queues we have to handle stop-restart
	 * or we might block the queue and it will be considered as frozen
	 */
	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
		if (netif_tx_queue_stopped(tx_queue->core_txq))
			goto unlock;
		efx_tx_maybe_stop_queue(tx_queue);
	}
	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
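	/* insert_count and read_count are free-running, so the unsigned
	 * arithmetic below is wrap-safe: ring occupancy is
	 * insert_count - read_count, and free space is txq_entries minus that.
	 */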
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}
	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

unlock:
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);

	return i == 0 ? -EIO : i;
}
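/* A hypothetical .ndo_xdp_xmit wrapper sketch showing how this helper is
 * typically driven from the XDP core: the batch is pushed once, with the
 * flush argument derived from XDP_XMIT_FLUSH.  The wrapper name is
 * illustrative; the real driver wires this up elsewhere.
 */
static int efx_xdp_xmit_sketch(struct net_device *dev, int n,
			       struct xdp_frame **xdpfs, u32 flags)
{
	struct efx_nic *efx = efx_netdev_priv(dev);

	if (flags & ~XDP_XMIT_FLUSH)
		return -EINVAL;

	return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}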
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Should always return NETDEV_TX_OK and consume the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = efx_tx_csum_type_skb(skb);
	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
		/* There may be existing transmits on the channel that are
		 * waiting for this packet to trigger the doorbell write.
		 * We need to send the packets at this point.
		 */
		efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return efx_ptp_tx(efx, skb);
	}
	tx_queue = efx_get_tx_queue(efx, index, type);
	if (WARN_ON_ONCE(!tx_queue)) {
		/* We don't have a TXQ of the right type.
		 * This should never happen, as we don't advertise offload
		 * features unless we can support them.
		 */
		dev_kfree_skb_any(skb);
		/* If we're not expecting another transmit and we had something
		 * to push on this queue or a partner queue then we need to push
		 * here to get the previous packets out.
		 */
		if (!netdev_xmit_more())
			efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return NETDEV_TX_OK;
	}

	return __efx_enqueue_skb(tx_queue, skb);
}
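/* A minimal, hypothetical registration sketch showing where the handlers in
 * this file plug into the stack.  The ops name is illustrative, the real
 * driver builds its net_device_ops elsewhere, and efx_xdp_xmit_sketch refers
 * to the illustrative wrapper above.
 */
static const struct net_device_ops efx_netdev_ops_sketch = {
	.ndo_start_xmit	= efx_hard_start_xmit,
	.ndo_xdp_xmit	= efx_xdp_xmit_sketch,
};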
	/* Need to check the flag before dequeueing. */
	if (buffer->flags & EFX_TX_BUF_SKB)
		finished = true;
	efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
			   &efv_pkts_compl, &xdp_pkts_compl,
			   &xdp_bytes_compl);