// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
/* Allocate the descriptor ring buffer for @tx_queue.
 *
 * One slot beyond ptr_mask + 1 is reserved for the QMDA status
 * completion entry, hence the "+ 2".
 *
 * Return: 0 on success, or the negative error from efx_nic_alloc_buffer().
 */
int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
	unsigned int ring_bytes = (tx_queue->ptr_mask + 2) * sizeof(efx_oword_t);

	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, ring_bytes,
				    GFP_KERNEL);
}
/* Initialise @tx_queue: bind it to its core netdev TX queue, record the
 * TSO version, and issue the MCDI TXQ init request.
 */
void ef100_tx_init(struct efx_tx_queue *tx_queue)
{
	/* must be the inverse of lookup in efx_get_tx_channel */
	unsigned int core_qnum = tx_queue->channel->channel -
				 tx_queue->efx->tx_channel_offset;

	tx_queue->core_txq = netdev_get_tx_queue(tx_queue->efx->net_dev,
						 core_qnum);

	/* This value is purely documentational; as EF100 never passes through
	 * the switch statement in tx.c:__efx_enqueue_skb(), that switch does
	 * not handle case 3.  EF100's TSOv3 descriptors are generated by
	 * ef100_make_tso_desc().
	 * Meanwhile, all efx_mcdi_tx_init() cares about is that it's not 2.
	 */
	tx_queue->tso_version = 3;

	if (efx_mcdi_tx_init(tx_queue))
		netdev_WARN(tx_queue->efx->net_dev,
			    "failed to initialise TXQ %d\n", tx_queue->queue);
}
if (!skb_is_gso_tcp(skb)) returnfalse; if (!(efx->net_dev->features & NETIF_F_TSO)) returnfalse;
mss = skb_shinfo(skb)->gso_size; if (unlikely(mss < 4)) {
WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss); returnfalse;
}
header_len = efx_tx_tso_header_length(skb); if (header_len > nic_data->tso_max_hdr_len) returnfalse;
if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { /* net_dev->gso_max_segs should've caught this */
WARN_ON_ONCE(1); returnfalse;
}
if (skb->data_len / mss > nic_data->tso_max_frames) returnfalse;
/* net_dev->gso_max_size should've caught this */ if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len)) returnfalse;
/* Reserve an empty buffer for the TSO V3 descriptor. * Convey the length of the header since we already know it.
*/
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT;
buffer->len = header_len;
buffer->unmap_len = 0;
buffer->skb = skb;
++tx_queue->insert_count; returntrue;
}
if (!skb || skb->ip_summed != CHECKSUM_PARTIAL) return;
/* skb->csum_start has the offset from head, but we need the offset * from data.
*/
csum_start = skb_checksum_start_offset(skb);
EFX_POPULATE_OWORD_3(csum,
ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1,
ESF_GZ_TX_SEND_CSO_PARTIAL_START_W,
csum_start >> 1,
ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W,
skb->csum_offset >> 1);
EFX_OR_OWORD(*txd, *txd, csum);
}
/* NOTE(review): this is the interior and tail of the descriptor-writing
 * do/while loop of ef100_tx_make_descriptors(); the function header, local
 * declarations (buffer, txd, nr_descs, next_desc_type, new_write_count,
 * efv, skb) and the "do {" line were lost in extraction — TODO restore
 * from the original file before building.
 */
/* Create TX descriptor ring entry */
tx_queue->packet_write_count = new_write_count;
/* Emit one descriptor of the type chosen on the previous iteration (or
 * before the loop for the first descriptor).
 */
switch (next_desc_type) { case ESE_GZ_TX_DESC_TYPE_SEND:
ef100_make_send_desc(tx_queue->efx, skb,
buffer, txd, nr_descs); break; case ESE_GZ_TX_DESC_TYPE_TSO: /* TX TSO descriptor */
/* A TSO descriptor must land on the slot reserved by
 * ef100_tx_can_tso(), which set EFX_TX_BUF_TSO_V3.
 */
WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3));
ef100_make_tso_desc(tx_queue->efx, skb,
buffer, txd, nr_descs); break; default: /* TX segment descriptor */
EFX_POPULATE_OWORD_3(*txd,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG,
ESF_GZ_TX_SEG_LEN, buffer->len,
ESF_GZ_TX_SEG_ADDR, buffer->dma_addr);
/* Subsequent descriptors for an skb are SEG continuations. */
} /* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND; /* mark as an EFV buffer if applicable */ if (unlikely(efv))
buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
tx_queue->write_count = new_write_count;
/* The write_count above must be updated before reading * channel->holdoff_doorbell to avoid a race with the * completion path, so ensure these operations are not * re-ordered. This also flushes the update of write_count * back into the cache.
*/
smp_mb();
}
/* Add a socket buffer to a TX queue
 *
 * You must hold netif_tx_lock() to call this function.
 *
 * Returns 0 on success, error code otherwise.  In case of an error this
 * function will free the SKB.
 */
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
			      struct sk_buff *skb)
{
	/* Ordinary netdev transmit: no representor (efv) context. */
	return __ef100_enqueue_skb(tx_queue, skb, NULL);
}
/* NOTE(review): this is the body of __ef100_enqueue_skb(); the function
 * header and local declarations (efx, segments, fill_level, rc, efv,
 * xmit_more, old_insert_count) were lost in extraction, and some lines
 * between the representor checks and the queue-stop logic appear to be
 * missing (e.g. the DMA-mapping / descriptor-creation calls) — TODO
 * restore from the original file before building.
 */
/* Queue not fully initialised: refuse the packet and stop the stack
 * from sending more.
 */
if (!tx_queue->buffer || !tx_queue->ptr_mask) {
netif_stop_queue(efx->net_dev);
dev_kfree_skb_any(skb); return -ENODEV;
}
segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; if (segments == 1)
segments = 0; /* Don't use TSO/GSO for a single segment. */ if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
/* Hardware can't TSO this skb: fall back to software GSO. */
rc = efx_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++; if (rc) goto err; else return 0;
}
if (unlikely(efv)) { struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
/* Drop representor packets if the queue is stopped. * We currently don't assert backoff to representors so this is * to make sure representor traffic can't starve the main * net device. * And, of course, if there are no TX descriptors left.
*/ if (netif_tx_queue_stopped(tx_queue->core_txq) ||
unlikely(efx_tx_buffer_in_use(buffer))) {
atomic64_inc(&efv->stats.tx_errors);
rc = -ENOSPC; goto err;
}
/* Also drop representor traffic if it could cause us to * stop the queue. If we assert backoff and we haven't * received traffic on the main net device recently then the * TX watchdog can go off erroneously.
*/
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
fill_level += efx_tx_max_skb_descs(efx); if (fill_level > efx->txq_stop_thresh) { struct efx_tx_queue *txq2;
/* Refresh cached fill level and re-check */
efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
txq2->old_read_count = READ_ONCE(txq2->read_count);
/* Because of checks above, representor traffic should * not be able to stop the queue.
*/
WARN_ON(efv);
netif_tx_stop_queue(tx_queue->core_txq); /* Re-read after a memory barrier in case we've raced with * the completion path. Otherwise there's a danger we'll never * restart the queue if all completions have just happened.
*/
smp_mb();
efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); if (fill_level < efx->txq_stop_thresh)
netif_tx_start_queue(tx_queue->core_txq);
}
tx_queue->xmit_pending = true;
/* If xmit_more then we don't need to push the doorbell, unless there * are 256 descriptors already queued in which case we have to push to * ensure we never push more than 256 at once. * * Always push for representor traffic, and don't account it to parent * PF netdevice's BQL.
*/ if (unlikely(efv) ||
__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
/* Error path: undo any descriptors queued for this skb and free it
 * (the skb pointer may legitimately be an ERR_PTR or NULL here).
 */
err:
efx_enqueue_unwind(tx_queue, old_insert_count); if (!IS_ERR_OR_NULL(skb))
dev_kfree_skb_any(skb);
/* If we're not expecting another transmit and we had something to push * on this queue then we need to push here to get the previous packets * out. We only enter this branch from before the xmit_more handling * above, so xmit_pending still refers to the old state.
*/ if (tx_queue->xmit_pending && !xmit_more)
ef100_tx_push_buffers(tx_queue); return rc;
}
/* NOTE(review): the German text below is web-extraction residue (a
 * processing-time banner and site disclaimer), not part of the driver
 * source.  It is preserved but commented out so the file can compile;
 * it should simply be removed.
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.13 Sekunden
 * (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt.  Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */