/**
 * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
 * @timer: timer to initialize
 * @xdpsq: queue this timer belongs to
 * @lock: corresponding XDPSQ lock
 * @poll: queue polling/completion function
 *
 * XDPSQ clean-up timers must be set up before using at the queue configuration
 * time. Set the required pointers and the cleaning callback.
 */
void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
			     struct libeth_xdpsq_lock *lock,
			     void (*poll)(struct work_struct *work))
{
	timer->xdpsq = xdpsq;
	timer->lock = lock;

	/* The actual clean-up runs from a delayed work driven by @poll; the
	 * original text dropped this line, leaving @poll unused.
	 */
	INIT_DELAYED_WORK(&timer->dwork, poll);
}
EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
/**
 * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
 * @bq: XDP Tx frame bulk
 * @sent: number of frames sent successfully (from this bulk)
 * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
 *
 * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
 * Reports XDP Tx exceptions, frees the frames that won't be sent or adjust
 * the Tx bulk to try again later.
 */
void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
				    u32 flags)
{
	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
	u32 left = bq->count - sent;

	/* .ndo_xdp_xmit failures are reported by the core, only trace
	 * exceptions coming from XDP_TX verdicts.
	 */
	if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);

	if (!(flags & LIBETH_XDP_TX_DROP)) {
		/* Retry later: shift the unsent tail to the bulk head */
		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
		bq->count = left;

		return;
	}

	/* Drop requested: free the unsent frames via the matching helper */
	if (!(flags & LIBETH_XDP_TX_XSK))
		libeth_xdp_tx_return_bulk(pos, left);
	else
		libeth_xsk_tx_return_bulk(pos, left);

	bq->count = 0;
}
EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
/** * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash * @dst: target &libeth_xdp_buff to initialize * @src: source stash * * External helper used by libeth_xdp_init_buff(), do not call directly. * Recreate an onstack &libeth_xdp_buff using the stash saved earlier. * The only field untouched (rxq) is initialized later in the * abovementioned function.
*/ void libeth_xdp_load_stash(struct libeth_xdp_buff *dst, conststruct libeth_xdp_buff_stash *src)
{
dst->data = src->data;
dst->base.data_end = src->data + src->len;
dst->base.data_meta = src->data;
dst->base.data_hard_start = src->data - src->headroom;
/** * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash * @dst: target &libeth_xdp_buff_stash to initialize * @src: source XDP buffer * * External helper used by libeth_xdp_save_buff(), do not call directly. * Use the fields from the passed XDP buffer to initialize the stash on the * queue, so that a partially received frame can be finished later during * the next NAPI poll.
*/ void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst, conststruct libeth_xdp_buff *src)
{
dst->data = src->data;
dst->headroom = src->data - src->base.data_hard_start;
dst->len = src->base.data_end - src->data;
/**
 * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
 * @xdp: buffer to free/return
 *
 * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
 * queue clean-ups etc., without unwanted inlining.
 */
void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
{
__libeth_xdp_return_buff(xdp, false);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
/**
 * libeth_xdp_buff_add_frag - add frag to XDP buffer
 * @xdp: head XDP buffer
 * @fqe: Rx buffer containing the frag
 * @len: frag length reported by HW
 *
 * External helper used by libeth_xdp_process_buff(), do not call directly.
 * Frees both head and frag buffers on error.
 *
 * Return: true success, false on error (no space for a new frag).
 */
bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
			      const struct libeth_fqe *fqe,
			      u32 len)
{
	netmem_ref netmem = fqe->netmem;

	if (!xdp_buff_add_frag(&xdp->base, netmem,
			       fqe->offset + netmem_get_pp(netmem)->p.offset,
			       len, fqe->truesize))
		goto recycle;

	return true;

recycle:
	/* The frag didn't fit: recycle it and free the whole partial frame,
	 * as documented above. The original text lost this label and both
	 * return paths, leaving a dangling `goto`.
	 */
	libeth_rx_recycle_slow(netmem);
	libeth_xdp_return_buff_slow(xdp);

	return false;
}
EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
/**
 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
 * @sinfo: shared info corresponding to the buffer
 * @bq: XDP frame bulk to store the buffer
 * @frags: whether the buffer has frags
 *
 * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
 * completion of ``XDP_TX`` buffers and allows to free them in same bulks
 * with &xdp_frame buffers.
 */
void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
				 struct xdp_frame_bulk *bq, bool frags)
{
	if (!frags)
		goto head;

	for (u32 i = 0; i < sinfo->nr_frags; i++)
		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
					   bq);

head:
	/* Finally return the head buffer itself; the original text lost this
	 * label and the call, leaving a dangling `goto`.
	 */
	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
/**
 * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
 * @count: number of descriptors in the queue
 *
 * The threshold is the limit at which RQs start to refill (when the number of
 * empty buffers exceeds it) and SQs get cleaned up (when the number of free
 * descriptors goes below it). To speed up hotpath processing, threshold is
 * always pow-2, closest to 1/4 of the queue length.
 * Don't call it on hotpath, calculate and cache the threshold during the
 * queue initialization.
 *
 * Return: the calculated threshold.
 */
u32 libeth_xdp_queue_threshold(u32 count)
{
	u32 exact, below, above;

	/* Pow-2 queue length: its quarter is already a power of two */
	if (likely(is_power_of_2(count)))
		return count >> 2;

	/* Otherwise, pick the pow-2 neighbour closest to the exact quarter */
	exact = DIV_ROUND_CLOSEST(count, 4);
	below = rounddown_pow_of_two(exact);
	above = roundup_pow_of_two(exact);

	if (above - exact <= exact - below)
		return above;

	return below;
}
EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
/**
 * __libeth_xdp_set_features - set XDP features for netdev
 * @dev: &net_device to configure
 * @xmo: XDP metadata ops (Rx hints)
 * @zc_segs: maximum number of S/G frags the HW can transmit
 * @tmo: XSk Tx metadata ops (Tx hints)
 *
 * Set all the features libeth_xdp supports. Only the first argument is
 * necessary; without the third one (zero), XSk support won't be advertised.
 * Use the non-underscored versions in drivers instead.
 */
void __libeth_xdp_set_features(struct net_device *dev,
			       const struct xdp_metadata_ops *xmo,
			       u32 zc_segs,
			       const struct xsk_tx_metadata_ops *tmo)
{
	xdp_set_features_flag(dev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT |
			      (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
			      NETDEV_XDP_ACT_RX_SG |
			      NETDEV_XDP_ACT_NDO_XMIT_SG);
	dev->xdp_metadata_ops = xmo;

	/* The truncated original never stored @zc_segs or @tmo. Per the
	 * kdoc above, both are meaningful only when XSk ZC is advertised.
	 */
	if (!zc_segs)
		return;

	dev->xdp_zc_max_segs = zc_segs;
	dev->xsk_tx_metadata_ops = tmo;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
/**
 * libeth_xdp_set_redirect - toggle the XDP redirect feature
 * @dev: &net_device to configure
 * @enable: whether XDP is enabled
 *
 * Use this when XDPSQs are not always available to dynamically enable
 * and disable redirect feature.
 */
void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
{
	/* Advertise or withdraw @dev as a redirect target as XDPSQs
	 * come and go.
	 */
	if (!enable)
		xdp_features_clear_redirect_target(dev);
	else
		xdp_features_set_redirect_target(dev, true);
}
EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
/*
 * NOTE(review): the following disclaimer is extraction residue from the web
 * page this file was copied from, kept here (translated from German) and
 * commented out so it no longer breaks compilation:
 *
 * The information on this web page has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness or quality of the information provided.
 *
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */