/* NOTE(review): detached fragment — the enclosing function (presumably
 * mlx5e_xsk_alloc_rx_mpwqe, judging by the use of rq->mpwqe.pages_per_wqe)
 * begins before this chunk. 'batch', 'xsk_buffs' and the 'err_reuse_batch'
 * label are all defined outside the visible source; verify against the
 * upstream file before modifying. Code below left byte-identical.
 */
/* If batch < pages_per_wqe, either: * 1. Some (or all) descriptors were invalid. * 2. dma_need_sync is true, and it fell back to allocating one frame. * In either case, try to continue allocating frames one by one, until * the first error, which will mean there are no more valid descriptors.
*/ for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
/* One-at-a-time fallback: stop at the first allocation failure. */
xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool); if (unlikely(!xsk_buffs[batch])) goto err_reuse_batch;
}
/* NOTE(review): this span appears to be a corrupted extraction. It opens as
 * mlx5e_xsk_alloc_rx_wqes (returns int, fills 'wqe_bulk' cyclic-WQ entries),
 * but from the "Check packet size" comment onward the body reads like the
 * tails of two *other* functions spliced in — presumably
 * mlx5e_xsk_skb_from_cqe_mpwrq_linear (uses head_offset/page_idx/
 * skip_release_bitmap) and mlx5e_xsk_skb_from_cqe_linear (uses wi->offset/
 * MLX5E_WQE_FRAG_SKIP_RELEASE). The identifiers cqe_bcnt, head_offset,
 * mxbuf, prog, wi and page_idx are never declared here, and 'return NULL'
 * from an int-returning function cannot compile. Reconstruct from the
 * upstream kernel source rather than editing in place. Code left
 * byte-identical; only review comments added.
 */
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{ struct mlx5_wq_cyc *wq = &rq->wqe.wq; int i;
for (i = 0; i < wqe_bulk; i++) { int j = mlx5_wq_cyc_ctr2ix(wq, ix + i); struct mlx5e_wqe_frag_info *frag; struct mlx5e_rx_wqe_cyc *wqe;
dma_addr_t addr;
/* NOTE(review): splice point — from here the code belongs to a
 * per-CQE SKB-construction path, not to WQE bulk allocation.
 */
/* Check packet size. Note LRO doesn't use linear SKB */ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rq->stats->oversize_pkts_sw_drop++; return NULL;
}
/* head_offset is not used in this function, because xdp->data and the * DMA address point directly to the necessary place. Furthermore, in * the current implementation, UMR pages are mapped to XSK frames, so * head_offset should always be 0.
*/
WARN_ON_ONCE(head_offset);
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
/* Possible flows: * - XDP_REDIRECT to XSKMAP: * The page is owned by the userspace from now. * - XDP_TX and other XDP_REDIRECTs: * The page was returned by ZCA and recycled. * - XDP_DROP: * Recycle the page. * - XDP_PASS: * Allocate an SKB, copy the data and recycle the page. * * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its * size is the same as the Driver RX Ring's size, and pages for WQEs are * allocated first from the Reuse Ring, so it has enough space.
*/
prog = rcu_dereference(rq->xdp_prog); if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) { if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */ return NULL; /* page/packet was consumed by XDP */
}
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the * frame. On SKB allocation failure, NULL is returned.
*/ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
/* NOTE(review): second splice point — the remainder matches the tail of a
 * legacy-RQ (cyclic WQE) SKB path that tracks per-fragment state via
 * wi->flags rather than a skip-release bitmap.
 */
/* wi->offset is not used in this function, because xdp->data and the * DMA address point directly to the necessary place. Furthermore, the * XSK allocator allocates frames per packet, instead of pages, so * wi->offset should always be 0.
*/
WARN_ON_ONCE(wi->offset);
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog); if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) { if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE); return NULL; /* page/packet was consumed by XDP */
}
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse * will be handled by mlx5e_free_rx_wqe. * On SKB allocation failure, NULL is returned.
*/ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
Messung V0.5
• Dauer der Verarbeitung: 0.0 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.