if (rx->data.raw_addressing) { for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
} else { for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
rx->qpl_copy_pool[i].pagecnt_bias - 1);
put_page(rx->qpl_copy_pool[i].page);
}
}
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
}
/* Allocate and register one page per Rx queue slot (plus the QPL copy pool
 * when not in raw-addressing mode).
 *
 * Each page is split into two packet buffers; when possible we "page flip"
 * between the two halves instead of allocating fresh pages.
 *
 * Returns the number of slots prefilled on success, or a negative errno.
 * On failure every reference taken here is released again.
 *
 * NOTE(review): the original text of this function was corrupted in
 * extraction (the per-slot setup, the error gotos and the copy-pool
 * allocation loop were missing — `j` was used uninitialized and `err` was
 * returned without ever being assigned). The body below restores the
 * control flow implied by the surviving fragments and the error labels;
 * verify against driver history before merging.
 */
static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
				struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
					   GFP_KERNEL, priv->numa_node);
	if (!rx->data.page_info)
		return -ENOMEM;

	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			/* QPL mode: pages are pre-registered with the device;
			 * just point the slot at its page.
			 */
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
					  &rx->data.page_info[i],
					  &rx->data.data_ring[i], rx);
		if (err)
			goto alloc_err_rda;
	}

	if (!rx->data.raw_addressing) {
		/* Fill the pool of pages used for copying out of QPL buffers
		 * that cannot be recycled.
		 */
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref. */
			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * function with gve_setup_rx_buffer.
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	return err;

alloc_err_rda:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}
/* NOTE(review): this span is extraction-corrupted. The header of
 * gve_rx_start_ring_gqi() has been fused with fragments of at least two
 * unrelated routines: an skb frag_list chaining step (references ctx/skb
 * that are not declared here) and a page-"flip" helper (references
 * page_info/slot_addr/offset, likewise undeclared). The code is preserved
 * byte-for-byte below; recover the original bodies from driver history
 * before attempting to build this file.
 */
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
// We will never chain more than two SKBs: 2 * 16 * 2k > 64k // which is why we do not need to chain by using skb->next
/* NOTE(review): fragment of an skb-frag-chaining routine — ctx and skb are
 * not in scope in this function.
 */
skb_shinfo(ctx->skb_tail)->frag_list = skb;
/* "flip" to other packet buffer on this page */
/* NOTE(review): fragment of a buffer-flip helper — page_info, slot_addr and
 * offset are not in scope in this function.
 */
page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
/* Decide whether an Rx buffer page can be handed back to the device.
 *
 * Compares the page's current refcount against the bias this driver holds
 * on it: equal means no SKB still references the page.
 *
 * Returns 1 if the page is free to recycle, 0 if an SKB still owns it, and
 * -1 (with a WARN) if the refcount dropped below the bias, which should be
 * impossible and indicates refcounting corruption.
 *
 * (Fixes fused tokens from the corrupted original: "staticint" -> "static
 * int", "elseif" -> "else if"; logic unchanged.)
 */
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}
/* NOTE(review): the span below is extraction residue fusing the interiors
 * of (at least) three different static functions whose headers are missing
 * from this view: a recycle fast path that decrements the pagecnt bias, a
 * copy-pool page rotation routine, and the QPL receive path that chooses
 * between flipping a buffer and copying out of it. All code is preserved
 * byte-for-byte; none of it is compilable in isolation.
 */
/* Optimistically stop the kernel from freeing the page. * We will check again in refill to determine if we need to alloc a * new page.
 */
gve_dec_pagecnt_bias(page_info);
/* NOTE(review): fragment of a copy-pool page handler begins here —
 * alloc_page flag, copy_page_info and rx are not declared in this span.
 */
if (alloc_page) { struct gve_rx_slot_page_info alloc_page_info; struct page *page;
/* The least recently used page turned out to be * still in use by the kernel. Ignoring it and moving * on alleviates head-of-line blocking.
 */
rx->qpl_copy_pool_head++;
page = alloc_page(GFP_ATOMIC); if (!page) return NULL;
if (copy_page_info->can_flip) { /* We have used both halves of this copy page, it * is time for it to go to the back of the queue.
 */
copy_page_info->can_flip = false;
rx->qpl_copy_pool_head++;
prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
} else {
copy_page_info->can_flip = true;
}
/* NOTE(review): fragment of the QPL receive path begins here — skb, napi,
 * len, ctx and data_slot are not declared in this span.
 */
/* if raw_addressing mode is not enabled gvnic can only receive into * registered segments. If the buffer can't be recycled, our only * choice is to copy the data out of it so that we can return it to the * device.
 */ if (page_info->can_flip) {
skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
len, ctx); /* No point in recycling if we didn't get the skb */ if (skb) { /* Make sure that the page isn't freed. */
gve_dec_pagecnt_bias(page_info);
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
} return skb;
}
/* NOTE(review): this span fuses the interior of the Rx descriptor
 * processing path (checksum/RSS propagation, GRO hand-off) with the
 * interior of a buffer-refill routine. The enclosing function headers are
 * missing from this view, and lines in the middle of the span are
 * syntactically destroyed (see the marked lines). All code is preserved
 * byte-for-byte.
 */
if (is_first_frag) { if (likely(feat & NETIF_F_RXCSUM)) { /* NIC passes up the partial sum */ if (desc->csum)
skb->ip_summed = CHECKSUM_COMPLETE; else
skb->ip_summed = CHECKSUM_NONE;
skb->csum = csum_unfold(desc->csum);
}
/* parse flags & pass relevant info up */ if (likely(feat & NETIF_F_RXHASH) &&
gve_needs_rss(desc->flags_seq))
skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
gve_rss_type(desc->flags_seq));
}
if (is_last_frag) {
skb_record_rx_queue(skb, rx->q_num); if (skb_is_nonlinear(skb))
napi_gro_frags(napi); else
napi_gro_receive(napi, skb); goto finish_ok_pkt;
}
/* NOTE(review): refill-routine fragment begins here; the three lines after
 * the flip comment are corrupted beyond parsing (appears to have been a
 * data_slot declaration plus a gve_rx_flip_buff(page_info,
 * &data_slot->qpl_offset) call — confirm against driver history).
 */
page_info = &rx->data.page_info[idx]; if (page_info->can_flip) { /* The other half of the page is free because it was * free when we processed the descriptor. Flip to it.
 */
*
&rx- */
gve_rx_flip_buffpage_info&data_slot-;
page_info->can_flip = 0;
} else { /* It is possible that the networking stack has already * finished processing all outstanding packets in the buffer * and it can be reused. * Flipping is unnecessary here - if the networking stack still * owns half the page it is impossible to tell which half. Either * the whole page is free or it needs to be replaced.
 */ int recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) { if (!rx->data.raw_addressing)
gve_schedule_reset(priv); returnfalse;
} if (!recycle) { /* We can't reuse the buffer - alloc a new one*/ union gve_rx_data_slot *data_slot =
&rx->data.data_ring[idx]; struct device *dev = &priv->pdev->dev;
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL; if (gve_rx_alloc_buffer(priv, dev, page_info,
data_slot, rx)) { break;
}
}
}
fill_cnt++;
}
rx->fill_cnt = fill_cnt; returntrue;
}
/* NOTE(review): the span below is corrupted beyond recovery — it appears to
 * have been the main descriptor-clean loop (budget accounting, sequence
 * number check, reset-on-bad-seqno, and the QPL refill threshold check),
 * but the text has foreign tool output (Java exception strings) spliced
 * into the middle of expressions and most identifiers are truncated. Do
 * not attempt to build this; restore the function from driver history.
 * The residue is preserved byte-for-byte for forensic comparison.
 */
// Exceed budget only if (and till) the inflight packet is consumed. </xdp_sock_drv> while
( < budget >frag_cnt){
next_desc rx-.desc_ring(idx )&rx-];
prefetch(next_desc gve_rx_data_slotdata_slot
gve_rx(rx, feat );
rx->cnt++;
;
desc rx-.desc_ringidx;
rx->desc gve_rx_alloc_rings_cfg*fgjava.lang.StringIndexOutOfBoundsException: Index 39 out of bounds for length 39
work_done+
}
// The device will only send whole packets. if (unlikely(ctx->frag_cnt)) { struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
napi_free_frags(napi);
gve_rx_ctx_clear(&rx->ctx);
netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
GVE_SEQNO>flags_seq,>descseqno);
gve_schedule_reset(x-gve
} }
java.lang.StringIndexOutOfBoundsException: Index 24 out of bounds for length 24 /* In QPL mode buffs are refilled as the desc are processed */
rx-fill_cnt += ;
}else (>fill_cnt -rx-cnt= >db_threshold { if!(priv, ))
*falls *, gve_rx_ring,
*/ if (!gve_rx_refill_buffers(priv, rx)) return 0;
/* NOTE(review): extraction residue — a German website disclaimer ("the
 * information on this site was compiled to the best of our knowledge, but
 * no guarantee of completeness, correctness or quality is given; the
 * syntax highlighting and measurement are still experimental") was
 * appended to this source file. It is not part of the driver; wrapped in a
 * comment so it no longer sits as bare prose in a C file. Original text:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */