/********************************************************************** * Author: Cavium, Inc. * * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include"liquidio_common.h" #include"octeon_droq.h" #include"octeon_iq.h" #include"response_manager.h" #include"octeon_device.h" #include"octeon_main.h" #include"octeon_network.h" #include"cn66xx_regs.h" #include"cn66xx_device.h" #include"cn23xx_pf_device.h" #include"cn23xx_vf_device.h"
/** Get the argument that the user set when registering dispatch * function for a given opcode/subcode. * @param octeon_dev - the octeon device pointer. * @param opcode - the opcode for which the dispatch argument * is to be checked. * @param subcode - the subcode for which the dispatch argument * is to be checked. * @return Success: void * (argument to the dispatch function) * @return Failure: NULL *
*/ void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
u16 opcode, u16 subcode)
{ int idx; struct list_head *dispatch; void *fn_arg = NULL;
/* Fold opcode and subcode into one key, then mask it down to a
 * dispatch-table index.
 */
u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
idx = combined_opcode & OCTEON_OPCODE_MASK;
/* The dispatch table is shared state; hold the BH-safe spinlock while
 * reading it.
 */
spin_lock_bh(&octeon_dev->dispatch.lock);
if (octeon_dev->dispatch.count == 0) {
/* No dispatch functions registered at all -- nothing to look up. */
spin_unlock_bh(&octeon_dev->dispatch.lock); return NULL;
}
/* NOTE(review): function body is truncated in this chunk -- the table
 * walk over 'dispatch', the matching unlock on the found/not-found
 * paths, and the 'return fn_arg' are missing. Recover from upstream
 * before building.
 */
/** Check for packets on a DROQ (output queue). Caller must hold the
 * appropriate lock.
 * @param droq - DROQ on which the packet count is checked.
 * @return packet count read from hardware.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
u32 last_count;
/* NOTE(review): the code below does not match this function's contract.
 * It computes droq->max_empty_descs (worst-case number of empty
 * descriptors that still allows receiving a 64K packet, the max packet
 * size from Octeon) and looks spliced in from a DROQ-init routine.
 * 'count' is used without a visible declaration, so this fragment
 * cannot compile as shown; the real body (reading the hardware packet
 * count into pkt_count via last_count) is missing from this chunk.
 */
droq->max_empty_descs = 0;
do {
droq->max_empty_descs++;
count += droq->buffer_size;
} while (count < (64 * 1024));
/* octeon_create_recv_info
 * Parameters:
 *   octeon_dev - pointer to the octeon device structure
 *   droq       - droq in which the packet arrived
 *   buf_cnt    - no. of buffers used by the packet
 *   idx        - index in the descriptor for the first buffer in the packet
 * Description:
 *   Allocates a recv_info_t and copies the buffer addresses for packet data
 *   into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *   Flags the descriptors for refill later. If available descriptors go
 *   below the threshold to receive a 64K pkt, new buffers are first
 *   allocated before the recv_pkt_t is created.
 *   This routine will be called in interrupt context.
 * Returns:
 *   Success: pointer to recv_info_t
 *   Failure: NULL
 * NOTE(review): 'staticinlinestruct' below is a fused token ("static inline
 * struct") from a mangled extraction and will not compile as-is; the
 * function body is also truncated after the recv_info allocation (buffer
 * copy loop, refill flagging, and return are missing).
 */ staticinlinestruct octeon_recv_info *octeon_create_recv_info( struct octeon_device *octeon_dev, struct octeon_droq *droq,
u32 buf_cnt,
u32 idx)
{ struct octeon_droq_info *info; struct octeon_recv_pkt *recv_pkt; struct octeon_recv_info *recv_info;
u32 i, bytes_left; struct octeon_skb_page_info *pg_info;
/* First buffer of the packet starts with the DROQ info header. */
info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;
recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch)); if (!recv_info) return NULL;
/* If we were not able to refill all buffers, try to move around * the buffers that were not dispatched.
*/ staticinline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq, struct octeon_droq_desc *desc_ring)
{
u32 desc_refilled = 0;
u32 refill_index = droq->refill_idx;
while (refill_index != droq->read_idx) { if (droq->recv_buf_list[refill_index].buffer) {
droq->recv_buf_list[droq->refill_idx].buffer =
droq->recv_buf_list[refill_index].buffer;
droq->recv_buf_list[droq->refill_idx].data =
droq->recv_buf_list[refill_index].data;
desc_ring[droq->refill_idx].buffer_ptr =
desc_ring[refill_index].buffer_ptr;
droq->recv_buf_list[refill_index].buffer = NULL;
desc_ring[refill_index].buffer_ptr = 0; do {
droq->refill_idx = incr_index(droq->refill_idx,
1,
droq->max_count);
desc_refilled++;
droq->refill_count--;
} while (droq->recv_buf_list[droq->refill_idx].buffer);
}
refill_index = incr_index(refill_index, 1, droq->max_count);
} /* while */ return desc_refilled;
}
/* octeon_droq_refill
 * Parameters:
 *   droq - droq in which descriptors require new buffers.
 * Description:
 *   Called during normal DROQ processing in interrupt mode or by the poll
 *   thread to refill the descriptors from which buffers were dispatched
 *   to upper layers. Attempts to allocate new buffers. If that fails, moves
 *   up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *   No of descriptors refilled.
 */ static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{ struct octeon_droq_desc *desc_ring; void *buf = NULL;
u8 *data;
u32 desc_refilled = 0; struct octeon_skb_page_info *pg_info;
desc_ring = droq->desc_ring;
while (droq->refill_count && (desc_refilled < droq->max_count)) { /* If a valid buffer exists (happens if there is no dispatch), * reuse the buffer, else allocate.
*/ if (!droq->recv_buf_list[droq->refill_idx].buffer) {
pg_info =
&droq->recv_buf_list[droq->refill_idx].pg_info; /* Either recycle the existing pages or go for * new page alloc
*/ if (pg_info->page)
buf = recv_buffer_reuse(octeon_dev, pg_info); else
buf = recv_buffer_alloc(octeon_dev, pg_info); /* If a buffer could not be allocated, no point in * continuing
*/ if (!buf) {
droq->stats.rx_alloc_failure++; break;
}
droq->recv_buf_list[droq->refill_idx].buffer =
buf;
data = get_rbd(buf);
} else {
data = get_rbd(droq->recv_buf_list
[droq->refill_idx].buffer);
}
/* NOTE(review): the tail of the while loop is missing from this chunk
 * (assignment of 'data' into recv_buf_list, mapping the buffer into
 * desc_ring, advancing refill_idx, and the loop's closing brace), so
 * braces below are unbalanced as shown. Recover the missing lines from
 * upstream before building.
 */
if (droq->refill_count)
desc_refilled +=
octeon_droq_refill_pullup_descs(droq, desc_ring);
/* if droq->refill_count * The refill count would not change in pass two. We only moved buffers * to close the gap in the ring, but we would still have the same no. of * buffers to refill.
*/ return desc_refilled;
}
/** Check if we can allocate packets to get out of an out-of-memory state.
 * @param droq - DROQ being checked.
 * @return 1 if the refill did not reach the minimum credit threshold
 *         (caller should reschedule), 0 otherwise.
 */ int octeon_retry_droq_refill(struct octeon_droq *droq)
{ struct octeon_device *oct = droq->oct_dev; int desc_refilled, reschedule = 1;
u32 pkts_credit;
/* Snapshot current hardware credits before attempting the refill. */
pkts_credit = readl(droq->pkts_credit_reg);
desc_refilled = octeon_droq_refill(oct, droq); if (desc_refilled) { /* Flush the droq descriptor data to memory to be sure * that when we update the credits the data in memory * is accurate.
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
/* Enough credits posted -- no need to retry again. */
if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
reschedule = 0;
}
/* NOTE(review): function is truncated here -- 'return reschedule;' and
 * the closing brace are missing from this chunk.
 */
/* NOTE(review): orphan fragment -- the enclosing function's definition is
 * not in this chunk (by its use of nicbuf/pkt_len/rh/pkt/total_len this
 * looks like the tail of a per-packet DROQ processing loop; confirm
 * against upstream). Kept verbatim below with review notes only.
 */
if (nicbuf) { if (droq->ops.fptr) {
/* Hand the packet to the registered receive callback. */
droq->ops.fptr(oct->octeon_id,
nicbuf, pkt_len,
rh, &droq->napi,
droq->ops.farg);
} else {
/* No consumer registered -- drop the buffer. */
recv_buffer_free(nicbuf);
}
}
}
/* Refill the ring once enough consumed descriptors have accumulated. */
if (droq->refill_count >= droq->refill_threshold) { int desc_refilled = octeon_droq_refill(oct, droq);
if (desc_refilled) { /* Flush the droq descriptor data to memory to * be sure that when we update the credits the * data in memory is accurate.
*/
wmb();
writel(desc_refilled, droq->pkts_credit_reg);
}
}
} /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
droq->stats.bytes_received += total_len;
/* NOTE(review): the text below is stray German website-footer boilerplate
 * (a content disclaimer plus a note that syntax highlighting and timing
 * are experimental) that was appended to the source by whatever tool
 * extracted this file. It is not part of the driver; wrapped in a comment
 * so the file remains compilable -- consider deleting it entirely.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */