/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* * Rx buffer size. We use largish buffers if possible but settle for single * pages under memory shortage.
*/ #if PAGE_SHIFT >= 16 # define FL_PG_ORDER 0 #else # define FL_PG_ORDER (16 - PAGE_SHIFT) #endif
/* RX_PULL_LEN should be <= RX_COPY_THRES */ #define RX_COPY_THRES 256 #define RX_PULL_LEN 128
/* * Main body length for sk_buffs used for Rx Ethernet packets with fragments. * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
*/ #define RX_PKT_SKB_LEN 512
/* * Max number of Tx descriptors we clean up at a time. Should be modest as * freeing skbs isn't cheap and it happens while holding locks. We just need * to free packets faster than they arrive, we eventually catch up and keep * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should * also match the CIDX Flush Threshold.
*/ #define MAX_TX_RECLAIM 32
/* * Max number of Rx buffers we replenish at a time. Again keep this modest, * allocating buffers isn't cheap either.
*/ #define MAX_RX_REFILL 16U
/* * Period of the Rx queue check timer. This timer is infrequent as it has * something to do only when the system experiences severe memory shortage.
*/ #define RX_QCHECK_PERIOD (HZ / 2)
/* * Period of the Tx queue check timer.
*/ #define TX_QCHECK_PERIOD (HZ / 2)
/* * Max number of Tx descriptors to be reclaimed by the Tx timer.
*/ #define MAX_TIMER_TX_RECLAIM 100
/* * Timer index used when backing off due to memory shortage.
*/ #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/* * Suspension threshold for non-Ethernet Tx queues. We require enough room * for a full sized WR.
*/ #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
/* * Max Tx descriptor space we allow for an Ethernet packet to be inlined * into a WR.
*/ #define MAX_IMM_TX_PKT_LEN 256
/* * Max size of a WR sent through a control Tx queue.
*/ #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
/* Software state the driver keeps for each Rx Free List descriptor. */
struct rx_sw_desc { /* SW state per Rx descriptor */ struct page *page; /* page backing this Rx buffer (NULL once given up) */
dma_addr_t dma_addr; /* bus address; low bits are overloaded -- see the enum below */
};
/* * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs. * We could easily support more but there doesn't seem to be much need for * that ...
*/ #define FL_MTU_SMALL 1500 #define FL_MTU_LARGE 9000
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
/** * txq_avail - return the number of available slots in a Tx queue * @q: the Tx queue * * Returns the number of descriptors in a Tx queue available to write new * packets.
*/ staticinlineunsignedint txq_avail(conststruct sge_txq *q)
{ return q->size - 1 - q->in_use;
}
/** * fl_cap - return the capacity of a free-buffer list * @fl: the FL * * Returns the capacity of a free-buffer list. The capacity is less than * the size because one descriptor needs to be left unpopulated, otherwise * HW will think the FL is empty.
*/ staticinlineunsignedint fl_cap(conststruct sge_fl *fl)
{ return fl->size - 8; /* 1 descriptor = 8 buffers */
}
/** * fl_starving - return whether a Free List is starving. * @adapter: pointer to the adapter * @fl: the Free List * * Tests specified Free List to see whether the number of buffers * available to the hardware has fallen below our "starvation" * threshold.
*/ staticinlinebool fl_starving(conststruct adapter *adapter, conststruct sge_fl *fl)
{ conststruct sge *s = &adapter->sge;
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags]; for (fp = si->frags; fp < end; fp++)
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * Packet destructor for Tx packets that must remain DMA-mapped until they
 * are freed rather than until their Tx descriptors are freed.  The DMA
 * addresses to unmap are stashed at skb->head by the sender.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	struct device *dev = skb->dev->dev.parent;

	unmap_skb(dev, skb, (dma_addr_t *)skb->head);
}
#endif
/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adap: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n,
		  bool unmap)
{
	unsigned int idx = q->cidx;
	struct tx_sw_desc *sd = &q->sdesc[idx];

	for (; n; n--) {
		if (sd->skb) {			/* an SGL is present */
			if (unmap && sd->addr[0]) {
				unmap_skb(adap->pdev_dev, sd->skb, sd->addr);
				/* clear addrs to avoid a double unmap */
				memset(sd->addr, 0, sizeof(sd->addr));
			}
			dev_consume_skb_any(sd->skb);
			sd->skb = NULL;
		}
		sd++;
		idx++;
		if (idx == q->size) {		/* wrap around the ring */
			idx = 0;
			sd = q->sdesc;
		}
	}
	q->cidx = idx;
}
/* * Return the number of reclaimable descriptors in a Tx queue.
*/ staticinlineint reclaimable(conststruct sge_txq *q)
{ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx; return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
/**
 * reclaim_completed_tx - reclaims completed TX Descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  If @maxreclaim == -1,
 * then we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int avail = reclaimable(q);

	if (!avail)
		return 0;

	/*
	 * Limit the amount of clean up work we do at a time to keep
	 * the Tx lock hold time O(1).
	 */
	if (maxreclaim < 0)
		maxreclaim = MAX_TX_RECLAIM;
	if (avail > maxreclaim)
		avail = maxreclaim;

	free_tx_desc(adap, q, avail, unmap);
	q->in_use -= avail;

	return avail;
}
/**
 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 * @adap: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	/* Use the default reclaim maximum; the count reclaimed is unused. */
	reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
switch (rx_buf_size_idx) { case RX_SMALL_PG_BUF:
buf_size = PAGE_SIZE; break;
case RX_LARGE_PG_BUF:
buf_size = PAGE_SIZE << s->fl_pg_order; break;
case RX_SMALL_MTU_BUF:
buf_size = FL_MTU_SMALL_BUFSIZE(adapter); break;
case RX_LARGE_MTU_BUF:
buf_size = FL_MTU_LARGE_BUFSIZE(adapter); break;
default:
BUG();
}
return buf_size;
}
/** * free_rx_bufs - free the Rx buffers on an SGE free list * @adap: the adapter * @q: the SGE free list to free buffers from * @n: how many buffers to free * * Release the next @n buffers on an SGE free-buffer Rx queue. The * buffers must be made inaccessible to HW before calling this function.
*/ staticvoid free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{ while (n--) { struct rx_sw_desc *d = &q->sdesc[q->cidx];
/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @adap: the adapter
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *sd = &q->sdesc[q->cidx];

	if (is_buf_mapped(sd))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(sd),
			       get_buf_size(adap, sd), DMA_FROM_DEVICE);

	/* the FL gives up the page without freeing it */
	sd->page = NULL;

	q->cidx++;
	if (q->cidx == q->size)		/* wrap the consumer index */
		q->cidx = 0;
	q->avail--;
}
/*
 * Inform the hardware of newly added Free List buffers.  Credits are
 * handed to the SGE in units of 8 buffers; any remainder stays pending.
 */
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	u32 val;

	if (q->pend_cred < 8)
		return;

	val = adap->params.arch.sge_fl_db;
	if (is_t4(adap->params.chip))
		val |= PIDX_V(q->pend_cred / 8);
	else
		val |= PIDX_T5_V(q->pend_cred / 8);

	/* Make sure all memory writes to the Free List queue are
	 * committed before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use
	 * the old doorbell mechanism; otherwise use the new BAR2
	 * mechanism.
	 */
	if (unlikely(!q->bar2_addr)) {
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     val | QID_V(q->cntxt_id));
	} else {
		writel(val | QID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_KDOORBELL);

		/* This Write memory Barrier will force the write to
		 * the User Doorbell area to be flushed.
		 */
		wmb();
	}

	q->pend_cred &= 7;
}
/** * refill_fl - refill an SGE Rx buffer ring * @adap: the adapter * @q: the ring to refill * @n: the number of new buffers to allocate * @gfp: the gfp flags for the allocations * * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity. If afterwards the queue is * found critically low mark it as starving in the bitmap of starving FLs. * * Returns the number of buffers allocated.
*/ staticunsignedint refill_fl(struct adapter *adap, struct sge_fl *q, int n,
gfp_t gfp)
{ struct sge *s = &adap->sge; struct page *pg;
dma_addr_t mapping; unsignedint cred = q->avail;
__be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; int node;
#ifdef CONFIG_DEBUG_FS if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) goto out; #endif
/* * Prefer large buffers
*/ while (n) {
pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); if (unlikely(!pg)) {
q->large_alloc_failed++; break; /* fall back to single pages */
}
mapping = dma_map_page(adap->pdev_dev, pg, 0,
PAGE_SIZE << s->fl_pg_order,
DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
__free_pages(pg, s->fl_pg_order);
q->mapping_err++; goto out; /* do not try small pages for this error */
}
mapping |= RX_LARGE_PG_BUF;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, pg, mapping);
sd++;
q->avail++; if (++q->pidx == q->size) {
q->pidx = 0;
sd = q->sdesc;
d = q->desc;
}
n--;
}
alloc_small_pages: while (n--) {
pg = alloc_pages_node(node, gfp, 0); if (unlikely(!pg)) {
q->alloc_failed++; break;
}
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t hwlen = nelem * elem_size + stat_size;
	void *swring = NULL;
	void *hwring;

	hwring = dma_alloc_coherent(dev, hwlen, phys, GFP_KERNEL);
	if (!hwring)
		return NULL;

	if (sw_size) {
		/* zeroed SW state, allocated near the given NUMA node */
		swring = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
		if (!swring) {
			dma_free_coherent(dev, hwlen, hwring, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = swring;
	return hwring;
}
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0 -- two flits in all.
	 * After that, entries come in pairs laid out as { Length[i],
	 * Length[i+1], Address[i], Address[i+1] } so that every address is
	 * 64-bit aligned: each full pair costs three flits.  A trailing
	 * unpaired entry is stored as { Length[N], 0, Address[N] } and
	 * costs two flits.
	 */
	unsigned int rem = n - 1;	/* entries beyond Length0/Address0 */

	return 2 + 3 * (rem / 2) + (rem & 1 ? 2 : 0);
}
/** * flits_to_desc - returns the num of Tx descriptors for the given flits * @n: the number of flits * * Returns the number of Tx descriptors needed for the supplied number * of flits.
*/ staticinlineunsignedint flits_to_desc(unsignedint n)
{
BUG_ON(n > SGE_MAX_WR_LEN / 8); return DIV_ROUND_UP(n, 8);
}
/** * is_eth_imm - can an Ethernet packet be sent as immediate data? * @skb: the packet * @chip_ver: chip version * * Returns whether an Ethernet packet is small enough to fit as * immediate data. Return value corresponds to headroom required.
*/ staticinlineint is_eth_imm(conststruct sk_buff *skb, unsignedint chip_ver)
{ int hdrlen = 0;
/** * calc_tx_flits - calculate the number of flits for a packet Tx WR * @skb: the packet * @chip_ver: chip version * * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers.
*/ staticinlineunsignedint calc_tx_flits(conststruct sk_buff *skb, unsignedint chip_ver)
{ unsignedint flits; int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request * with only immediate data. In that case we just have to have the * TX Packet header plus the skb data in the Work Request.
*/
if (hdrlen) return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
/* Otherwise, we're going to have to construct a Scatter gather list * of the skb body and fragments. We also include the flits necessary * for the TX Packet Work Request and CPL. We always have a firmware * Write Header (incorporated as part of the cpl_tx_pkt_lso and * cpl_tx_pkt structures), followed by either a TX Packet Write CPL * message or, if we're doing a Large Send Offload, an LSO CPL message * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); if (skb_shinfo(skb)->gso_size) { if (skb->encapsulation && chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_tnl_lso);
} elseif (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
u32 pkt_hdrlen;
/** * cxgb4_write_sgl - populate a scatter/gather list for a packet * @skb: the packet * @q: the Tx queue we are writing into * @sgl: starting location for writing the SGL * @end: points right after the end of the SGL * @start: start offset into skb main-body data to include in the SGL * @addr: the list of bus addresses for the SGL elements * * Generates a gather list for the buffers that make up a packet. * The caller must provide adequate space for the SGL that will be written. * The SGL includes all of the packet's page fragments and the data in its * main body except for the first @start bytes. @sgl must be 16-byte * aligned and within a Tx descriptor with available space. @end points * right after the end of the SGL but does not account for any potential * wrap around, i.e., @end > @sgl.
*/ void cxgb4_write_sgl(conststruct sk_buff *skb, struct sge_txq *q, struct ulptx_sgl *sgl, u64 *end, unsignedint start, const dma_addr_t *addr)
{ unsignedint i, len; struct ulptx_sge_pair *to; conststruct skb_shared_info *si = skb_shinfo(skb); unsignedint nfrags = si->nr_frags; struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE_V(nfrags)); if (likely(--nfrags == 0)) return; /* * Most of the complexity below deals with the possibility we hit the * end of the queue in the middle of writing the SGL. For this case * only we create the SGL in a temporary buffer and then copy it.
*/
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
if (likely(part0))
memcpy(sgl->sge, buf, part0);
part1 = (u8 *)end - (u8 *)q->stat;
memcpy(q->desc, (u8 *)buf + part0, part1);
end = (void *)q->desc + part1;
} if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);
/* cxgb4_write_partial_sgl - populate SGL for partial packet * @skb: the packet * @q: the Tx queue we are writing into * @sgl: starting location for writing the SGL * @end: points right after the end of the SGL * @addr: the list of bus addresses for the SGL elements * @start: start offset in the SKB where partial data starts * @len: length of data from @start to send out * * This API will handle sending out partial data of a skb if required. * Unlike cxgb4_write_sgl, @start can be any offset into the skb data, * and @len will decide how much data after @start offset to send out.
*/ void cxgb4_write_partial_sgl(conststruct sk_buff *skb, struct sge_txq *q, struct ulptx_sgl *sgl, u64 *end, const dma_addr_t *addr, u32 start, u32 len)
{ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
u32 frag_size, skb_linear_data_len = skb_headlen(skb); struct skb_shared_info *si = skb_shinfo(skb);
u8 i = 0, frag_idx = 0, nfrags = 0;
skb_frag_t *frag;
/* Fill the first SGL either from linear data or from partial * frag based on @start.
*/ if (unlikely(start < skb_linear_data_len)) {
frag_size = min(len, skb_linear_data_len - start);
sgl->len0 = htonl(frag_size);
sgl->addr0 = cpu_to_be64(addr[0] + start);
len -= frag_size;
nfrags++;
} else {
start -= skb_linear_data_len;
frag = &si->frags[frag_idx];
frag_size = skb_frag_size(frag); /* find the first frag */ while (start >= frag_size) {
start -= frag_size;
frag_idx++;
frag = &si->frags[frag_idx];
frag_size = skb_frag_size(frag);
}
/* If the entire partial data fit in one SGL, then send it out * now.
*/ if (!len) goto done;
/* Most of the complexity below deals with the possibility we hit the * end of the queue in the middle of writing the SGL. For this case * only we create the SGL in a temporary buffer and then copy it.
*/
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
/* If the skb couldn't fit in first SGL completely, fill the * rest of the frags in subsequent SGLs. Note that each SGL * pair can store 2 frags.
*/ while (len) {
frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
to->len[i & 1] = cpu_to_be32(frag_size);
to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); if (i && (i & 1))
to++;
nfrags++;
frag_idx++;
i++;
len -= frag_size;
}
/* If we ended in an odd boundary, then set the second SGL's * length in the pair to 0.
*/ if (i & 1)
to->len[1] = cpu_to_be32(0);
/* Copy from temporary buffer to Tx ring, in case we hit the * end of the queue in the middle of writing the SGL.
*/ if (unlikely((u8 *)end > (u8 *)q->stat)) {
u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
/* 0-pad to multiple of 16 */ if ((uintptr_t)end & 8)
*end = 0;
done:
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE_V(nfrags));
}
EXPORT_SYMBOL(cxgb4_write_partial_sgl);
/* This function copies a 64-byte coalesced work request to the
 * memory-mapped BAR2 space.  For coalesced WRs the SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int i;

	/* a coalesced WR is 64 bytes == eight 64-bit stores */
	for (i = 0; i < 8; i++)
		writeq(src[i], dst + i);
}
/** * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell * @adap: the adapter * @q: the Tx queue * @n: number of new descriptors to give to HW * * Ring the doorbell for a Tx queue.
*/ inlinevoid cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{ /* Make sure that all writes to the TX Descriptors are committed * before we tell the hardware about them.
*/
wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ if (unlikely(q->bar2_addr == NULL)) {
u32 val = PIDX_V(n); unsignedlong flags;
/* For T4 we need to participate in the Doorbell Recovery * mechanism.
*/
spin_lock_irqsave(&q->db_lock, flags); if (!q->db_disabled)
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
QID_V(q->cntxt_id) | val); else
q->db_pidx_inc += n;
q->db_pidx = q->pidx;
spin_unlock_irqrestore(&q->db_lock, flags);
} else {
u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within * the doorbell, but T5 and later shrank the field in order to * gain a bit for Doorbell Priority. The field was absurdly * large in the first place (14 bits) so we just use the T5 * and later limits and warn if a Queue ID is too large.
*/
WARN_ON(val & DBPRIO_F);
/* If we're only writing a single TX Descriptor and we can use * Inferred QID registers, we can use the Write Combining * Gather Buffer; otherwise we use the simple doorbell.
*/ if (n == 1 && q->bar2_qid == 0) { int index = (q->pidx
? (q->pidx - 1)
: (q->size - 1));
u64 *wr = (u64 *)&q->desc[index];
/* This Write Memory Barrier will force the write to the User * Doorbell area to be flushed. This is needed to prevent * writes on different CPUs for the same queue from hitting * the adapter out of order. This is required when some Work * Requests take the Write Combine Gather Buffer path (user * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some * take the traditional path where we simply increment the * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the * hardware DMA read the actual Work Request.
*/
wmb();
}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);
/** * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors * @skb: the packet * @q: the Tx queue where the packet will be inlined * @pos: starting position in the Tx queue where to inline the packet * * Inline a packet's contents directly into Tx descriptors, starting at * the given position within the Tx DMA ring. * Most of the complexity of this operation is dealing with wrap arounds * in the middle of the packet we want to inline.
*/ void cxgb4_inline_tx_skb(conststruct sk_buff *skb, conststruct sge_txq *q, void *pos)
{ int left = (void *)q->stat - pos;
u64 *p;
/* 0-pad to multiple of 16 */
p = PTR_ALIGN(pos, 8); if ((uintptr_t)p & 8)
*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);
staticvoid *inline_tx_skb_header(conststruct sk_buff *skb, conststruct sge_txq *q, void *pos, int length)
{
u64 *p; int left = (void *)q->stat - pos;
if (likely(length <= left)) {
memcpy(pos, skb->data, length);
pos += length;
} else {
memcpy(pos, skb->data, left);
memcpy(q->desc, skb->data + left, length - left);
pos = (void *)q->desc + (length - left);
} /* 0-pad to multiple of 16 */
p = PTR_ALIGN(pos, 8); if ((uintptr_t)p & 8) {
*p = 0; return p + 1;
} return p;
}
/* * Figure out what HW csum a packet wants and return the appropriate control * bits.
*/ static u64 hwcsum(enum chip_type chip, conststruct sk_buff *skb)
{ int csum_type; bool inner_hdr_csum = false;
u16 proto, ver;
if (skb->encapsulation &&
(CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
inner_hdr_csum = true;
if (inner_hdr_csum) {
ver = inner_ip_hdr(skb)->version;
proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
inner_ipv6_hdr(skb)->nexthdr;
} else {
ver = ip_hdr(skb)->version;
proto = (ver == 4) ? ip_hdr(skb)->protocol :
ipv6_hdr(skb)->nexthdr;
}
if (ver == 4) { if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP; elseif (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP; else {
nocsum: /* * unknown protocol, disable HW csum * and hope a bad packet is detected
*/ return TXPKT_L4CSUM_DIS_F;
}
} else { /* * this doesn't work with extension headers
*/ if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP6; elseif (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP6; else goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP)) { int eth_hdr_len, l4_len;
u64 hdr_len;
if (inner_hdr_csum) { /* This allows checksum offload for all encapsulated * packets like GRE etc..
*/
l4_len = skb_inner_network_header_len(skb);
eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
} else {
l4_len = skb_network_header_len(skb);
eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
}
hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
/* Returns tunnel type if hardware supports offloading of the same. * It is called only for T5 and onwards.
*/ enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
u8 l4_hdr = 0; enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; struct port_info *pi = netdev_priv(skb->dev); struct adapter *adapter = pi->adapter;
if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB)) return tnl_type;
switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP):
l4_hdr = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6):
l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: return tnl_type;
}
/** * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update * @adap: the adapter * @eq: the Ethernet TX Queue * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 * * We're typically called here to update the state of an Ethernet TX * Queue with respect to the hardware's progress in consuming the TX * Work Requests that we've put on that Egress Queue. This happens * when we get Egress Queue Update messages and also prophylactically * in regular timer-based Ethernet TX Queue maintenance.
*/ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, int maxreclaim)
{ unsignedint reclaimed, hw_cidx; struct sge_txq *q = &eq->q; int hw_in_use;
if (!q->in_use || !__netif_tx_trylock(eq->txq)) return 0;
/* If the TX Queue is currently stopped and there's now more than half * the queue available, restart it. Otherwise bail out since the rest * of what we want do here is with the possibility of shipping any * currently buffered Coalesced TX Work Request.
*/ if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
netif_tx_wake_queue(eq->txq);
eq->q.restarts++;
}
/* The chip min packet length is 10 octets but some firmware * commands have a minimum packet length requirement. So, play * safe and reject anything shorter than @min_pkt_len.
*/ if (unlikely(skb->len < min_pkt_len)) return -EINVAL;
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) return -EINVAL;
if (unlikely(credits < 0)) {
eth_txq_stop(q);
dev_err(adap->pdev_dev, "%s: Tx ring %u full while queue awake!\n",
dev->name, qidx); return NETDEV_TX_BUSY;
}
if (is_eth_imm(skb, chip_ver))
immediate = true;
if (skb->encapsulation && chip_ver > CHELSIO_T5)
tnl_type = cxgb_encap_offload_supported(skb);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { /* After we're done injecting the Work Request for this * packet, we'll be below our "stop threshold" so stop the TX * Queue now and schedule a request for an SGE Egress Queue * Update message. The queue will get started later on when * the firmware processes this Work Request and sends us an * Egress Queue Status Update message indicating that space * has opened up.
*/
eth_txq_stop(q); if (chip_ver > CHELSIO_T5)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { /* If current position is already at the end of the * txq, reset the current to point to start of the queue * and update the end ptr as well.
*/
left = (u8 *)end - (u8 *)q->q.stat;
end = (void *)q->q.desc + left;
sgl = (void *)q->q.desc;
}
/* Constants ... */ enum { /* Egress Queue sizes, producer and consumer indices are all in units * of Egress Context Units bytes. Note that as far as the hardware is * concerned, the free list is an Egress Queue (the host produces free * buffers which the hardware consumes) and free list entries are * 64-bit PCI DMA addresses.
*/
EQ_UNIT = SGE_EQ_IDXSIZE,
FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
/**
 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit completely as
 * immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}
/** * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR * @skb: the packet * * Returns the number of flits needed for a TX Work Request for the * given Ethernet packet, including the needed WR and CPL headers.
*/ staticinlineunsignedint t4vf_calc_tx_flits(conststruct sk_buff *skb)
{ unsignedint flits;
/* If the skb is small enough, we can pump it out as a work request * with only immediate data. In that case we just have to have the * TX Packet header plus the skb data in the Work Request.
*/ if (t4vf_is_eth_imm(skb)) return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), sizeof(__be64));
/* Otherwise, we're going to have to construct a Scatter gather list * of the skb body and fragments. We also include the flits necessary * for the TX Packet Work Request and CPL. We always have a firmware * Write Header (incorporated as part of the cpl_tx_pkt_lso and * cpl_tx_pkt structures), followed by either a TX Packet Write CPL * message or, if we're doing a Large Send Offload, an LSO CPL message * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_lso_core) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); else
flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); return flits;
}
/** * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
*/ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{ unsignedint last_desc, flits, ndesc; conststruct skb_shared_info *ssi; struct fw_eth_tx_pkt_vm_wr *wr; struct tx_sw_desc *sgl_sdesc; struct cpl_tx_pkt_core *cpl; conststruct port_info *pi; struct sge_eth_txq *txq; struct adapter *adapter; int qidx, credits, ret;
/* NOTE(review): this excerpt appears truncated/garbled: "last_desc" and
 * "sgl_sdesc" are declared but never assigned before being used below,
 * braces do not balance (the final '}' closes the else-branch, not the
 * function), and several keyword pairs are fused ("unsignedint",
 * "conststruct").  Compare against the upstream driver before trusting
 * this body.
 */
size_t fw_hdr_copy_len; unsignedint chip_ver;
u64 cntrl, *end;
u32 wr_mid;
/* The chip minimum packet length is 10 octets but the firmware * command that we are using requires that we copy the Ethernet header * (including the VLAN tag) into the header so we reject anything * smaller than that ...
*/
BUILD_BUG_ON(sizeof(wr->firmware) !=
(sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + sizeof(wr->ethtype) + sizeof(wr->vlantci)));
fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len); if (ret) goto out_free;
/* Figure out which TX Queue we're going to use. */
pi = netdev_priv(dev);
adapter = pi->adapter;
qidx = skb_get_queue_mapping(skb);
WARN_ON(qidx >= pi->nqsets);
txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
/* Take this opportunity to reclaim any TX Descriptors whose DMA * transfers have completed.
*/
reclaim_completed_tx(adapter, &txq->q, -1, true);
/* Calculate the number of flits and TX Descriptors we're going to * need along with how many TX Descriptors will be left over after * we inject our Work Request.
*/
flits = t4vf_calc_tx_flits(skb);
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
if (unlikely(credits < 0)) { /* Not enough room for this packet's Work Request. Stop the * TX Queue and return a "busy" condition. The queue will get * started later on when the firmware informs us that space * has opened up.
*/
eth_txq_stop(txq);
dev_err(adapter->pdev_dev, "%s: TX ring %u full while queue awake!\n",
dev->name, qidx); return NETDEV_TX_BUSY;
}
/* NOTE(review): sgl_sdesc is dereferenced below but is never assigned in
 * this excerpt; upstream computes last_desc = txq->q.pidx + ndesc - 1
 * (wrapping at q.size) and sgl_sdesc = &txq->q.sdesc[last_desc] first.
 * That code appears to have been lost — as written this dereferences an
 * uninitialized pointer.  TODO: restore from upstream.
 */
if (!t4vf_is_eth_imm(skb) &&
unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
sgl_sdesc->addr) < 0)) { /* We need to map the skb into PCI DMA space (because it can't * be in-lined directly into the Work Request) and the mapping * operation failed. Record the error and drop the packet.
*/
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
txq->mapping_err++; goto out_free;
}
chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { /* After we're done injecting the Work Request for this * packet, we'll be below our "stop threshold" so stop the TX * Queue now and schedule a request for an SGE Egress Queue * Update message. The queue will get started later on when * the firmware processes this Work Request and sends us an * Egress Queue Status Update message indicating that space * has opened up.
*/
eth_txq_stop(txq); if (chip_ver > CHELSIO_T5)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle * the WR Header wrapping around the TX Descriptor Ring. If our * maximum header size ever exceeds one TX Descriptor, we'll need to * do something else here.
*/
WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
wr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL * message with an encapsulated TX Packet CPL message. Otherwise we * just use a TX Packet CPL message.
*/
/* NOTE(review): in the upstream driver the GSO branch below fills in an
 * LSO CPL using the lso/v6/l3hdr_len/eth_xtra_len locals, and a separate
 * else branch sets cpl/cntrl for the non-GSO case.  Here cpl and cntrl
 * are only assigned inside the gso_size branch, so for non-GSO packets
 * both are read uninitialized further down — this branch body looks
 * truncated.  TODO: restore from upstream.
 */
ssi = skb_shinfo(skb); if (ssi->gso_size) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
/* Set up TX Packet CPL pointer, control word and perform * accounting.
*/
cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(adapter->params.chip, skb) |
TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else {
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
}
/* If there's a VLAN tag present, add that to the list of things to * do in this Work Request.
*/ if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
/* Fill in the TX Packet CPL message header. */
cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
TXPKT_INTF_V(pi->port_id) |
TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
/* Fill in the body of the TX Packet CPL message with either in-lined * data or a Scatter/Gather List.
*/ if (t4vf_is_eth_imm(skb)) { /* In-line the packet's data and free the skb since we don't * need it any longer.
*/
cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
dev_consume_skb_any(skb);
} else { /* Write the skb's Scatter/Gather list into the TX Packet CPL * message and retain a pointer to the skb so we can free it * later when its DMA completes. (We store the skb pointer * in the Software Descriptor corresponding to the last TX * Descriptor used by the Work Request.) * * The retained skb will be freed when the corresponding TX * Descriptors are reclaimed after their DMAs complete. * However, this could take quite a while since, in general, * the hardware is set up to be lazy about sending DMA * completion notifications to us and we mostly perform TX * reclaims in the transmit routine. * * This is good for performamce but means that we rely on new * TX packets arriving to run the destructors of completed * packets, which open up space in their sockets' send queues. * Sometimes we do not get such new packets causing TX to * stall. A single UDP transmitter is a good example of this * situation. We have a clean up timer that periodically * reclaims completed packets but it doesn't run often enough * (nor do we want it to) to prevent lengthy stalls. A * solution to this problem is to run the destructor early, * after the packet is queued but before it's DMAd. A con is * that we lie to socket memory accounting, but the amount of * extra memory is reasonable (limited by the number of TX * descriptors), the packets do actually get freed quickly by * new packets almost always, and for protocols like TCP that * wait for acks to really free up the data the extra memory * is even less. On the positive side we run the destructors * on the sending CPU rather than on a potentially different * completing CPU, usually a good thing. * * Run the destructor before telling the DMA engine about the * packet to make sure it doesn't complete and get freed * prematurely.
*/ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); struct sge_txq *tq = &txq->q;
/* If the Work Request header was an exact multiple of our TX * Descriptor length, then it's possible that the starting SGL * pointer lines up exactly with the end of our TX Descriptor * ring. If that's the case, wrap around to the beginning * here ...
*/ if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
end = (void *)((void *)tq->desc +
((void *)end - (void *)tq->stat));
}
/* NOTE(review): upstream writes the SGL (cxgb4_write_sgl), stores the
 * skb in sgl_sdesc->skb, advances the producer index and rings the
 * doorbell before returning NETDEV_TX_OK; none of that is present
 * here, and control falls straight through into out_free, which would
 * free the skb even on the "success" path.  Further evidence this
 * excerpt is truncated — do not treat the fall-through as intended.
 */
out_free: /* An error of some sort happened. Free the TX skb and tell the * OS that we've "dealt" with the packet ...
*/
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
}
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of cxgb4_reclaim_completed_tx() that is used
 *	for Tx queues that send only immediate data (presently just
 *	the control queues) and thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	/* The hardware's consumer index lives in the queue's status page in
	 * big-endian form; READ_ONCE() guards against a torn/repeated read.
	 */
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	/* The hardware index wraps around the ring, so the raw difference
	 * can be negative; correct by one full ring size.
	 */
	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
staticinlinevoid eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
{
u32 val = *idx + n;
/* NOTE(review): the text that followed here was a German-language website
 * disclaimer ("information compiled to the best of our knowledge; no
 * guarantee of completeness, correctness or quality; the syntax coloring
 * and measurement are experimental") — an extraction artifact, not part
 * of this driver.  It also replaced the body of eosw_txq_advance_index(),
 * which is left truncated above; restore that function from upstream and
 * delete this note.
 */