/* Per-VF status flags. Fix: the four directives were collapsed onto one
 * line, which is invalid preprocessor syntax — each #define must start its
 * own line.
 */
#define IGB_VF_FLAG_CTS			0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC		0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC	0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC		0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 *
 * NOTE(review): these macros reference `hw` and `adapter`, which must be in
 * scope at every expansion site (presumably struct e1000_hw * and
 * struct igb_adapter * — confirm against callers).
 *
 * Fix: directives were collapsed onto shared lines; restored to one
 * directive per line with the original backslash continuations.
 */
#define IGB_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH	8
#define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH	1
#define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \
			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \
			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0
 * (fix: comment and directive were fused onto one line — a #define must
 * start its own line)
 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the 3K
 * buffers.
 */
#if (PAGE_SIZE < 8192)
/* NOTE(review): the matching #else/#endif for this conditional is not
 * visible in this chunk — confirm it survived the extraction.
 */
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
#define IGB_2K_TOO_SMALL_WITH_PADDING \
	((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > \
	 SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
staticinlineint igb_compute_pad(int rx_buf_len)
{ int page_size, pad_size;
staticinlineint igb_skb_pad(void)
{ int rx_buf_len;
/* If a 2K buffer cannot handle a standard Ethernet frame then * optimize padding for a 3K buffer instead of a 1.5K buffer. * * For a 3K buffer we need to add enough padding to allow for * tailroom due to NET_IP_ALIGN possibly shifting us out of * cache-line alignment.
*/ if (IGB_2K_TOO_SMALL_WITH_PADDING)
rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); else
rx_buf_len = IGB_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN;
/* VLAN info — tag carried in the upper 16 bits of the tx_flags word.
 * (fix: comment and two directives were fused onto one line)
 */
#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16
/* The largest size we can write to the descriptor is 65535. In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 * (fix: the two directives were fused onto the comment-terminator line)
 */
#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1u << IGB_MAX_TXD_PWR)
/* TX resources are shared between XDP and netstack
 * and we need to tag the buffer type to distinguish them.
 * (fix: the comment terminator was fused onto the enum line)
 */
enum igb_tx_buf_type {
	IGB_TYPE_SKB = 0,	/* buffer holds a netstack sk_buff */
	IGB_TYPE_XDP,		/* buffer holds an xdp_frame */
	IGB_TYPE_XSK		/* buffer belongs to an AF_XDP socket queue */
};
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer.
 *
 * NOTE(review): this extraction is whitespace-mangled ("unsignedlong",
 * "unsignedint") and the struct is truncated here — there is no closing
 * brace before the next definition, so trailing members appear to be
 * missing. Reconcile against the upstream igb driver header before
 * compiling; code left byte-identical below.
 */ struct igb_tx_buffer { union e1000_adv_tx_desc *next_to_watch; unsignedlong time_stamp; enum igb_tx_buf_type type; union { struct sk_buff *skb; struct xdp_frame *xdpf;
}; unsignedint bytecount;
u16 gso_segs;
__be16 protocol;
/* Per-vector ring container: links the rings serviced by one interrupt
 * vector with the byte/packet totals accumulated for ITR adjustment.
 * (fix: "unsignedint" had its whitespace stripped; one member per line)
 */
struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
/* NOTE(review): per-queue descriptor-ring state. This extraction is
 * whitespace-mangled ("unsignedlong", "unsignedint") and appears spliced
 * together from different parts of the original header: the struct never
 * closes before the next definition, #define directives sit mid-struct,
 * and the members from tx_ring_count onward (VF bookkeeping, RSS queue
 * count, shadow VFTA) look like per-adapter rather than per-ring state —
 * verify against the upstream driver header. Code left byte-identical
 * below; only comments added.
 */
struct igb_ring { struct igb_q_vector *q_vector; /* backlink to q_vector */ struct net_device *netdev; /* back pointer to net_device */ struct bpf_prog *xdp_prog; struct device *dev; /* device pointer for dma mapping */ union { /* array of buffer info structs */ struct igb_tx_buffer *tx_buffer_info; struct igb_rx_buffer *rx_buffer_info; struct xdp_buff **rx_buffer_info_zc;
}; void *desc; /* descriptor ring memory */ unsignedlong flags; /* ring specific flags */ void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */ unsignedint size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */ bool launchtime_enable; /* true if LaunchTime is enabled */ bool cbs_enable; /* indicates if CBS is enabled */
s32 idleslope; /* idleSlope in kbps */
s32 sendslope; /* sendSlope in kbps */
s32 hicredit; /* hiCredit in bytes */
s32 locredit; /* loCredit in bytes */
/* everything past this point are written often */
u16 next_to_clean;
u16 next_to_use;
u16 next_to_alloc;
/* The number of L2 ether-type filter registers, Index 3 is reserved
 * for PTP 1588 timestamp
 */ #define MAX_ETYPE_FILTER (4 - 1) /* ETQF filter list: one static filter per filter consumer. This is
 * to avoid filter collisions later. Add new filters here!! * * Current filters: Filter 3
 */ #define IGB_ETQF_FILTER_1588 3
/* to not mess up cache alignment, always add to the bottom
 * NOTE(review): the fields below look like struct igb_adapter members
 * (VF data, RSS queue count, shadow VFTA), not ring state — confirm.
 */
u16 tx_ring_count;
u16 rx_ring_count; unsignedint vfs_allocated_count; struct vf_data_storage *vf_data; int vf_rate_link_speed;
u32 rss_queues;
u32 wvbr;
u32 *shadow_vfta;
/* igb_xdp_ring_update_tail - notify hardware of new TX descriptors
 * @ring: TX ring whose tail register is bumped to next_to_use
 *
 * This function assumes __netif_tx_lock is held by the caller
 * (checked via lockdep below).
 * (fix: "static inline void" had its whitespace stripped and the header
 * comment was fused onto the signature line)
 */
static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
	lockdep_assert_held(&txring_txq(ring)->_xmit_lock);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}
/* NOTE(review): extraneous web-page boilerplate left over from the
 * extraction — not part of the driver source; safe to remove. Wrapped in a
 * comment so the file remains valid C. English gloss: "The information on
 * this web page was compiled carefully to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax rendering and the
 * measurement are still experimental." Original text:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */