/* Upper bound on scatter-gather entries per ingress frame, given a
 * maximum receive frame size of 64KB.
 */
#define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)

/* Largest MTU we accept; tied directly to the hardware-enforced
 * Max Frame Length (currently 10KB).
 */
#define DPAA2_ETH_MFL			(10 * 1024)
#define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
/* Convert an L3 MTU into the corresponding L2 Max Frame Length */
#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)

/* Rx taildrop threshold (bytes): sized so that a large enough number
 * of jumbo frames can queue up (the current frame's length is ignored
 * when the taildrop decision is made).
 */
#define DPAA2_ETH_FQ_TAILDROP_THRESH	(1024 * 1024)

/* Largest burst size value allowed for Tx shaping */
#define DPAA2_ETH_MAX_BURST_SIZE	0xF7FF

/* Cap on Tx confirmation frames handled in one NAPI poll */
#define DPAA2_ETH_TXCONF_PER_NAPI	256

/* Cap on Tx frames handled per NAPI poll while AF_XDP is running;
 * kept equal to DPAA2_ETH_TXCONF_PER_NAPI to maximize throughput.
 */
#define DPAA2_ETH_TX_ZC_PER_NAPI	DPAA2_ETH_TXCONF_PER_NAPI

/* Per-channel buffer quota, bounding the number of ingress frames in
 * flight: for small frames the congestion group taildrop kicks in
 * first, for large ones the Rx FQ taildrop threshold does, so frame
 * drops from buffer pool depletion should remain a corner case.
 */
#define DPAA2_ETH_NUM_BUFS		1280
#define DPAA2_ETH_REFILL_THRESH \
	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)

/* Congestion group taildrop threshold: number of frames allowed to
 * accumulate at any moment across the Rx queues of one traffic class;
 * chosen so the buffer pool is not depleted before taildrop triggers.
 */
#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
	(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))

/* Congestion notification thresholds: once this many frames build up
 * on one TC's Rx queues, the MAC is told to send PFC frames for that
 * TC; PFC transmission stops when the pending count drops below the
 * exit threshold.
 */
#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
	(DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
	(DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)

/* Most buffers that a single QBMan command can acquire or release */
#define DPAA2_ETH_BUFS_PER_CMD		7

/* Hardware annotation area sizes in Rx/Tx buffers */
#define DPAA2_ETH_RX_HWA_SIZE		64
#define DPAA2_ETH_TX_HWA_SIZE		128

/* PTP nominal frequency is 1GHz, i.e. a 1ns clock period */
#define DPAA2_PTP_CLK_PERIOD_NS		1

/* WRIOP 1.0.0 has a limitation requiring Rx buffer data aligned to
 * 256B; newer revisions only need 64B alignment.
 */
#define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
#define DPAA2_ETH_RX_BUF_ALIGN		64

/* The firmware allows assigning up to 8 DPBP objects to one DPNI; by
 * default only the first DPBP (idx 0) backs all queues. With AF_XDP
 * enabled we must accommodate up to 9 DPBPs: the default pool plus
 * one distinct buffer pool per queue.
 */
#define DPAA2_ETH_DEFAULT_BP_IDX	0
#define DPAA2_ETH_MAX_BPS		9

/* The frame's software annotation stores an skb backpointer and some
 * S/G bookkeeping; the hardware options are either 0 or 64 bytes, so
 * we choose the latter.
 */
#define DPAA2_ETH_SWA_SIZE		64
/* A Tx frame's software annotation area holds different information
 * depending on the frame type; this tag selects which union layout of
 * struct dpaa2_eth_swa is in use.
 */
enum dpaa2_eth_swa_type {
	DPAA2_ETH_SWA_SINGLE,	/* linear skb frame */
	DPAA2_ETH_SWA_SG,	/* scatter-gather skb frame */
	DPAA2_ETH_SWA_XDP,	/* XDP frame */
	DPAA2_ETH_SWA_XSK,	/* AF_XDP buffer */
	DPAA2_ETH_SWA_SW_TSO,	/* software-TSO frame */
};
/* Per-frame software annotation contents, selected by 'type'.
 * Must stay smaller than DPAA2_ETH_SWA_SIZE (64B).
 */
struct dpaa2_eth_swa {
	enum dpaa2_eth_swa_type type;
	union {
		/* linear skb frame */
		struct {
			struct sk_buff *skb;
			int sgt_size;
		} single;
		/* scatter-gather skb frame */
		struct {
			struct sk_buff *skb;
			struct scatterlist *scl;
			int num_sg;
			int sgt_size;
		} sg;
		/* XDP frame */
		struct {
			int dma_size;
			struct xdp_frame *xdpf;
		} xdp;
		/* AF_XDP buffer */
		struct {
			struct xdp_buff *xdp_buff;
			int sgt_size;
		} xsk;
		/* software-TSO frame */
		struct {
			struct sk_buff *skb;
			int num_sg;
			int sgt_size;
			int is_last_fd;
		} tso;
	};
};
/* The frame annotation status word occupies the first 8 bytes of the
 * buffer's hardware annotation area.
 */
#define DPAA2_FAS_OFFSET		0
#define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))

/* The timestamp lives in the next 8 bytes of the buffer's hardware
 * annotation area.
 */
#define DPAA2_TS_OFFSET			0x8

/* Interval (in milliseconds) between link state updates */
#define DPAA2_ETH_LINK_STATE_REFRESH	1000

/* How many times to retry a frame enqueue before giving up; value
 * determined empirically, to minimize the number of frames dropped
 * on Tx.
 */
#define DPAA2_ETH_ENQUEUE_RETRIES	10

/* Retry budget for DPIO portal operations while waiting for the
 * portal to finish its current command and become available: bounded
 * so we don't spin forever on unresponsive hardware, yet generous
 * enough not to give up on a portal that is busy for valid reasons.
 */
#define DPAA2_ETH_SWP_BUSY_RETRIES	1000
/* Driver-level statistics beyond struct rtnl_link_stats64; usually
 * collected per-CPU and aggregated by ethtool.
 */
struct dpaa2_eth_drv_stats {
	__u64	tx_conf_frames;
	__u64	tx_conf_bytes;
	__u64	tx_sg_frames;
	__u64	tx_sg_bytes;
	__u64	tx_tso_frames;
	__u64	tx_tso_bytes;
	__u64	rx_sg_frames;
	__u64	rx_sg_bytes;
	/* Linear skbs sent as S/G FDs because of insufficient headroom */
	__u64	tx_converted_sg_frames;
	__u64	tx_converted_sg_bytes;
	/* Enqueues retried because the portal was busy */
	__u64	tx_portal_busy;
};
/* Per-frame-queue statistics */
struct dpaa2_eth_fq_stats {
	/* Number of frames received on this queue */
	__u64 frames;
};
/* Per-channel statistics */
struct dpaa2_eth_ch_stats {
	/* Volatile dequeues retried because the portal was busy */
	__u64 dequeue_portal_busy;
	/* Pull errors */
	__u64 pull_err;
	/* Number of CDANs; useful for estimating the average NAPI length */
	__u64 cdan;
	/* XDP counters */
	__u64 xdp_drop;
	__u64 xdp_tx;
	__u64 xdp_tx_err;
	__u64 xdp_redirect;
	/* Must stay last: these do not show up in ethtool stats */
	__u64 frames;
	__u64 frames_per_cdan;
	__u64 bytes_per_cdan;
};
/* Count of dpaa2_eth_ch_stats fields exported through ethtool
 * (everything before the trailing must-be-last members).
 */
#define DPAA2_ETH_CH_STATS	7

/* Upper bounds on the number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS		8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC	16
#define DPAA2_ETH_MAX_RX_QUEUES		\
	(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES		16
#define DPAA2_ETH_MAX_RX_ERR_QUEUES	1
#define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
					DPAA2_ETH_MAX_TX_QUEUES + \
					DPAA2_ETH_MAX_RX_ERR_QUEUES)
#define DPAA2_ETH_MAX_NETDEV_QUEUES	\
	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
	/* Buffer pool management */
	struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
	int num_bps;
	/* Tx queuing destination ID, used when enqueuing frames */
	u16 tx_qdid;
	/* MC portal used for issuing commands to the DPNI object */
	struct fsl_mc_io *mc_io;
	/* Cores which have an affine DPIO/DPCON.
	 * This is the cpu set on which Rx and Tx conf frames are processed
	 */
	struct cpumask dpio_cpumask;
	/* Standard statistics */
	struct rtnl_link_stats64 __percpu *percpu_stats;
	/* Extra stats, in addition to the ones known by the kernel */
	struct dpaa2_eth_drv_stats __percpu *percpu_extras;
	/* Connected MAC object, if any; NOTE(review): presumably NULL
	 * when no DPMAC is connected — confirm against probe code.
	 */
	struct dpaa2_mac *mac;
	/* Serializes changes to priv->mac */
	struct mutex mac_lock;
	/* Workqueue and work item for one-step PTP timestamping on Tx */
	struct workqueue_struct *dpaa2_ptp_wq;
	struct work_struct tx_onestep_tstamp;
	struct sk_buff_head tx_skbs;
	/* The one-step timestamping configuration on hardware
	 * registers could only be done when no one-step
	 * timestamping frames are in flight. So we use a mutex
	 * lock here to make sure the lock is released by last
	 * one-step timestamping packet through TX confirmation
	 * queue before transmit current packet.
	 */
	struct mutex onestep_tstamp_lock;
	/* devlink instance, trap data and port for this device */
	struct devlink *devlink;
	struct dpaa2_eth_trap_data *trap_data;
	struct devlink_port devlink_port;
/* Earliest firmware version that supports the more flexible API for
 * configuring the Rx flow hash key.
 */
#define DPNI_RX_DIST_KEY_VER_MAJOR	7
#define DPNI_RX_DIST_KEY_VER_MINOR	5
	/* NOTE(review): the enclosing function's signature is not visible in
	 * this chunk; presumably this is the tail of a Tx headroom helper
	 * taking an skb and a 'headroom' accumulator — confirm upstream.
	 */
	/* If we don't have an skb (e.g. XDP buffer), we only need space for
	 * the software annotation area
	 */
	if (!skb)
		return headroom;

	/* For non-linear skbs we have no headroom requirement, as we build a
	 * SG frame with a newly allocated SGT buffer
	 */
	if (skb_is_nonlinear(skb))
		return 0;

	/* If we have Tx timestamping, need 128B hardware annotation */
	if (skb->cb[0])
		headroom += DPAA2_ETH_TX_HWA_SIZE;

	return headroom;
}
/* Extra headroom space requested to hardware, in order to make sure there's
 * no realloc'ing in forwarding scenarios.
 *
 * Returns the Rx headroom in bytes: the Tx data offset minus the Rx
 * hardware annotation size, so forwarded frames already carry enough
 * room for Tx annotations.
 *
 * Fix: the original declarator was fused into the single identifier
 * 'staticinlineunsignedint', which does not compile; restored the
 * intended 'static inline unsigned int'.
 */
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{
	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
/* Rx hashing and flow classification configuration helpers,
 * implemented elsewhere in the driver.
 */
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.