/* Mailbox directions.
 * Each value selects which Tx/Rx halves of the shared mbox region a
 * device uses (see otx2_mbox_init()). All eight #defines were fused
 * onto one physical line in the original, so only the first was
 * actually defined — each directive must be on its own line.
 */
#define MBOX_DIR_AFPF		0  /* AF replies to PF */
#define MBOX_DIR_PFAF		1  /* PF sends messages to AF */
#define MBOX_DIR_PFVF		2  /* PF replies to VF */
#define MBOX_DIR_VFPF		3  /* VF sends messages to PF */
#define MBOX_DIR_AFPF_UP	4  /* AF sends messages to PF */
#define MBOX_DIR_PFAF_UP	5  /* PF replies to AF */
#define MBOX_DIR_PFVF_UP	6  /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP	7  /* VF replies to PF */
/* Mailbox peer type.
 * NOTE(review): names suggest AF<->VF vs. AF<->PF mailbox peers —
 * confirm against the code that consumes these constants.
 */
enum {
	TYPE_AFVF,
	TYPE_AFPF,
};
/* Per-peer mailbox bookkeeping (one instance per remote device). */
struct otx2_mbox_dev {
	void	   *mbase;	/* This dev's mbox region */
	void	   *hwbase;
	spinlock_t  mbox_lock;	/* Serializes access to this dev's region */
	u16	    msg_size;	/* Total msg size to be sent */
	u16	    rsp_size;	/* Total rsp size to be sure the reply is ok */
	u16	    num_msgs;	/* No of msgs sent or waiting for response */
	u16	    msgs_acked;	/* No of msgs for which response is received */
};
/* One mailbox instance: describes the shared memory region, the CSR
 * used to notify the peer, and the per-peer state array (dev[ndevs]).
 */
struct otx2_mbox {
	struct pci_dev *pdev;
	void	 *hwbase;	/* Mbox region advertised by HW */
	void	 *reg_base;	/* CSR base for this dev */
	u64	  trigger;	/* Trigger mbox notification */
	u16	  tr_shift;	/* Mbox trigger shift */
	u64	  rx_start;	/* Offset of Rx region in mbox memory */
	u64	  tx_start;	/* Offset of Tx region in mbox memory */
	u16	  rx_size;	/* Size of Rx region */
	u16	  tx_size;	/* Size of Tx region */
	u16	  ndevs;	/* The number of peers */
	struct otx2_mbox_dev *dev;	/* Array of ndevs per-peer states */
};
/* Header which precedes all mbox messages */
struct mbox_hdr {
	u64 msg_size;	/* Total msgs size embedded */
	u16 num_msgs;	/* No of msgs embedded */
	u16 opt_msg;
	u8 sig;
};
/* Header which precedes every msg and is also part of it.
 * The signature and version #defines were fused onto the field lines
 * in the original text, which corrupts the field declarations —
 * preprocessor directives must occupy their own lines.
 */
struct mbox_msghdr {
	u16 pcifunc;	/* Who's sending this msg */
	u16 id;		/* Mbox message ID */
#define OTX2_MBOX_REQ_SIG  (0xdead)
#define OTX2_MBOX_RSP_SIG  (0xbeef)
	u16 sig;	/* Signature, for validating corrupted msgs */
#define OTX2_MBOX_VERSION  (0x000a)
	u16 ver;	/* Version of msg's structure for this ID */
	u16 next_msgoff; /* Offset of next msg within mailbox region */
	int rc;		/* Msg process'ed response code */
};
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid); void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid); void otx2_mbox_destroy(struct otx2_mbox *mbox); int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase, struct pci_dev *pdev, void __force *reg_base, int direction, int ndevs);
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase, struct pci_dev *pdev, void __force *reg_base, int direction, int ndevs, unsignedlong *bmap); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size, int size_rsp); struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *msg); int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid); int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
u16 pcifunc, u16 id); bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid); constchar *otx2_mbox_id2name(u16 id); staticinlinestruct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, int devid, int size)
{ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
/* Generic request msg used for those mbox messages which
 * don't send any data in the request.
 */
struct msg_req {
	struct mbox_msghdr hdr;
};
/* Generic response msg used an ack or response for those mbox
 * messages which don't have a specific rsp msg format.
 */
struct msg_rsp {
	struct mbox_msghdr hdr;
};
/* Response carrying clock frequencies of the device. */
struct ready_msg_rsp {
	struct mbox_msghdr hdr;
	u16 sclk_freq;	/* SCLK frequency (in MHz) */
	u16 rclk_freq;	/* RCLK frequency (in MHz) */
};
/* Structure for requesting resource provisioning.
 * 'modify' flag to be used when either requesting more
 * or to detach partial of a certain resource type.
 * Rest of the fields specify how many of what type to
 * be attached.
 * To request LFs from two blocks of same type this mailbox
 * can be sent twice as below:
 * struct rsrc_attach *attach;
 * .. Allocate memory for message ..
 * attach->cptlfs = 3; <3 LFs from CPT0>
 * .. Send message ..
 * .. Allocate memory for message ..
 * attach->modify = 1;
 * attach->cpt_blkaddr = BLKADDR_CPT1;
 * attach->cptlfs = 2; <2 LFs from CPT1>
 * .. Send message ..
 */
struct rsrc_attach {
	struct mbox_msghdr hdr;
	u8   modify:1;
	u8   npalf:1;
	u8   nixlf:1;
	u16  sso;
	u16  ssow;
	u16  timlfs;
	u16  cptlfs;
	int  cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
};
/* Structure for relinquishing resources.
 * 'partial' flag to be used when relinquishing all resources
 * but only of a certain type. If not set, all resources of all
 * types provisioned to the RVU function will be detached.
 */
struct rsrc_detach {
	struct mbox_msghdr hdr;
	u8 partial:1;
	u8 npalf:1;
	u8 nixlf:1;
	u8 sso:1;
	u8 ssow:1;
	u8 timlfs:1;
	u8 cptlfs:1;
};
/* Number of resources available to the caller.
 * In reply to MBOX_MSG_FREE_RSRC_CNT.
 */
struct free_rsrcs_rsp {
	struct mbox_msghdr hdr;
	u16 schq[NIX_TXSCH_LVL_CNT];	/* Free scheduler queues per level, NIX0 */
	u16 sso;
	u16 tim;
	u16 ssow;
	u16 cpt;
	u8  npa;
	u8  nix;
	u16 schq_nix1[NIX_TXSCH_LVL_CNT]; /* Free scheduler queues per level, NIX1 */
	u8  nix1;
	u8  cpt1;
	u8  ree0;
	u8  ree1;
};
/* FEC block counters reported by CGX. */
struct cgx_fec_stats_rsp {
	struct mbox_msghdr hdr;
	u64 fec_corr_blks;	/* FEC-corrected blocks */
	u64 fec_uncorr_blks;	/* FEC-uncorrectable blocks */
};

/* Structure for requesting the operation for
 * setting/getting mac address in the CGX interface
 */
struct cgx_mac_addr_set_or_get {
	struct mbox_msghdr hdr;
	u8 mac_addr[ETH_ALEN];
	u32 index;
};
/* Structure for requesting the operation to
 * add DMAC filter entry into CGX interface
 */
struct cgx_mac_addr_add_req {
	struct mbox_msghdr hdr;
	u8 mac_addr[ETH_ALEN];
};
/* Structure for response against the operation to
 * add DMAC filter entry into CGX interface
 */
struct cgx_mac_addr_add_rsp {
	struct mbox_msghdr hdr;
	u32 index;	/* Index of the added DMAC filter entry */
};
/* Structure for requesting the operation to
 * delete DMAC filter entry from CGX interface
 */
struct cgx_mac_addr_del_req {
	struct mbox_msghdr hdr;
	u32 index;	/* Index of the DMAC filter entry to delete */
};
/* Structure for response against the operation to
 * get maximum supported DMAC filter entries
 */
struct cgx_max_dmac_entries_get_rsp {
	struct mbox_msghdr hdr;
	u32 max_dmac_filters;
};
/* Link state/capabilities as reported to users of a CGX LMAC.
 * The LMACTYPE_STR_LEN #define was fused onto the 'fec' field line in
 * the original, corrupting that declaration — it must be on its own
 * line.
 */
struct cgx_link_user_info {
	uint64_t link_up:1;
	uint64_t full_duplex:1;
	uint64_t lmac_type_id:4;
	uint64_t speed:20;	/* speed in Mbps */
	uint64_t an:1;		/* AN supported or not */
	uint64_t fec:2;		/* FEC type if enabled else 0 */
#define LMACTYPE_STR_LEN 16
	char lmac_type[LMACTYPE_STR_LEN];
};
/* Configure or query CGX pause-frame (flow control) settings. */
struct cgx_pause_frm_cfg {
	struct mbox_msghdr hdr;
	u8 set;		/* set = 1 if the request is to config pause frames */
			/* set = 0 if the request is to fetch pause frames config */
	u8 rx_pause;
	u8 tx_pause;
};
struct npc_set_pkind { struct mbox_msghdr hdr; #define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0) #define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
u64 mode; #define PKIND_TX BIT_ULL(0) #define PKIND_RX BIT_ULL(1)
u8 dir;
u8 pkind; /* valid only in case custom flag */
u8 var_len_off; /* Offset of custom header length field. * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
*/
u8 var_len_off_mask; /* Mask for length with in offset */
u8 shift_dir; /* shift direction to get length of the header at var_len_off */
};
/* For NPA LF context alloc and init */
struct npa_lf_alloc_req {
	struct mbox_msghdr hdr;
	int node;
	int aura_sz;	/* No of auras */
	u32 nr_pools;	/* No of pools */
	u64 way_mask;
};
/* Response to NPA LF alloc: HW constants the LF driver needs. */
struct npa_lf_alloc_rsp {
	struct mbox_msghdr hdr;
	u32 stack_pg_ptrs;	/* No of ptrs per stack page */
	u32 stack_pg_bytes;	/* Size of stack page */
	u16 qints;		/* NPA_AF_CONST::QINTS */
	u8 cache_lines;		/* BATCH ALLOC DMA */
};
/* NPA AQ enqueue msg */
struct npa_aq_enq_req {
	struct mbox_msghdr hdr;
	u32 aura_id;
	u8 ctype;	/* Context type: AURA or POOL */
	u8 op;		/* AQ operation: INIT/WRITE/READ/... */
	union {
		/* Valid when op == WRITE/INIT and ctype == AURA.
		 * LF fills the pool_id in aura.pool_addr. AF will translate
		 * the pool_id to pool context pointer.
		 */
		struct npa_aura_s aura;
		/* Valid when op == WRITE/INIT and ctype == POOL */
		struct npa_pool_s pool;
	};
	/* Mask data when op == WRITE (1=write, 0=don't write) */
	union {
		/* Valid when op == WRITE and ctype == AURA */
		struct npa_aura_s aura_mask;
		/* Valid when op == WRITE and ctype == POOL */
		struct npa_pool_s pool_mask;
	};
};
/* Response to an NPA AQ enqueue request. */
struct npa_aq_enq_rsp {
	struct mbox_msghdr hdr;
	union {
		/* Valid when op == READ and ctype == AURA */
		struct npa_aura_s aura;
		/* Valid when op == READ and ctype == POOL */
		struct npa_pool_s pool;
	};
};
/* Disable all contexts of type 'ctype' */
struct hwctx_disable_req {
	struct mbox_msghdr hdr;
	u8 ctype;
};
/* NIX VLAN tag (VTAG) configuration for Tx insertion or Rx strip/capture. */
struct nix_vtag_config {
	struct mbox_msghdr hdr;
	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
	u8 vtag_size;
	/* cfg_type is '0' for tx vlan cfg
	 * cfg_type is '1' for rx vlan cfg
	 */
	u8 cfg_type;
	union {
		/* valid when cfg_type is '0' */
		struct {
			u64 vtag0;
			u64 vtag1;

			/* cfg_vtag0 & cfg_vtag1 fields are valid
			 * when free_vtag0 & free_vtag1 are '0's.
			 */
			/* cfg_vtag0 = 1 to configure vtag0 */
			u8 cfg_vtag0 :1;
			/* cfg_vtag1 = 1 to configure vtag1 */
			u8 cfg_vtag1 :1;

			/* vtag0_idx & vtag1_idx are only valid when
			 * both cfg_vtag0 & cfg_vtag1 are '0's,
			 * these fields are used along with free_vtag0
			 * & free_vtag1 to free the nix lf's tx_vlan
			 * configuration.
			 *
			 * Denotes the indices of tx_vtag def registers
			 * that needs to be cleared and freed.
			 */
			int vtag0_idx;
			int vtag1_idx;

			/* free_vtag0 & free_vtag1 fields are valid
			 * when cfg_vtag0 & cfg_vtag1 are '0's.
			 */
			/* free_vtag0 = 1 clears vtag0 configuration
			 * vtag0_idx denotes the index to be cleared.
			 */
			u8 free_vtag0 :1;
			/* free_vtag1 = 1 clears vtag1 configuration
			 * vtag1_idx denotes the index to be cleared.
			 */
			u8 free_vtag1 :1;
		} tx;

		/* valid when cfg_type is '1' */
		struct {
			/* rx vtag type index, valid values are in 0..7 range */
			u8 vtag_type;
			/* rx vtag strip */
			u8 strip_vtag :1;
			/* rx vtag capture */
			u8 capture_vtag :1;
		} rx;
	};
};
/* Response to a Tx-side nix_vtag_config request. */
struct nix_vtag_config_rsp {
	struct mbox_msghdr hdr;
	/* Indices of tx_vtag def registers used to configure
	 * tx vtag0 & vtag1 headers, these indices are valid
	 * when nix_vtag_config mbox requested for vtag0 and/
	 * or vtag1 configuration.
	 */
	int vtag0_idx;
	int vtag1_idx;
};
/* Request backpressure (BP) id assignment for a range of channels. */
struct nix_bp_cfg_req {
	struct mbox_msghdr hdr;
	u16 chan_base;		/* Starting channel number */
	u8 chan_cnt;		/* Number of channels */
	u8 bpid_per_chan;
	/* bpid_per_chan = 0 assigns single bp id for range of channels */
	/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
/* Maximum channels any single NIX interface can have */ #define NIX_MAX_BPID_CHAN 256 struct nix_bp_cfg_rsp { struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
struct nix_mcast_grp_create_rsp {
	struct mbox_msghdr hdr;
	/* This mcast_grp_idx should be passed during MCAM
	 * write entry for multicast. AF will identify the
	 * corresponding multicast table index associated
	 * with the group id and program the same to MCAM entry.
	 * This group id is also needed during group delete
	 * and update request.
	 */
	u32 mcast_grp_idx;
};
struct nix_mcast_grp_destroy_req {
	struct mbox_msghdr hdr;
	/* Group id returned by nix_mcast_grp_create_rsp */
	u32 mcast_grp_idx;
	/* If AF is requesting for destroy, then set
	 * it to '1'. Otherwise keep it to '0'
	 */
	u8 is_af;
};
struct nix_mcast_grp_update_req { struct mbox_msghdr hdr; /* Group id returned by nix_mcast_grp_create_rsp */
u32 mcast_grp_idx; /* Number of multicast/mirror entries requested */
u32 num_mce_entry; #define NIX_MCE_ENTRY_MAX 64 #define NIX_RX_RQ 0 #define NIX_RX_RSS 1 /* Receive queue or RSS index within pf_func */
u32 rq_rss_index[NIX_MCE_ENTRY_MAX]; /* pcifunc is required for both ingress and egress multicast */
u16 pcifunc[NIX_MCE_ENTRY_MAX]; /* channel is required for egress multicast */
u16 channel[NIX_MCE_ENTRY_MAX]; #define NIX_MCAST_OP_ADD_ENTRY 0 #define NIX_MCAST_OP_DEL_ENTRY 1 /* Destination type. 0:Receive queue, 1:RSS*/
u8 dest_type[NIX_MCE_ENTRY_MAX];
u8 op; /* If AF is requesting for update, then set * it to '1'. Otherwise keep it to '0'
*/
u8 is_af;
};
/* There is no need to allocate morethan 1 bandwidth profile * per RQ of a PF_FUNC's NIXLF. So limit the maximum * profiles to 64 per PF_FUNC.
*/ #define MAX_BANDPROF_PER_PFFUNC 64
u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
};
/* Response to an MCAM entry allocation request. */
struct npc_mcam_alloc_entry_rsp {
	struct mbox_msghdr hdr;
	u16 entry;	/* Entry allocated or start index if contiguous.
			 * Invalid incase of non-contiguous.
			 */
	u16 count;	/* Number of entries allocated */
	u16 free_count;	/* Number of entries available */
	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
};
struct npc_mcam_free_entry_req {
	struct mbox_msghdr hdr;
	u16 entry;	/* Entry index to be freed */
	u8 all;		/* If all entries allocated to this PFVF to be freed */
};
struct mcam_entry { #define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
u64 kw[NPC_MAX_KWS_IN_KEY];
u64 kw_mask[NPC_MAX_KWS_IN_KEY];
u64 action;
u64 vtag_action;
};
struct npc_mcam_write_entry_req {
	struct mbox_msghdr hdr;
	struct mcam_entry entry_data;
	u16 entry;	/* MCAM entry to write this match key */
	u16 cntr;	/* Counter for this MCAM entry */
	u8 intf;	/* Rx or Tx interface */
	u8 enable_entry;/* Enable this MCAM entry ? */
	u8 set_cntr;	/* Set counter for this entry ? */
};
/* Enable/Disable a given entry */
struct npc_mcam_ena_dis_entry_req {
	struct mbox_msghdr hdr;
	u16 entry;
};
struct npc_mcam_shift_entry_req { struct mbox_msghdr hdr; #define NPC_MCAM_MAX_SHIFTS 64
u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
u16 new_entry[NPC_MCAM_MAX_SHIFTS];
u16 shift_count; /* Number of entries to shift */
};
struct npc_mcam_shift_entry_rsp {
	struct mbox_msghdr hdr;
	u16 failed_entry_idx;	/* Index in 'curr_entry', not entry itself */
};
struct npc_mcam_alloc_counter_rsp {
	struct mbox_msghdr hdr;
	u16 cntr;	/* Counter allocated or start index if contiguous.
			 * Invalid incase of non-contiguous.
			 */
	u16 count;	/* Number of counters allocated */
	u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
};
struct npc_mcam_oper_counter_req {
	struct mbox_msghdr hdr;
	u16 cntr;	/* Free a counter or clear/fetch it's stats */
};
struct npc_mcam_oper_counter_rsp {
	struct mbox_msghdr hdr;
	u64 stat;	/* valid only while fetching counter's stats */
};
struct npc_mcam_unmap_counter_req {
	struct mbox_msghdr hdr;
	u16 cntr;
	u16 entry;	/* Entry and counter to be unmapped */
	u8 all;		/* Unmap all entries using this counter ? */
};
/* Request allocation of MCS (MACsec) resources. */
struct mcs_alloc_rsrc_req {
	struct mbox_msghdr hdr;
	u8 rsrc_type;
	u8 rsrc_cnt;	/* Resources count */
	u8 mcs_id;	/* MCS block ID */
	u8 dir;		/* Macsec ingress or egress side */
	u8 all;		/* Allocate all resource type one each */
	u64 rsvd;
};
/* Response listing the MCS resource indices that were reserved. */
struct mcs_alloc_rsrc_rsp {
	struct mbox_msghdr hdr;
	u8 flow_ids[128];	/* Index of reserved entries */
	u8 secy_ids[128];
	u8 sc_ids[128];
	u8 sa_ids[256];
	u8 rsrc_type;
	u8 rsrc_cnt;		/* No of entries reserved */
	u8 mcs_id;
	u8 dir;
	u8 all;
	u8 rsvd[256];		/* reserved fields for future expansion */
};
/* Free previously allocated MCS resources. */
struct mcs_free_rsrc_req {
	struct mbox_msghdr hdr;
	u8 rsrc_id;	/* Index of the entry to be freed */
	u8 rsrc_type;
	u8 mcs_id;
	u8 dir;
	u8 all;		/* Free all the cam resources */
	u64 rsvd;
};
/* Program an MCS flow-id TCAM entry and its secy/sc mapping. */
struct mcs_flowid_entry_write_req {
	struct mbox_msghdr hdr;
	u64 data[4];
	u64 mask[4];
	u64 sci;	/* CNF10K-B for tx_secy_mem_map */
	u8 flow_id;
	u8 secy_id;	/* secyid for which flowid is mapped */
	u8 sc_id;	/* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
	u8 ena;		/* Enable tcam entry */
	u8 ctrl_pkt;
	u8 mcs_id;
	u8 dir;
	u64 rsvd;
};
/*
 * NOTE(review): the following German webpage boilerplate was pasted
 * into this header verbatim and is almost certainly not part of the
 * original source; translated and wrapped in a comment so the file
 * stays valid C. Consider removing it entirely.
 *
 * "The information on this web page was compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */