/* bnx2x.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation * Copyright (c) 2014 QLogic Corporation * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver
*/
/* Define this to make the driver freeze on error to allow getting debug info
 * (you will need to reboot afterwards).
 */
/* #define BNX2X_STOP_ON_ERROR */

/* FIXME: Delete the DRV_MODULE_VERSION below, but please be warned
 * that it is not an easy task because such change has all chances
 * to break this driver due to amount of abuse of in-kernel interfaces
 * between modules and FW.
 *
 * DO NOT UPDATE DRV_MODULE_VERSION below.
 */
/* Driver version string (frozen on purpose - see FIXME above). */
#define DRV_MODULE_VERSION	"1.713.36-0"
/* Bootcode (MCP) version threshold - presumably the minimum supported
 * bootcode; used elsewhere in the driver. TODO confirm against callers.
 */
#define BNX2X_BC_VER		0x040200
/* Probe-time logging helper: before we have a dev->name use dev_info().
 * Prints only when the 'probe' message level is enabled via netif_msg_probe().
 * NOTE: expects a local 'bp' (driver handle with ->pdev) at every call site.
 */
#define BNX2X_DEV_INFO(fmt, ...)				 \
do {								 \
	if (unlikely(netif_msg_probe(bp)))			 \
		dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__);	 \
} while (0)
/**
 * CIDs and CLIDs:
 * CLIDs below is a CLID for func 0, then the CLID for other
 * functions will be calculated by the formula:
 *
 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
 */
enum {
	BNX2X_ISCSI_ETH_CL_ID_IDX,	/* iSCSI L2 client index */
	BNX2X_FCOE_ETH_CL_ID_IDX,	/* FCoE L2 client index */
	BNX2X_MAX_CNIC_ETH_CL_ID_IDX,	/* number of CNIC L2 client indices */
};
/* Use a value high enough to be above all the PFs, which has least significant
 * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
 * calculate doorbell address according to old doorbell configuration scheme
 * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number.
 * We must avoid coming up with cid 8 for iscsi since according to this method
 * the designated UIO cid will come out 0 and it has a special handling for that
 * case which doesn't suit us. Therefore we will ceiling to the closest cid
 * which has least significant nibble 8, and if it is 8 we will move forward to
 * 0x18.
 */

/* first CID past the regular L2 ethernet CIDs */
#define BNX2X_1st_NON_L2_ETH_CID(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
					 (bp)->max_cos)
/* amount of cids traversed by UIO's DPM addition to doorbell */
#define UIO_DPM			8
/* roundup to DPM offset */
#define UIO_ROUNDUP(bp)		(roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
					 UIO_DPM))
/* offset to nearest value which has lsb nibble matching DPM */
#define UIO_CID_OFFSET(bp)	((UIO_ROUNDUP(bp) + UIO_DPM) % \
				 (UIO_DPM * 2))
/* add offset to rounded-up cid to get a value which could be used with UIO */
#define UIO_DPM_ALIGN(bp)	(UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
/* but wait - avoid UIO special case for cid 0 */
#define UIO_DPM_CID0_OFFSET(bp)	((UIO_DPM * 2) * \
				 (UIO_DPM_ALIGN(bp) == UIO_DPM))
/* Properly DPM aligned CID adjusted for the cid-0 special case */
#define BNX2X_CNIC_START_ETH_CID(bp)	(UIO_DPM_ALIGN(bp) + \
					 (UIO_DPM_CID0_OFFSET(bp)))
/* how many cids were wasted - need this value for cid allocation */
#define UIO_CID_PAD(bp)		(BNX2X_CNIC_START_ETH_CID(bp) - \
				 BNX2X_1st_NON_L2_ETH_CID(bp))
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp)	(BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CID(bp)	(BNX2X_CNIC_START_ETH_CID(bp) + 1)
/* defines for multiple tx priority indices */
#define FIRST_TX_ONLY_COS_INDEX		1
#define FIRST_TX_COS_INDEX		0

/* Rules for calculating the cids of tx-only connections.
 * FIX: macro arguments are now parenthesized so that expression arguments
 * (e.g. 'cos + 1') expand correctly; values are unchanged for the simple
 * identifiers all current call sites pass.
 */
#define CID_TO_FP(cid, bp)		((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
		((cid) + (cos) * BNX2X_NUM_NON_CNIC_QUEUES(bp))

/* fp index inside class of service range */
#define FP_COS_TO_TXQ(fp, cos, bp) \
		((fp)->index + (cos) * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* Indexes for transmission queues array:
 * txdata for RSS i CoS j is at location i + (j * num of RSS)
 * txdata for FCoE (if exist) is at location max cos * num of RSS
 * txdata for FWD (if exist) is one location after FCoE
 * txdata for OOO (if exist) is one location after FWD
 */
enum {
	FCOE_TXQ_IDX_OFFSET,	/* FCoE txq follows all ETH txqs */
	FWD_TXQ_IDX_OFFSET,	/* forwarding txq follows FCoE */
	OOO_TXQ_IDX_OFFSET,	/* out-of-order txq follows FWD */
};

/* first txq index past the regular ETH tx queues */
#define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
/* fast path */

/*
 * This driver uses new build_skb() API :
 * RX ring buffer contains pointer to kmalloc() data only,
 * skb are built only after Hardware filled the frame.
 */
struct sw_rx_bd {
	u8		*data;	/* raw kmalloc()ed buffer; skb built later */
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* DMA address kept for unmapping */
};
struct sw_tx_bd {
	struct sk_buff	*skb;		/* skb owning this tx slot */
	u16		first_bd;	/* ring index of the packet's first BD */
	u8		flags;		/* BNX2X_* flag bits below */
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD		(1<<0)
#define BNX2X_HAS_SECOND_PBD		(1<<1)
};
/* SGE ring related macros */ #define NUM_RX_SGE_PAGES 2 #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) #define NEXT_PAGE_SGE_DESC_CNT 2 #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) /* RX_SGE_CNT is promised to be a power of 2 */ #define RX_SGE_MASK (RX_SGE_CNT - 1) #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) #define MAX_RX_SGE (NUM_RX_SGE - 1) #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
(MAX_RX_SGE_CNT - 1)) ? \
(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
(x) + 1) #define RX_SGE(x) ((x) & MAX_RX_SGE)
/*
 * Number of required SGEs is the sum of two:
 * 1. Number of possible opened aggregations (next packet for
 *    these aggregations will probably consume SGE immediately)
 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
 *    after placement on BD for new TPA aggregation)
 *
 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
 */
#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \
				 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
				 MAX_RX_SGE_CNT)
/* dropless flow-control low/high thresholds, in SGEs */
#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \
				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
/* Manipulate a bit vector defined as an array of u64 */

/* Number of bits in one sge_mask array element */
#define BIT_VEC64_ELEM_SZ		64
#define BIT_VEC64_ELEM_SHIFT		6
#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)

/* set bit 'bit' (0..63) in a single u64 element 'el' */
#define __BIT_VEC64_SET_BIT(el, bit) \
	do { \
		el = ((el) | ((u64)0x1 << (bit))); \
	} while (0)

/* clear bit 'bit' (0..63) in a single u64 element 'el' */
#define __BIT_VEC64_CLEAR_BIT(el, bit) \
	do { \
		el = ((el) & (~((u64)0x1 << (bit)))); \
	} while (0)

/* Creates a bitmask of all ones in less significant bits.
   idx - index of the most significant bit in the created mask */
#define BIT_VEC64_ONES_MASK(idx) \
		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))

/* Number of u64 elements in SGE mask array */
#define RX_SGE_MASK_LEN			(NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
/* HC status block pointer - chip family selects which member is valid */
union host_hc_status_block {
	/* pointer to fp status block e1x */
	struct host_hc_status_block_e1x *e1x_sb;
	/* pointer to fp status block e2 */
	struct host_hc_status_block_e2  *e2_sb;
};
/* Per-aggregation (TPA) bookkeeping */
struct bnx2x_agg_info {
	/*
	 * First aggregation buffer is a data buffer, the following - are pages.
	 * We will preallocate the data buffer for each aggregation when
	 * we open the interface and will replace the BD at the consumer
	 * with this one when we receive the TPA_START CQE in order to
	 * keep the Rx BD ring consistent.
	 */
	struct sw_rx_bd		first_buf;
	u8			tpa_state;	/* one of BNX2X_TPA_* below */
#define BNX2X_TPA_START			1
#define BNX2X_TPA_STOP			2
#define BNX2X_TPA_ERROR			3
	u8			placement_offset;
	u16			parsing_flags;
	u16			vlan_tag;
	u16			len_on_bd;
	u32			rxhash;
	enum pkt_hash_types	rxhash_type;
	u16			gro_size;
	u16			full_page;
};
/* NOTE(review): the lines below are members of an enclosing struct whose
 * opening is outside this chunk (presumably struct bnx2x_fastpath) - confirm.
 */
	union eth_rx_cqe	*rx_comp_ring;	/* RX completion (CQE) ring */
	dma_addr_t		rx_comp_mapping;

	/* SGE ring */
	struct eth_rx_sge	*rx_sge_ring;
	dma_addr_t		rx_sge_mapping;

	u64			sge_mask[RX_SGE_MASK_LEN];

	u32			cid;

	__le16			fp_hc_idx;

	u8			index;		/* number in fp array */
	u8			rx_queue;	/* index for skb_record */
	u8			cl_id;		/* eth client id */
	u8			cl_qzone_id;
	u8			fw_sb_id;	/* status block number in FW */
	u8			igu_sb_id;	/* status block number in HW */

	/* TPA related */
	struct bnx2x_agg_info	*tpa_info;
#ifdef BNX2X_STOP_ON_ERROR
	u64			tpa_queue_used;
#endif
	/* The size is calculated using the following:
	     sizeof name field from netdev structure +
	     4 ('-Xx-' string) +
	     4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
	char			name[FP_NAME_SIZE];
/* number of NEXT_PAGE descriptors may be required during placement */
#define NEXT_CNT_PER_TX_PKT(bds) \
				(((bds) + MAX_TX_DESC_CNT - 1) / \
				 MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)

/* max BDs per tx packet w/o next_pages:
 * START_BD		- describes packet
 * START_BD(splitted)	- includes unpaged data segment for GSO
 * PARSING_BD		- for TSO and CSUM data
 * PARSING_BD2		- for encapsulation data
 * Frag BDs		- describes pages for frags
 */
#define BDS_PER_TX_PKT		4
#define MAX_BDS_PER_TX_PKT	(MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT	(MAX_BDS_PER_TX_PKT + \
				 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS		8
#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
#define NEXT_PAGE_RX_DESC_CNT	2
#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
#define RX_DESC_MASK		(RX_DESC_CNT - 1)
#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD		(NUM_RX_BD - 1)
#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)

/* dropless fc calculations for BDs
 *
 * Number of BDs should be as the number of buffers in BRB:
 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
 * "next" elements on each page
 */
#define NUM_BD_REQ		BRB_SIZE(bp)
#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
				 MAX_RX_DESC_CNT)
#define BD_TH_LO(bp)		(NUM_BD_REQ + \
				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
				 FW_DROP_LEVEL(bp))
#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
/* * As long as CQE is X times bigger than BD entry we have to allocate X times * more pages for CQ ring in order to keep it balanced with BD ring
*/ #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) #define NEXT_PAGE_RCQ_DESC_CNT 1 #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) #define MAX_RCQ_BD (NUM_RCQ_BD - 1) #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
(MAX_RCQ_DESC_CNT - 1)) ? \
(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
(x) + 1) #define RCQ_BD(x) ((x) & MAX_RCQ_BD)
/* dropless fc calculations for RCQs
 *
 * Number of RCQs should be as number of buffers in BRB:
 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
 * "next" elements on each page
 */
#define NUM_RCQ_REQ		BRB_SIZE(bp)
/* FIX: was computed from NUM_BD_REQ; use NUM_RCQ_REQ for consistency with
 * this block (both expand to BRB_SIZE(bp), so the value is unchanged).
 */
#define NUM_RCQ_PG_REQ		((NUM_RCQ_REQ + MAX_RCQ_DESC_CNT - 1) / \
				 MAX_RCQ_DESC_CNT)
#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \
				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
				 FW_DROP_LEVEL(bp))
#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
/* This is needed for determining of last_max */
#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))

/* used on a CID received from the HW */
#define SW_CID(x)		(le32_to_cpu(x) & BNX2X_SWCID_MASK)
#define CQE_CMD(x)		(le32_to_cpu(x) >> \
				 COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
/* chip_id decoding helpers */
#define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
/* synthesized revision value used when running on emulation/FPGA */
#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
					 (CHIP_REV_SHIFT + 1)) \
					 << CHIP_REV_SHIFT)
#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
					 CHIP_REV_SIM(bp) :\
					 CHIP_REV_VAL(bp))
#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
					 (CHIP_REV(bp) == CHIP_REV_Bx))
#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
					 (CHIP_REV(bp) == CHIP_REV_Ax))
/* This define is used in two main places:
 * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
 * to nic-only mode or to offload mode. Offload mode is configured if either the
 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
 * registered for this port (which means that the user wants storage services).
 * 2. During cnic-related load, to know if offload mode is already configured in
 * the HW or needs to be configured.
 * Since the transition from nic-mode to offload-mode in HW causes traffic
 * corruption, nic-mode is configured only in ports on which storage services
 * were never requested.
 */
#define CONFIGURE_NIC_MODE(bp)		(!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
int flash_size; #define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ #define BNX2X_NVRAM_TIMEOUT_COUNT 30000 #define BNX2X_NVRAM_PAGE_SIZE 256
/* slow path */
#define BNX2X_MAX_NUM_OF_VFS	64
#define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)

/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS		(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)

/* The doorbell is configured to have the same number of CIDs for PFs and for
 * VFs. For this reason the PF CID zone is as large as the VF zone.
 */
#define BNX2X_FIRST_VF_CID	BNX2X_VF_CIDS
#define BNX2X_MAX_NUM_VF_QUEUES	64
#define BNX2X_VF_ID_INVALID	0xFF

/* the number of VF CIDS multiplied by the amount of bytes reserved for each
 * cid must not exceed the size of the VF doorbell
 */
#define BNX2X_VF_BAR_SIZE	512
#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
/* FIX: restored the space between '#error' and its message (lost in a
 * previous mangling of this file; '#error"..."' is non-conventional).
 */
#error "VF doorbell bar size is 512"
#endif
/*
 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
 * controlled by the number of fast-path status blocks supported by the
 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
 * status block represents an independent interrupt context that can
 * serve a regular L2 networking queue. However special L2 queues such
 * as the FCoE queue do not require a FP-SB and other components like
 * the CNIC may consume FP-SB reducing the number of possible L2 queues
 *
 * If the maximum number of FP-SB available is X then:
 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
 *    regular L2 queues is Y=X-1
 * b. In MF mode the actual number of L2 queues is Y = (X-1)/MF_factor
 * c. If the FCoE L2 queue is supported the actual number of L2 queues
 *    is Y+1
 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
 *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
 *    FP interrupt context for the CNIC).
 * e. The number of HW context (CID count) is always X or X+1 if FCoE
 *    L2 queue is supported. The cid for the FCoE L2 queue is always X.
 */
/* DMA memory not used in fastpath */
/* NOTE(review): this struct's closing brace is outside this chunk. */
struct bnx2x_slowpath {
	/* classification (MAC add/del) ramrod scratch area */
	union {
		struct mac_configuration_cmd		e1x;
		struct eth_classify_rules_ramrod_data	e2;
	} mac_rdata;

	union {
		struct eth_classify_rules_ramrod_data	e2;
	} vlan_rdata;

	union {
		struct tstorm_eth_mac_filter_config	e1x;
		struct eth_filter_rules_ramrod_data	e2;
	} rx_mode_rdata;

	union {
		struct mac_configuration_cmd		e1;
		struct eth_multicast_rules_ramrod_data  e2;
	} mcast_rdata;

	struct eth_rss_update_ramrod_data	rss_rdata;

	/* Queue State related ramrods are always sent under rtnl_lock */
	union {
		struct client_init_ramrod_data  init_data;
		struct client_update_ramrod_data update_data;
		struct tpa_update_ramrod_data tpa_data;
	} q_rdata;

	union {
		struct function_start_data	func_start;
		/* pfc configuration for DCBX ramrod */
		struct flow_control_configuration pfc_config;
	} func_rdata;

	/* afex ramrod can not be a part of func_rdata union because these
	 * events might arrive in parallel to other events from func_rdata.
	 * Therefore, if they would have been defined in the same union,
	 * data can get corrupted.
	 */
	union {
		struct afex_vif_list_ramrod_data viflist_data;
		struct function_update_data func_update;
	} func_afex_rdata;

	/* used by dmae command executer */
	struct dmae_command		dmae[MAX_DMAE_C];
/* This is a data that will be used to create a link report message.
 * We will keep the data used for the last link report in order
 * to prevent reporting the same link parameters twice.
 */
struct bnx2x_link_report_data {
	u16 line_speed;			/* Effective line speed */
	/* FIX: 'unsignedlong' (an extraction garble) restored to the real
	 * kernel type 'unsigned long'.
	 */
	unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
};
/* NOTE(review): struct bnx2x continues past this chunk; its closing brace
 * is not visible here.
 */
struct bnx2x {
	/* Fields used in the tx and intr/napi performance paths
	 * are grouped together in the beginning of the structure
	 */
	struct bnx2x_fastpath	*fp;
	struct bnx2x_sp_objs	*sp_objs;
	struct bnx2x_fp_stats	*fp_stats;
	struct bnx2x_fp_txdata	*bnx2x_txq;
	void __iomem		*regview;	/* mapped register BAR */
	void __iomem		*doorbells;	/* mapped doorbell BAR */
	u16			db_size;

	/* Max supported alignment is 256 (8 shift)
	 * minimal alignment shift 6 is optimal for 57xxx HW performance
	 */
#define BNX2X_RX_ALIGN_SHIFT		max(6, min(8, L1_CACHE_SHIFT))

	/* FW uses 2 Cache lines Alignment for start packet and size
	 *
	 * We assume skb_build() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)

	/* Total number of FW statistics requests */
	u8			fw_stats_num;

	/*
	 * This is a memory buffer that will contain both statistics
	 * ramrod request and data.
	 */
	void			*fw_stats;
	dma_addr_t		fw_stats_mapping;

	/*
	 * FW statistics request shortcut (points at the
	 * beginning of fw_stats buffer).
	 */
	struct bnx2x_fw_stats_req	*fw_stats_req;
	dma_addr_t			fw_stats_req_mapping;
	int				fw_stats_req_sz;

	/*
	 * FW statistics data shortcut (points at the beginning of
	 * fw_stats buffer + fw_stats_req_sz).
	 */
	struct bnx2x_fw_stats_data	*fw_stats_data;
	dma_addr_t			fw_stats_data_mapping;
	int				fw_stats_data_sz;

	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
	 * context size we need 8 ILT entries.
	 */
#define ILT_MAX_L2_LINES	32
	struct hw_context	context[ILT_MAX_L2_LINES];

	struct bnx2x_ilt	*ilt;
#define BP_ILT(bp)		((bp)->ilt)
#define ILT_MAX_LINES		256
/*
 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
 * to CNIC.
 */
#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))

/*
 * Maximum CID count that might be required by the bnx2x:
 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
 */
/*self test*/ int bnx2x_idle_chk(struct bnx2x *bp);
/**
 * bnx2x_set_mac_one - configure a single MAC address
 *
 * @bp:			driver handle
 * @mac:		MAC to configure
 * @obj:		MAC object handle
 * @set:		if 'true' add a new MAC, otherwise - delete
 * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
 * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
 *
 * Configures one MAC according to provided parameters or continues the
 * execution of previously scheduled commands if RAMROD_CONT is set in
 * ramrod_flags.
 *
 * Returns zero if operation has successfully completed, a positive value if the
 * operation has been successfully scheduled and a negative - if a requested
 * operations has failed.
 */
/* FIX: 'unsignedlong' (an extraction garble) restored to 'unsigned long'. */
int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags);
/**
 * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
 *
 * @bp:			driver handle
 * @mac_obj:		MAC object handle
 * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
 * @wait_for_comp:	if 'true' block until completion
 *
 * Deletes all MACs of the specific type (e.g. ETH, UC list).
 *
 * Returns zero if operation has successfully completed, a positive value if the
 * operation has been successfully scheduled and a negative - if a requested
 * operations has failed.
 */
int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp);
/* Init Function API */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id);
/* GPIO accessors */
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
/* multi-function configuration / function pretend */
void bnx2x_read_mf_cfg(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/* Allocate a zeroed DMA-coherent ILT page: 'x' receives the CPU pointer,
 * 'y' receives the DMA address. NOTE: expects a local 'bp' at the call site.
 */
#define BNX2X_ILT_ZALLOC(x, y, size)				\
	x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)

/* Free an ILT page allocated with BNX2X_ILT_ZALLOC; clears both the
 * pointer ('x') and the DMA handle ('y') to guard against reuse.
 */
#define BNX2X_ILT_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define ILOG2(x)	(ilog2((x)))
#define ILT_NUM_PAGE_ENTRIES	(3072)
/* In 57710/11 we use whole table since we have 8 func
 * In 57712 we have only 4 func, but use same size per func, then only half of
 * the table in use
 */
#define ILT_PER_FUNC	(ILT_NUM_PAGE_ENTRIES/8)
/* FIX: parenthesized the macro argument so expression arguments (e.g.
 * 'func + 1') expand correctly; value is unchanged for simple arguments.
 */
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)

/*
 * the phys address is shifted right 12 bits and has an added
 * 1=valid bit added to the 53rd bit
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
/* FIX: '(x)' parenthesized inside the casts for the same reason as above. */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
/* NOTE(review): removed unrelated German web-page disclaimer text that had
 * been accidentally appended to this header; it was not valid C and is not
 * part of the driver source.
 */