/*
 * Receive context 0 is reserved as the control context: it handles
 * error packets as well as VL15 and multicast traffic.
 */
#define HFI1_CTRL_CTXT 0
/*
 * The driver context keeps a software counter for every event bit
 * in each of the hardware error status registers below.
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
/*
 * Per-driver statistics: values that are either not tied to a single
 * device/port, or are summed over every device and port.
 * They are exported by name through the ipathfs filesystem, so the layout
 * and element count may change without breaking compatibility.
 * If members are added or removed, hfi1_statnames[] in debugfs.c must be
 * updated to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints;      /* number of interrupts handled */
	__u64 sps_errints;   /* number of error interrupts */
	__u64 sps_txerrs;    /* tx-related packet errors */
	__u64 sps_rcverrs;   /* non-crc rcv packet errors */
	__u64 sps_hwerrs;    /* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
	__u64 sps_ctxts;     /* number of contexts currently open */
	__u64 sps_lenerrs;   /* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull;
	__u64 sps_hdrfull;
};
/*
 * First-cut "device is active" criterion: two thousand dwords of
 * combined Tx/Rx traffic in a 5-second window. SMA packets are
 * 64 dwords and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
/* * Below contains all data related to a single context (formerly called port).
*/
struct hfi1_opcode_stats_perctx;
/* Bookkeeping for the eager receive buffers owned by one context. */
struct ctxt_eager_bufs {
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
	struct {
		void *addr;
		dma_addr_t dma;
	} *rcvtids;
	u32 size;        /* total size of eager buffers */
	u32 rcvtid_size; /* size of each eager rcv tid */
	u16 count;       /* size of buffers array */
	u16 numbufs;     /* number of buffers allocated */
	u16 alloced;     /* number of rcvarray entries used */
	u16 threshold;   /* head update threshold */
};
/* Wait queue plus enqueue/dequeue counters for QP TID resource waiters. */
struct tid_queue {
	struct list_head queue_head; /* queue head for QP TID resource waiters */
	u32 enqueue;                 /* count of tid enqueues */
	u32 dequeue;                 /* count of tid dequeues */
};
struct hfi1_ctxtdata { /* rcvhdrq base, needs mmap before useful */ void *rcvhdrq; /* kernel virtual address where hdrqtail is updated */ volatile __le64 *rcvhdrtail_kvaddr; /* so functions that need physical port can get it easily */ struct hfi1_pportdata *ppd; /* so file ops can get at unit */ struct hfi1_devdata *dd; /* this receive context's assigned PIO ACK send context */ struct send_context *sc; /* per context recv functions */ const rhf_rcv_function_ptr *rhf_rcv_function_map; /* * The interrupt handler for a particular receive context can vary * throughout it's lifetime. This is not a lock protected data member so * it must be updated atomically and the prev and new value must always * be valid. Worst case is we process an extra interrupt and up to 64 * packets with the wrong interrupt handler.
*/
intr_handler do_interrupt; /** fast handler after autoactive */
intr_handler fast_handler; /** slow handler */
intr_handler slow_handler; /* napi pointer assiociated with netdev */ struct napi_struct *napi; /* verbs rx_stats per rcd */ struct hfi1_opcode_stats_perctx *opstats; /* clear interrupt mask */
u64 imask; /* ctxt rcvhdrq head offset */
u32 head; /* number of rcvhdrq entries */
u16 rcvhdrq_cnt;
u8 ireg; /* clear interrupt register */ /* receive packet sequence counter */
u8 seq_cnt; /* size of each of the rcvhdrq entries */
u8 rcvhdrqentsize; /* offset of RHF within receive header entry */
u8 rhf_offset; /* dynamic receive available interrupt timeout */
u8 rcvavail_timeout; /* Indicates that this is vnic context */ bool is_vnic; /* vnic queue index this context is mapped to */
u8 vnic_q_idx; /* Is ASPM interrupt supported for this context */ bool aspm_intr_supported; /* ASPM state (enabled/disabled) for this context */ bool aspm_enabled; /* Is ASPM processing enabled for this context (in intr context) */ bool aspm_intr_enable; struct ctxt_eager_bufs egrbufs; /* QPs waiting for context processing */ struct list_head qp_wait_list; /* tid allocation lists */ struct exp_tid_set tid_group_list; struct exp_tid_set tid_used_list; struct exp_tid_set tid_full_list;
/* Timer for re-enabling ASPM if interrupt activity quiets down */ struct timer_list aspm_timer; /* per-context configuration flags */ unsignedlong flags; /* array of tid_groups */ struct tid_group *groups; /* mmap of hdrq, must fit in 44 bits */
dma_addr_t rcvhdrq_dma;
dma_addr_t rcvhdrqtailaddr_dma; /* Last interrupt timestamp */
ktime_t aspm_ts_last_intr; /* Last timestamp at which we scheduled a timer for this context */
ktime_t aspm_ts_timer_sched; /* Lock to serialize between intr, timer intr and user threads */
spinlock_t aspm_lock; /* Reference count the base context usage */ struct kref kref; /* numa node of this context */ int numa_id; /* associated msix interrupt. */
s16 msix_intr; /* job key */
u16 jkey; /* number of RcvArray groups for this context. */
u16 rcv_array_groups; /* index of first eager TID entry. */
u16 eager_base; /* number of expected TID entries */
u16 expected_count; /* index of first expected TID entry. */
u16 expected_base; /* Device context index */
u8 ctxt;
/* PSM Specific fields */ /* lock protecting all Expected TID data */ struct mutex exp_mutex; /* lock protecting all Expected TID data of kernel contexts */
spinlock_t exp_lock; /* Queue for QP's waiting for HW TID flows */ struct tid_queue flow_queue; /* Queue for QP's waiting for HW receive array entries */ struct tid_queue rarr_queue; /* when waiting for rcv or pioavail */
wait_queue_head_t wait; /* uuid from PSM */
u8 uuid[16]; /* same size as task_struct .comm[], command that opened context */ char comm[TASK_COMM_LEN]; /* Bitmask of in use context(s) */
DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS); /* per-context event flags for fileops/intr communication */ unsignedlong event_flags; /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */ void *subctxt_uregbase; /* An array of pages for the eager receive buffers * N */ void *subctxt_rcvegrbuf; /* An array of pages for the eager header queue entries * N */ void *subctxt_rcvhdr_base; /* total number of polled urgent packets */
u32 urgent; /* saved total number of polled urgent packets for poll edge trigger */
u32 urgent_poll; /* Type of packets or conditions we want to poll for */
u16 poll_type; /* non-zero if ctxt is being shared. */
u16 subctxt_id; /* The version of the library which opened this ctxt */
u32 userversion; /* * non-zero if ctxt can be shared, and defines the maximum number of * sub-contexts for this device context.
*/
u8 subctxt_cnt;
/* Bit mask to track free TID RDMA HW flows */ unsignedlong flow_mask; struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
};
/** * rcvhdrq_size - return total size in bytes for header queue * @rcd: the receive context * * rcvhdrqentsize is in DWs, so we have to convert to bytes *
*/ staticinline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
{ return PAGE_ALIGN(rcd->rcvhdrq_cnt *
rcd->rcvhdrqentsize * sizeof(u32));
}
/*
 * High-level view of one received packet. Values that are computed once
 * and consumed several times are cached here; values used only once to
 * derive something else are stored in their raw form instead.
 */
struct hfi1_packet {
	void *ebuf;
	void *hdr;
	void *payload;
	struct hfi1_ctxtdata *rcd;
	__le32 *rhf_addr;
	struct rvt_qp *qp;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	struct opa_16b_mgmt *mgmt;
	u64 rhf;
	u32 maxcnt;
	u32 rhqoff;
	u32 dlid;
	u32 slid;
	int numpkt;
	u16 tlen;
	s16 etail;
	u16 pkey;
	u8 hlen;
	u8 rsize;
	u8 updegr;
	u8 etype;
	u8 extra_byte;
	u8 pad;
	u8 sc;
	u8 sl;
	u8 opcode;
	bool migrated;
};
/*
 * IB link-level config parameter selectors for f_get/set_ib_cfg().
 * Mostly used by MADs that set or query link parameters, plus the
 * ipath config interfaces. (Note: value 18 is intentionally unused.)
 */
#define HFI1_IB_CFG_LIDLMC 0       /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1  /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2     /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3         /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4      /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5          /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6    /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7     /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8  /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9        /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10      /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12  /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14  /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16       /* update partition keys */
#define HFI1_IB_CFG_MTU 17         /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20   /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21        /* switch port we are connected to */
/*
 * HFI or Host Link States
 *
 * Bit positions describing what the driver believes the logical and
 * physical link states to be. Used as arguments to set_link_state().
 * They are bits so several states can be tested at once, although the
 * actual state is always exactly one of them.
 */
#define __HLS_UP_INIT_BP 0
#define __HLS_UP_ARMED_BP 1
#define __HLS_UP_ACTIVE_BP 2
#define __HLS_DN_DOWNDEF_BP 3 /* link down default */
#define __HLS_DN_POLL_BP 4
#define __HLS_DN_DISABLE_BP 5
#define __HLS_DN_OFFLINE_BP 6
#define __HLS_VERIFY_CAP_BP 7
#define __HLS_GOING_UP_BP 8
#define __HLS_GOING_OFFLINE_BP 9
#define __HLS_LINK_COOLDOWN_BP 10
/* MTU to use when none is otherwise specified */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* default upper bound on the MTU */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff
/*
 * Fabric manager table selectors for fm_{get,set}_table().
 */
#define FM_TBL_VL_HIGH_ARB 1       /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB 2        /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL 3    /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT 4           /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS 5  /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX 6 /* Get (no set) VL preempt matrix */
/*
 * Operation flags for f_rcvctrl(ppd, op, ctxt). These are bits so
 * they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
#define HFI1_RCVCTRL_URGENT_ENB 0x40000
#define HFI1_RCVCTRL_URGENT_DIS 0x80000
/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;

	/* PHY support */
	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	u32 port_type;
	u32 tx_preset_eq;
	u32 tx_preset_noeq;
	u32 rx_preset;
	u8 local_atten;
	u8 remote_atten;
	u8 default_atten;
	u8 max_power_class;

	/* did we read platform config from scratch registers? */
	bool config_from_scratch;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];
	/* GUID for peer interface, in host order */
	u64 neighbor_guid;
	/* up or down physical link state */
	u32 linkup;
	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want. One qword of status per port
	 */
	u64 *statusp;
	u32 ibmtu; /* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 ibmaxlen;
	u32 current_egress_rate; /* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	u32 lid;
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;
	u8 actual_vls_operational;
	/* LID mask control */
	u8 lmc;
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 rx_pol_inv;

	u8 hw_pidx; /* physical port index */
	u32 port;   /* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_type;
	u8 neighbor_normal;
	u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready; /* driver ready for active link */
	u8 link_enabled;      /* link enabled? */
	u8 linkinit_reason;
	u8 local_tx_rate; /* rate given to 8051 firmware */
	u8 qsfp_retry_count;

	/* Used to override LED behavior for things like maintenance beaconing*/
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];	/* was fused "unsignedlong": compile error */
	u8 led_override_phase; /* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;

	u32 sm_trap_qp;
	u32 sa_qp;

	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	u16 total_cct_entry;

	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx; /* index for logging events */
	int cc_mad_idx; /* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

	/* port relative counter buffer */
	u64 *cntrs;
	/* port relative synthetic counter buffer */
	u64 *scntrs;
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	u64 link_downed;
	/* number of times link retrained successfully */
	u64 link_up;
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 mgmt_allowed;
	u8 part_enforce; /* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown .*/
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
	bool cc_prescan;
	/*
	 * Sample sendWaitCnt & sendWaitVlCnt during link transition
	 * and counter request.
	 */
	u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
	u16 prev_link_width;
	u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};
/*
 * Temperature reading with limits and trigger flags.
 * (Fused "unsignedint" keywords were compile errors.)
 */
struct hfi1_temp {
	unsigned int curr;     /* current temperature */
	unsigned int lo_lim;   /* low temperature limit */
	unsigned int hi_lim;   /* high temperature limit */
	unsigned int crit_lim; /* critical temperature limit */
	u8 triggers;           /* temperature triggers */
};
/* One I2C bus instance and its current controlling device. */
struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd; /* current controlling device */
	struct i2c_adapter adapter;          /* bus details */
	struct i2c_algo_bit_data algo;       /* bus algorithm details */
	int num;                             /* bus number, 0 or 1 */
};
/* Data shared between the HFIs on a common ASIC. */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2]; /* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};
/* table sizes shared by the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
/* Virtual NIC state kept per device. */
struct hfi1_vnic_data {
	struct kmem_cache *txreq_cache;
	u8 num_vports;
};
struct hfi1_vnic_vport_info;
/* device data struct now contains only "general per-device" info. * fields related to a physical IB port are in a hfi1_pportdata struct.
*/ struct sdma_engine; struct sdma_vl_map;
#define BOARD_VERS_MAX 96 /* maximum length of the version string */
#define SERIAL_MAX 16     /* length of the serial number */
typedefint (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64); struct hfi1_netdev_rx; struct hfi1_devdata { struct hfi1_ibdev verbs_dev; /* must be first */ /* pointers to related structs for this device */ /* pci access data structure */ struct pci_dev *pcidev; struct cdev user_cdev; struct cdev diag_cdev; struct cdev ui_cdev; struct device *user_device; struct device *diag_device; struct device *ui_device;
/* first mapping up to RcvArray */
u8 __iomem *kregbase1;
resource_size_t physaddr;
/* second uncached mapping from RcvArray to pio send buffers */
u8 __iomem *kregbase2; /* for detecting offset above kregbase2 address */
u32 base2_start;
/* Per VL data. Enough for all VLs but not all elements are set/used. */ struct per_vl_data vld[PER_VL_SEND_CONTEXTS]; /* send context data */ struct send_context_info *send_contexts; /* map hardware send contexts to software index */
u8 *hw_to_sw; /* spinlock for allocating and releasing send context resources */
spinlock_t sc_lock; /* lock for pio_map */
spinlock_t pio_map_lock; /* Send Context initialization lock. */
spinlock_t sc_init_lock; /* lock for sdma_map */
spinlock_t sde_map_lock; /* array of kernel send contexts */ struct send_context **kernel_send_context; /* array of vl maps */ struct pio_vl_map __rcu *pio_map; /* default flags to last descriptor */
u64 default_desc1;
/* fields common to all SDMA engines */
volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
dma_addr_t sdma_heads_phys; void *sdma_pad_dma; /* DMA'ed by chip */
dma_addr_t sdma_pad_phys; /* for deallocation */
size_t sdma_heads_size; /* num used */
u32 num_sdma; /* array of engines sized by num_sdma */ struct sdma_engine *per_sdma; /* array of vl maps */ struct sdma_vl_map __rcu *sdma_map; /* SPC freeze waitqueue and variable */
wait_queue_head_t sdma_unfreeze_wq;
atomic_t sdma_unfreeze_count;
u32 lcb_access_count; /* count of LCB users */
/* common data between shared ASIC HFIs in this OS */ struct hfi1_asic_data *asic_data;
/* mem-mapped pointer to base of PIO buffers */ void __iomem *piobase; /* * write-combining mem-mapped pointer to base of RcvArray * memory.
*/ void __iomem *rcvarray_wc; /* * credit return base - a per-NUMA range of DMA address that * the chip will use to update the per-context free counter
*/ struct credit_return_base *cr_base;
/* send context numbers and sizes for each type */ struct sc_config_sizes sc_sizes[SC_MAX];
char *boardname; /* human readable board info */
u64 ctx0_seq_drop;
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
u64 z_send_schedule;
u64 __percpu *send_schedule; /* number of reserved contexts for netdev usage */
u16 num_netdev_contexts; /* number of receive contexts in use by the driver */
u32 num_rcv_contexts; /* number of pio send contexts in use by the driver */
u32 num_send_contexts; /* * number of ctxts available for PSM open
*/
u32 freectxts; /* total number of available user/PSM contexts */
u32 num_user_contexts; /* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
spinlock_t uctxt_lock; /* protect rcd changes */ struct mutex dc8051_lock; /* exclusive access to 8051 */ struct workqueue_struct *update_cntr_wq; struct work_struct update_cntr_work; /* exclusive access to 8051 memory */
spinlock_t dc8051_memlock; int dc8051_timed_out; /* remember if the 8051 timed out */ /* * A page that will hold event notification bitmaps for all * contexts. This page will be mapped into all processes.
*/ unsignedlong *events; /* * per unit status, see also portdata statusp * mapped read-only into user processes so they can get unit and * IB link status cheaply
*/ struct hfi1_status *status;
/* revision register shadow */
u64 revision; /* Base GUID for device (network order) */
u64 base_guid;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
u8 dc_shutdown; /* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width; /* localbus speed in MHz */
u32 lbus_speed; int unit; /* unit # of this chip */ int node; /* home node of this chip */
/* save these PCI fields to restore after a reset */
u32 pcibar0;
u32 pcibar1;
u32 pci_rom;
u16 pci_command;
u16 pcie_devctl;
u16 pcie_lnkctl;
u16 pcie_devctl2;
u32 pci_msix0;
u32 pci_tph2;
/* * ASCII serial number, from flash, large enough for original * all digit strings, and longer serial number format
*/
u8 serial[SERIAL_MAX]; /* human readable board version */
u8 boardversion[BOARD_VERS_MAX];
u8 lbus_info[32]; /* human readable localbus info */ /* chip major rev, from CceRevision */
u8 majrev; /* chip minor rev, from CceRevision */
u8 minrev; /* hardware ID */
u8 hfi1_id; /* implementation code */
u8 icode; /* vAU of this device */
u8 vau; /* vCU of this device */
u8 vcu; /* link credits of this device */
u16 link_credits; /* initial vl15 credits to use */
u16 vl15_init;
/* * Cached value for vl15buf, read during verify cap interrupt. VL15 * credits are to be kept at 0 and set when handling the link-up * interrupt. This removes the possibility of receiving VL15 MAD * packets before this HFI is ready.
*/
u16 vl15buf_cached;
/* Misc small ints */
u8 n_krcv_queues;
u8 qos_shift;
/* * Software counters for the status bits defined by the * associated error status registers
*/
u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];
/* Software counter that spans all contexts */
u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS]; /* Software counter that spans all DMA engines */
u64 sw_send_dma_eng_err_status_cnt[
NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS]; /* Software counter that aggregates all cce_err_status errors */
u64 sw_cce_err_status_aggregate; /* Software counter that aggregates all bypass packet rcv errors */
u64 sw_rcv_bypass_packet_errors;
/* Save the enabled LCB error bits */
u64 lcb_err_en; struct cpu_mask_set *comp_vect; int *comp_vect_mappings;
u32 comp_vect_possible_cpus;
/* * Capability to have different send engines simply by changing a * pointer value.
*/
send_routine process_pio_send ____cacheline_aligned_in_smp;
send_routine process_dma_send; void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, constvoid *from, size_t count); int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx, struct hfi1_vnic_vport_info *vinfo, struct sk_buff *skb, u64 pbc, u8 plen); /* hfi1_pportdata, points to array of (physical) port-specific * data structs, indexed by pidx (0..n-1)
*/ struct hfi1_pportdata *pport; /* receive context data */ struct hfi1_ctxtdata **rcd;
u64 __percpu *int_counter; /* verbs tx opcode stats */ struct hfi1_opcode_stats_perctx __percpu *tx_opstats; /* device (not port) flags, basically device capabilities */
u16 flags; /* Number of physical ports available */
u8 num_pports; /* Lowest context number which can be used by user processes or VNIC */
u8 first_dyn_alloc_ctxt; /* adding a new field here would make it part of this cacheline */
/* seqlock for sc2vl */
seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
u64 sc2vl[4];
u64 __percpu *rcv_limit; /* adding a new field here would make it part of this cacheline */
/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
u8 oui1;
u8 oui2;
u8 oui3;
/* Timer and counter used to detect RcvBufOvflCnt changes */ struct timer_list rcverr_timer;
u32 rcv_ovfl_cnt; /* Serialize ASPM enable/disable between multiple verbs contexts */
spinlock_t aspm_lock; /* Number of verbs contexts which have disabled ASPM */
atomic_t aspm_disabled_cnt; /* Keeps track of user space clients */
refcount_t user_refcount; /* Used to wait for outstanding user space clients before dev removal */ struct completion user_comp;
bool eprom_available; /* true if EPROM is available for this device */ bool aspm_supported; /* Does HW support ASPM */ bool aspm_enabled; /* ASPM state: enabled/disabled */ struct rhashtable *sdma_rht;
/* vnic data */ struct hfi1_vnic_data vnic; /* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock; int vnic_num_vports; struct hfi1_netdev_rx *netdev_rx; struct hfi1_affinity_node *affinity_entry;
/** * hfi1_rcd_head - add accessor for rcd head * @rcd: the context
*/ staticinline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
{ return rcd->head;
}
/**
 * hfi1_set_rcd_head - set the context's rcvhdrq head offset
 * @rcd: the context
 * @head: the new head
 */
static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
{
	rcd->head = head;
}
/** * hfi1_seq_incr_wrap - wrapping increment for sequence * @seq: the current sequence number * * Returns: the incremented seq
*/ staticinline u8 hfi1_seq_incr_wrap(u8 seq)
{ if (++seq > RHF_MAX_SEQ)
seq = 1; return seq;
}
/** * hfi1_seq_cnt - return seq_cnt member * @rcd: the receive context * * Return seq_cnt member
*/ staticinline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
{ return rcd->seq_cnt;
}
/**
 * hfi1_set_seq_cnt - set seq_cnt member
 * @rcd: the receive context
 * @cnt: the new seq_cnt value
 *
 * (The original comment said "Return seq_cnt member", which described
 * the getter, not this setter.)
 */
static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
{
	rcd->seq_cnt = cnt;
}
/** * last_rcv_seq - is last * @rcd: the receive context * @seq: sequence * * return true if last packet
*/ staticinlinebool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
{ return seq != rcd->seq_cnt;
}
/** * rcd_seq_incr - increment context sequence number * @rcd: the receive context * @seq: the current sequence number * * Returns: true if the this was the last packet
*/ staticinlinebool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
{
rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); return last_rcv_seq(rcd, seq);
}
/** * hfi1_is_slowpath - check if this context is slow path * @rcd: the receive context
*/ staticinlinebool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
{ return rcd->do_interrupt == rcd->slow_handler;
}
/** * hfi1_is_fastpath - check if this context is fast path * @rcd: the receive context
*/ staticinlinebool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
{ if (rcd->ctxt == HFI1_CTRL_CTXT) returnfalse;
return rcd->do_interrupt == rcd->fast_handler;
}
/**
 * hfi1_set_fast - change to the fast handler
 * @rcd: the receive context; NULL is tolerated and ignored
 */
static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
{
	if (unlikely(!rcd))
		return;
	if (unlikely(!hfi1_is_fastpath(rcd)))
		rcd->do_interrupt = rcd->fast_handler;
}
switch (link_width) { case OPA_LINK_WIDTH_4X:
egress_rate *= 4; break; case OPA_LINK_WIDTH_3X:
egress_rate *= 3; break; case OPA_LINK_WIDTH_2X:
egress_rate *= 2; break; default: /* assume IB_WIDTH_1X */ break;
}
return egress_rate;
}
/* * egress_cycles * * Returns the number of 'fabric clock cycles' to egress a packet * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock * rate is (approximately) 805 MHz, the units of the returned value * are (1/805 MHz).
*/ staticinline u32 egress_cycles(u32 len, u32 rate)
{
u32 cycles;
/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}
/* * ingress_pkey_table_fail - record a failure of ingress pkey validation, * i.e., increment port_rcv_constraint_errors for the port, and record * the 'error info' for this failure.
*/ staticvoid ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
u32 slid)
{ struct hfi1_devdata *dd = ppd->dd;
/* * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx * is a hint as to the best place in the partition key table to begin * searching. This function should not be called on the data path because * of performance reasons. On datapath pkey check is expected to be done * by HW and rcv_pkey_check function should be called instead.
*/ staticinlineint ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
u8 sc5, u8 idx, u32 slid, bool force)
{ if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) return 0;
/* If SC15, pkey[0:14] must be 0x7fff */ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) goto bad;
/* Is the pkey = 0x0, or 0x8000? */ if ((pkey & PKEY_LOW_15_MASK) == 0) goto bad;
/* The most likely matching pkey has index 'idx' */ if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx])) return 0;
/* no match - try the whole table */ if (!ingress_pkey_table_search(ppd, pkey)) return 0;
/* * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1 * otherwise. It only ensures pkey is vlid for QP0. This function * should be called on the data path instead of ingress_pkey_check * as on data path, pkey check is done by HW (except for QP0).
*/ staticinlineint rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
u8 sc5, u16 slid)
{ if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) return 0;
/* If SC15, pkey[0:14] must be 0x7fff */ if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) goto bad;
/** * hfi1_may_ecn - Check whether FECN or BECN processing should be done * @pkt: the packet to be evaluated * * Check whether the FECN or BECN bits in the packet's header are * enabled, depending on packet type. * * This function only checks for FECN and BECN bits. Additional checks * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to * ensure correct handling.
*/ staticinlinebool hfi1_may_ecn(struct hfi1_packet *pkt)
{ bool fecn, becn;
/* * Called by readers of cc_state only, must call under rcu_read_lock().
*/ staticinlinestruct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{ return rcu_dereference(ppd->cc_state);
}
/*
 * Called by writers of cc_state only, must call under cc_state_lock.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
	return rcu_dereference_protected(ppd->cc_state,
					 lockdep_is_held(&ppd->cc_state_lock));
}
/*
 * Bit values for dd->flags (_device_ related flags).
 */
#define HFI1_INITTED 0x1          /* chip and driver up and initted */
#define HFI1_PRESENT 0x2          /* chip accesses can be done */
#define HFI1_FROZEN 0x4           /* chip in SPC freeze */
#define HFI1_HAS_SDMA_TIMEOUT 0x8
#define HFI1_HAS_SEND_DMA 0x10    /* Supports Send DMA */
#define HFI1_FORCED_FREEZE 0x80   /* driver forced freeze mode */
#define HFI1_SHUTDOWN 0x100       /* device is shutting down */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
/* ctxt_flag bit offsets */
/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT 1
/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED 2
/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV 3
/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */ int hfi1_init_dd(struct hfi1_devdata *dd); void hfi1_free_devdata(struct hfi1_devdata *dd);
/*
 * Number of 32-bit words for the KDETH protocol field. If this is
 * larger than the actual field used, part of the payload lands in
 * the header.
 *
 * Optimally this is sized so the typical case fills whole cache
 * lines. A typical local KDETH header is:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 12	BHT
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 28 + KDETH
 *
 * For a 64-byte cache line, KDETH would need to be 36 bytes, i.e. 9 DWORDS.
 */
#define DEFAULT_RCVHDRSIZE 9
/*
 * Maximal header byte count:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 40	GRH (optional)
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 68 + KDETH
 *
 * Rounded up to keep cache-line alignment and assist DMA of the
 * header bytes.
 */
#define DEFAULT_RCVHDR_ENTSIZE 32
staticinline u32 get_rcvhdrtail(conststruct hfi1_ctxtdata *rcd)
{ /* * volatile because it's a DMA target from the chip, routine is * inlined, and don't want register caching or reordering.
*/ return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
}
int hfi1_device_create(struct hfi1_devdata *dd); void hfi1_device_remove(struct hfi1_devdata *dd);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd); void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd); /* Hook for sysfs read of QSFP */ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
int hfi1_pcie_init(struct hfi1_devdata *dd); void hfi1_pcie_cleanup(struct pci_dev *pdev); int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev); void hfi1_pcie_ddcleanup(struct hfi1_devdata *); int pcie_speeds(struct hfi1_devdata *dd); int restore_pci_variables(struct hfi1_devdata *dd); int save_pci_variables(struct hfi1_devdata *dd); int do_pcie_gen3_transition(struct hfi1_devdata *dd); void tune_pcie_caps(struct hfi1_devdata *dd); int parse_platform_config(struct hfi1_devdata *dd); int get_platform_config_field(struct hfi1_devdata *dd, enum platform_config_table_type_encoding
table_type, int table_index, int field_index,
u32 *data, u32 len);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.