/*
 * Module parameters bounding the verbs resources advertised to rdmavt.
 * All are read-only at runtime (S_IRUGO); set them at module load time.
 */
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 32768;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");

unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");

/* forward declaration: SDMA completion callback used by the tx builders */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24

static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold,
		 "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");

static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period,
		 "Count of verbs copies before an entry in the page copy table is cleaned");

/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;
/* * Make sure the QP is ready and able to accept the given opcode.
*/ staticinline opcode_handler qp_ok(struct hfi1_packet *packet)
{ if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) return NULL; if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
packet->qp->allowed_ops) ||
(packet->opcode == IB_OPCODE_CNP)) return opcode_handler_tbl[packet->opcode];
return NULL;
}
/*
 * Fault-injection hook: corrupt the PBC so the fabric discards the
 * packet.  Without CONFIG_FAULT_INJECTION this returns @pbc unchanged.
 */
static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
	if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
		/*
		 * In order to drop non-IB traffic we
		 * set PbcInsertHrc to NONE (0x2).
		 * The packet will still be delivered
		 * to the receiving node but a
		 * KHdrHCRCErr (KDETH packet with a bad
		 * HCRC) will be triggered and the
		 * packet will not be delivered to the
		 * correct context.
		 */
		pbc &= ~PBC_INSERT_HCRC_SMASK;
		pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
	} else {
		/*
		 * In order to drop regular verbs
		 * traffic we set the PbcTestEbp
		 * flag. The packet will still be
		 * delivered to the receiving node but
		 * a 'late ebp error' will be
		 * triggered and will be dropped.
		 */
		pbc |= PBC_TEST_EBP;
	}
#endif
	return pbc;
}
/** * hfi1_ib_rcv - process an incoming packet * @packet: data packet information * * This is called to process an incoming packet at interrupt level.
*/ void hfi1_ib_rcv(struct hfi1_packet *packet)
{ struct hfi1_ctxtdata *rcd = packet->rcd;
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 *
 * Wakes at most one waiter per expiry; if more QPs remain on the
 * memwait list the timer is re-armed for the next jiffy.
 */
static void mem_timer(struct timer_list *t)
{
	struct hfi1_ibdev *dev = timer_container_of(dev, t, mem_timer);
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
/* * This is called with progress side lock held.
*/ /* New API */ staticvoid verbs_sdma_complete( struct sdma_txreq *cookie, int status)
{ struct verbs_txreq *tx =
container_of(cookie, struct verbs_txreq, txreq); struct rvt_qp *qp = tx->qp;
/** * update_tx_opstats - record stats by opcode * @qp: the qp * @ps: transmit packet state * @plen: the plen in dwords * * This is a routine to record the tx opstats after a * packet has been presented to the egress mechanism.
*/ staticvoid update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u32 plen)
{ #ifdef CONFIG_DEBUG_FS struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
/* * Build the number of DMA descriptors needed to send length bytes of data. * * NOTE: DMA mapping is held in the tx until completed in the ring or * the tx desc is freed without having been submitted to the ring * * This routine ensures all the helper routine calls succeed.
*/ /* New API */ staticint build_verbs_tx_desc( struct sdma_engine *sde,
u32 length, struct verbs_txreq *tx, struct hfi1_ahg_info *ahg_info,
u64 pbc)
{ int ret = 0; struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
u8 extra_bytes = 0;
if (tx->phdr.hdr.hdr_type) { /* * hdrbytes accounts for PBC. Need to subtract 8 bytes * before calculating padding.
*/
extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
(SIZE_OF_CRC << 2) + SIZE_OF_LT;
} if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
&tx->txreq,
ahg_info->tx_flags,
hdrbytes + length +
extra_bytes,
ahg_info->ahgidx,
0,
NULL,
0,
verbs_sdma_complete); if (ret) goto bail_txadd;
phdr->pbc = cpu_to_le64(pbc);
ret = sdma_txadd_kvaddr(
sde->dd,
&tx->txreq,
phdr,
hdrbytes); if (ret) goto bail_txadd;
} else {
ret = sdma_txinit_ahg(
&tx->txreq,
ahg_info->tx_flags,
length,
ahg_info->ahgidx,
ahg_info->ahgcount,
ahg_info->ahgdesc,
hdrbytes,
verbs_sdma_complete); if (ret) goto bail_txadd;
} /* add the ulp payload - if any. tx->ss can be NULL for acks */ if (tx->ss) {
ret = build_verbs_ulp_payload(sde, length, tx); if (ret) goto bail_txadd;
}
/* add icrc, lt byte, and padding to flit */ if (extra_bytes)
ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
extra_bytes);
tx = ps->s_txreq; if (!sdma_txreq_built(&tx->txreq)) { if (likely(pbc == 0)) {
u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
/* No vl15 here */ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */ if (ps->s_txreq->phdr.hdr.hdr_type)
pbc |= PBC_PACKET_BYPASS |
PBC_INSERT_BYPASS_ICRC; else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
bail_ecomm: /* The current one got "sent" */ return 0;
bail_build:
ret = wait_kmem(dev, qp, ps); if (!ret) { /* free txreq - bad state */
hfi1_put_txreq(ps->s_txreq);
ps->s_txreq = NULL;
} return ret;
}
/*
 * Queue the QP to wait for a PIO buffer.
 *
 * If we are now in the error state, return zero to flush the
 * send work request.  Otherwise the txreq is parked on the wait
 * list and -EBUSY is returned so the caller retries later.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&sc->waitlock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &ps->wait->tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			iowait_get_priority(&priv->s_iowait);
			iowait_queue(ps->pkts_sent, &priv->s_iowait,
				     &sc->piowait);
			priv->s_iowait.lock = &sc->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			rvt_get_qp(qp);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&sc->waitlock);
		hfi1_qp_unbusy(qp, ps->wait);
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/* only RC/UC use complete */ switch (qp->ibqp.qp_type) { case IB_QPT_RC: case IB_QPT_UC:
cb = verbs_pio_complete; break; default: break;
}
/* vl15 special case taken care of in ud.c */
sc5 = priv->s_sc;
sc = ps->s_txreq->psc;
if (likely(pbc == 0)) {
u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
/* set PBC_DC_INFO bit (aka SC[4]) in pbc */ if (ps->s_txreq->phdr.hdr.hdr_type)
pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC; else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc); else /* Update HCRC based on packet opcode */
pbc = update_hcrc(ps->opcode, pbc);
} if (cb)
iowait_pio_inc(&priv->s_iowait);
pbuf = sc_buffer_alloc(sc, plen, cb, qp); if (IS_ERR_OR_NULL(pbuf)) { if (cb)
verbs_pio_complete(qp, 0); if (IS_ERR(pbuf)) { /* * If we have filled the PIO buffers to capacity and are * not in an active state this request is not going to * go out to so just complete it with an error or else a * ULP or the core may be stuck waiting.
*/
hfi1_cdbg(
PIO, "alloc failed. state not active, completing");
wc_status = IB_WC_GENERAL_ERR; goto pio_bail;
} else { /* * This is a normal occurrence. The PIO buffs are full * up but we are still happily sending, well we could be * so lets continue to queue the request.
*/
hfi1_cdbg(PIO, "alloc failed. state active, queuing");
ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO); if (!ret) /* txreq not queued - free */ goto bail; /* tx consumed in wait */ return ret;
}
}
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.1l.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 mentry = ent & PKEY_LOW_15_MASK;

	if (mkey == mentry) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @slid: SLID for packet
 * @pkey: PKEY for header
 * @sc5: SC for packet
 * @s_pkey_index: It will be used for look up optimization for kernel contexts
 * only. If it is negative value, then it means user contexts is calling this
 * function.
 *
 * It checks if hdr's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index)
{
	struct hfi1_devdata *dd;
	int i;
	int is_user_ctxt_mechanism = (s_pkey_index < 0);

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/*
	 * For the kernel contexts only, if a qp is passed into the function,
	 * the most likely matching pkey has index qp->s_pkey_index
	 */
	if (!is_user_ctxt_mechanism &&
	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
		return 0;
	}

	/* fall back to a full scan of the partition key table */
	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
bad:
	/*
	 * For the user-context mechanism, the P_KEY check would only happen
	 * once per SDMA request, not once per packet. Therefore, there's no
	 * need to increment the counter for the user-context mechanism.
	 */
	if (!is_user_ctxt_mechanism) {
		incr_cntr64(&ppd->port_xmit_constraint_errors);
		dd = ppd->dd;
		/* record first-error info only once until it is cleared */
		if (!(dd->err_info_xmit_constraint.status &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_xmit_constraint.status |=
				OPA_EI_STATUS_SMASK;
			dd->err_info_xmit_constraint.slid = slid;
			dd->err_info_xmit_constraint.pkey = pkey;
		}
	}
	return 1;
}
/* * get_send_routine - choose an egress routine * * Choose an egress routine based on QP type * and size
*/ staticinline send_routine get_send_routine(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; struct verbs_txreq *tx = ps->s_txreq;
if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) return dd->process_pio_send; switch (qp->ibqp.qp_type) { case IB_QPT_SMI: return dd->process_pio_send; case IB_QPT_GSI: case IB_QPT_UD: break; case IB_QPT_UC: case IB_QPT_RC:
priv->s_running_pkt_size =
(tx->s_cur_size + priv->s_running_pkt_size) / 2; if (piothreshold &&
priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) &&
(BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq)) return dd->process_pio_send; break; default: break;
} return dd->process_dma_send;
}
/** * hfi1_verbs_send - send a packet * @qp: the QP to send on * @ps: the state of the packet to send * * Return zero if packet is sent or queued OK. * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; struct ib_other_headers *ohdr = NULL;
send_routine sr; int ret;
u16 pkey;
u32 slid;
u8 l4 = 0;
/* locate the pkey within the headers */ if (ps->s_txreq->phdr.hdr.hdr_type) { struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
sr = get_send_routine(qp, ps);
ret = egress_pkey_check(dd->pport, slid, pkey,
priv->s_sc, qp->s_pkey_index); if (unlikely(ret)) { /* * The value we are returning here does not get propagated to * the verbs caller. Thus we need to complete the request with * error otherwise the caller could be sitting waiting on the * completion event. Only do this for PIO. SDMA has its own * mechanism for handling the errors. So for SDMA we can just * return.
*/ if (sr == dd->process_pio_send) { unsignedlong flags;
/** * hfi1_fill_device_attr - Fill in rvt dev info device attributes. * @dd: the device data structure
*/ staticvoid hfi1_fill_device_attr(struct hfi1_devdata *dd)
{ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u32 ver = dd->dc8051_ver;
staticinline u16 opa_speed_to_ib(u16 in)
{
u16 out = 0;
if (in & OPA_LINK_SPEED_25G)
out |= IB_SPEED_EDR; if (in & OPA_LINK_SPEED_12_5G)
out |= IB_SPEED_FDR;
return out;
}
/* * Convert a single OPA link width (no multiple flags) to an IB value. * A zero OPA link width means link down, which means the IB width value * is a don't care.
*/ staticinline u16 opa_width_to_ib(u16 in)
{ switch (in) { case OPA_LINK_WIDTH_1X: /* map 2x and 3x to 1x as they don't exist in IB */ case OPA_LINK_WIDTH_2X: case OPA_LINK_WIDTH_3X: return IB_WIDTH_1X; default: /* link down or unknown, return our largest width */ case OPA_LINK_WIDTH_4X: return IB_WIDTH_4X;
}
}
/* props being zeroed by the caller, avoid zeroing it here */
props->lid = lid ? lid : 0;
props->lmc = ppd->lmc; /* OPA logical states match IB logical states */
props->state = driver_lstate(ppd);
props->phys_state = driver_pstate(ppd);
props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */
props->active_speed = opa_speed_to_ib(ppd->link_speed_active);
props->max_vl_num = ppd->vls_supported;
/* Once we are a "first class" citizen and have added the OPA MTUs to * the core we can advertise the larger MTU enum to the ULPs, for now * advertise only 4K. * * Those applications which are either OPA aware or pass the MTU enum * from the Path Records to us will get the new 8k MTU. Those that * attempt to process the MTU enum may fail in various ways.
*/
props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
props->phys_mtu = hfi1_max_mtu;
return 0;
}
staticint modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify)
{ struct hfi1_devdata *dd = dd_from_ibdev(device); unsigned i; int ret;
if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
ret = -EOPNOTSUPP; goto bail;
}
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
memcpy(device->node_desc, device_modify->node_desc,
IB_DEVICE_NODE_DESC_MAX); for (i = 0; i < dd->num_pports; i++) { struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
hfi1_node_desc_chg(ibp);
}
}
if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
ib_hfi1_sys_image_guid =
cpu_to_be64(device_modify->sys_image_guid); for (i = 0; i < dd->num_pports; i++) { struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
for (i = 0; i < sz; i++) {
ibp->sl_to_sc[i] = i;
ibp->sc_to_sl[i] = i;
}
for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
spin_lock_init(&ibp->rvp.lock); /* Set the prefix to the default value (see ch. 4.1.1) */
ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
ibp->rvp.sm_lid = 0; /* * Below should only set bits defined in OPA PortInfo.CapabilityMask * and PortInfo.CapabilityMask3
*/
ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
IB_PORT_CAP_MASK_NOTICE_SUP;
ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
/* * Convert a list of names separated by '\n' into an array of NULL terminated * strings. Optionally some entries can be reserved in the array to hold extra * external strings.
*/ staticint init_cntr_names(constchar *names_in, const size_t names_len, int num_extra_names, int *num_cntrs, struct rdma_stat_desc **cntr_descs)
{ struct rdma_stat_desc *names_out; char *p; int i, n;
n = 0; for (i = 0; i < names_len; i++) if (names_in[i] == '\n')
n++;
ret = verbs_txreq_init(dev); if (ret) goto err_verbs_txreq;
/* Use first-port GUID as node guid */
ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
/* * The system image GUID is supposed to be the same for all * HFIs in a single system but since there can be other * device types in the system, we can't be sure this is unique.
*/ if (!ib_hfi1_sys_image_guid)
ib_hfi1_sys_image_guid = ibdev->node_guid;
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dev.parent = &dd->pcidev->dev;
if (!list_empty(&dev->txwait))
dd_dev_err(dd, "txwait list not empty!\n"); if (!list_empty(&dev->memwait))
dd_dev_err(dd, "memwait list not empty!\n");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.