if (!lock) return;
write_seqlock_irqsave(lock, flags); if (!list_empty(&priv->s_iowait.list)) {
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
}
write_sequnlock_irqrestore(lock, flags);
}
/* * This function is what we would push to the core layer if we wanted to be a * "first class citizen". Instead we hide this here and rely on Verbs ULPs * to blindly pass the MTU enum value from the PathRecord to us.
*/ staticinlineint verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{ /* Constraining 10KB packets to 8KB packets */ if (mtu == (enum ib_mtu)OPA_MTU_10240)
mtu = (enum ib_mtu)OPA_MTU_8192; return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
if (attr_mask & IB_QP_AV) {
sc = ah_to_sc(ibqp->device, &attr->ah_attr); if (sc == 0xf) return -EINVAL;
if (!qp_to_sdma_engine(qp, sc) &&
dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL;
if (!qp_to_send_context(qp, sc)) return -EINVAL;
}
if (attr_mask & IB_QP_ALT_PATH) {
sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr); if (sc == 0xf) return -EINVAL;
if (!qp_to_sdma_engine(qp, sc) &&
dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL;
if (!qp_to_send_context(qp, sc)) return -EINVAL;
}
return 0;
}
/* * qp_set_16b - Set the hdr_type based on whether the slid or the * dlid in the connection is extended. Only applicable for RC and UC * QPs. UD QPs determine this on the fly from the ah in the wqe
*/ staticinlinevoid qp_set_16b(struct rvt_qp *qp)
{ struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct hfi1_qp_priv *priv = qp->priv;
/* Update ah_attr to account for extended LIDs */
hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);
/* Create 32 bit LIDs */
hfi1_make_opa_lid(&qp->remote_ah_attr);
if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) return;
/**
 * hfi1_setup_wqe - set up the wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe.  This is called prior to inserting the
 * wqe into the ring but after the wqe has been setup by RDMAVT.
 * This function allows the driver the opportunity to perform
 * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		hfi1_setup_tid_rdma_wqe(qp, wqe);
		fallthrough;
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI: {
		/*
		 * SM packets should exclusively use VL15 and their SL is
		 * ignored (IBTA v1.3, Section 3.5.8.2).  Therefore, when ah
		 * is created, SL is 0 in most cases and as a result some
		 * fields (vl and pmtu) in ah may not be set correctly,
		 * depending on the SL2SC and SC2VL tables at the time.
		 */
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		struct hfi1_devdata *dd = dd_from_ppd(ppd);

		if (wqe->length > dd->vld[15].mtu)
			return -EINVAL;
		break;
	}
	case IB_QPT_GSI:
	case IB_QPT_UD: {
		struct rvt_ah *ah = rvt_get_swqe_ah(wqe);

		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
		break;
	}
	default:
		break;
	}

	/*
	 * System latency between send and schedule is large enough that
	 * forcing call_send to true for piothreshold packets is necessary.
	 */
	if (wqe->length <= piothreshold)
		*call_send = true;

	return 0;
}
/** * _hfi1_schedule_send - schedule progress * @qp: the QP * * This schedules qp progress w/o regard to the s_flags. * * It is only used in the post send, which doesn't hold * the s_lock.
*/ bool _hfi1_schedule_send(struct rvt_qp *qp)
{ struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_devdata *dd = ppd->dd;
if (!priv->s_sendcontext) return; while (iowait_pio_pending(&priv->s_iowait)) {
write_seqlock_irq(&priv->s_sendcontext->waitlock);
hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
write_sequnlock_irq(&priv->s_sendcontext->waitlock);
iowait_pio_drain(&priv->s_iowait);
write_seqlock_irq(&priv->s_sendcontext->waitlock);
hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
write_sequnlock_irq(&priv->s_sendcontext->waitlock);
}
}
/** * hfi1_schedule_send - schedule progress * @qp: the QP * * This schedules qp progress and caller should hold * the s_lock. * @return true if the first leg is scheduled; * false if the first leg is not scheduled.
*/ bool hfi1_schedule_send(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock); if (hfi1_send_ok(qp)) {
_hfi1_schedule_send(qp); returntrue;
} if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
IOWAIT_PENDING_IB); returnfalse;
}
if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
ret = hfi1_schedule_send(qp); if (ret)
iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
} if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
ret = hfi1_schedule_tid_send(qp); if (ret)
iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
}
}
if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
qp->s_flags &= ~RVT_S_BUSY; /* * If we are sending a first-leg packet from the second leg, * we need to clear the busy flag from priv->s_flags to * avoid a race condition when the qp wakes up before * the call to hfi1_verbs_send() returns to the second * leg. In that case, the second leg will terminate without * being re-scheduled, resulting in failure to send TID RDMA * WRITE DATA and TID RDMA ACK packets.
*/ if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
RVT_S_BUSY);
iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
}
} else {
priv->s_flags &= ~RVT_S_BUSY;
}
}
spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { /* * If we couldn't queue the DMA request, save the info * and try again later rather than destroying the * buffer and undoing the side effects of the copy.
*/ /* Make a common routine? */
list_add_tail(&stx->list, &wait->tx_head);
write_seqlock(&sde->waitlock); if (sdma_progress(sde, seq, stx)) goto eagain; if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
/* * This happens when the send engine notes * a QP in the error state and cannot * do the flush work until that QP's * sdma work has finished.
*/
spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
}
if (qp->s_flags & RVT_S_ACK_PENDING)
w->priority++; if (priv->s_flags & RVT_S_ACK_PENDING)
w->priority++;
}
/** * qp_to_sdma_engine - map a qp to a send engine * @qp: the QP * @sc5: the 5 bit sc * * Return: * A send engine for the qp or NULL for SMI type qp.
*/ struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct sdma_engine *sde;
/** * qp_to_send_context - map a qp to a send context * @qp: the QP * @sc5: the 5 bit sc * * Return: * A send context for the qp
*/ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
switch (qp->ibqp.qp_type) { case IB_QPT_SMI: /* SMA packets to VL15 */ return dd->vld[15].sc; default: break;
}
/* Clear any OPFN state */ if (qp->ibqp.qp_type == IB_QPT_RC)
opfn_conn_error(qp);
}
/* * Switch to alternate path. * The QP s_lock should be held and interrupts disabled.
*/ void hfi1_migrate_qp(struct rvt_qp *qp)
{ struct hfi1_qp_priv *priv = qp->priv; struct ib_event ev;
/** * hfi1_qp_iter_cb - callback for iterator * @qp: the qp * @v: the sl in low bits of v * * This is called from the iterator callback to work * on an individual qp.
*/ staticvoid hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{ int lastwqe; struct ib_event ev; struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u8 sl = (u8)v;
/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper lay apps to abandon stale qps
 * after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/* Walk every QP on this device; the callback filters by sl */
	rvt_qp_iter(&ppd->dd->verbs_dev.rdi, sl, hfi1_qp_iter_cb);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.18 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.