/* Caller must hold ie->list_lock while the work list is walked. */
lockdep_assert_held(&ie->list_lock);
/* If the descriptor made it onto the irq entry's work list, unlink it. */
list_for_each_entry_safe(d, n, &ie->work_list, list) {
	if (d == desc) {
		list_del(&d->list);
		return d;
	}
}

/*
 * At this point, the desc that needs to be aborted is held by the
 * completion handler, which has taken it off the pending list but has
 * not yet added it to the work list. It will be cleaned up by the
 * interrupt handler when it sees the IDXD_COMP_DESC_ABORT completion
 * status.
 */
return NULL;
}
/*
 * Mark the descriptor aborted up front so the interrupt handler will
 * recognize IDXD_COMP_DESC_ABORT and clean it up if it races with us.
 */
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
 * Grab the list lock so it will block the irq thread handler. This allows
 * the abort code to locate the descriptor that needs to be aborted.
 */
spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
	llist_for_each_entry_safe(d, t, head, llnode) {
		if (d == desc) {
			found = desc;
			continue;
		}

		/* Already-completed descriptors get drained; the rest go back. */
		if (d->completion->status)
			list_add_tail(&d->list, &flist);
		else
			list_add_tail(&d->list, &ie->work_list);
	}
}

/* Not on the pending llist — it may already be on the work list. */
if (!found)
	found = list_abort_desc(wq, ie, desc);
spin_unlock(&ie->list_lock);

if (found)
	idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false,
			      NULL, NULL);

/*
 * Completing the descriptor will return desc to the allocator and the
 * desc can be acquired by a different process and desc->list can be
 * modified. Delete desc from the list so the list traversal does not get
 * corrupted by the other process.
 *
 * FIX: complete each drained descriptor `d`, not `found` — the original
 * completed `found` once per flist entry and leaked every entry on flist.
 */
list_for_each_entry_safe(d, t, &flist, list) {
	list_del_init(&d->list);
	idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true, NULL, NULL);
}
}
/*
 * ENQCMDS typically fails when the WQ is inactive or busy. On host
 * submission, the driver has better control of the number of descriptors
 * being submitted to a shared wq by limiting the number of driver-allocated
 * descriptors to the wq size. However, when the swq is exported to a guest
 * kernel, it may be shared with multiple guest kernels. This means the
 * likelihood of getting busy returned on the swq when submitting goes up
 * significantly. Having a tunable retry mechanism allows the driver to keep
 * trying for a bit before giving up. The sysfs knob can be tuned by the
 * system administrator.
 *
 * Returns 0 on successful submission, or the last non-zero enqcmds()
 * result once the retry budget is exhausted.
 *
 * FIX: restored the spaces lost in extraction ("constvoid", "unsignedint")
 * and the missing "return rc;" / closing brace that truncated the function.
 */
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
{
	unsigned int retries = wq->enqcmds_retries;
	int rc;

	do {
		rc = enqcmds(portal, desc);
		if (rc == 0)
			break;
		cpu_relax();
	} while (retries--);

	return rc;
}
/*
 * NOTE(review): this is the interior of the submit path; `ie`, `desc_flags`
 * and `rc` are declared in the function prologue not shown here — confirm
 * against the full file.
 */
/* If the wq is being resurrected, wait for it and retry the live check. */
if (!percpu_ref_tryget_live(&wq->wq_active)) {
	wait_for_completion(&wq->wq_resurrect);
	if (!percpu_ref_tryget_live(&wq->wq_active))
		return -ENXIO;
}

portal = idxd_wq_portal_addr(wq);

/*
 * Pend the descriptor to the lockless list for the irq_entry
 * that we designated the descriptor to.
 */
if (desc_flags & IDXD_OP_FLAG_RCI) {
	ie = &wq->ie;
	desc->hw->int_handle = ie->int_handle;
	llist_add(&desc->llnode, &ie->pending_llist);
}

/*
 * The wmb() flushes writes to coherent DMA data before
 * possibly triggering a DMA read. The wmb() is necessary
 * even on UP because the recipient is a device.
 */
wmb();

if (wq_dedicated(wq)) {
	/* Dedicated wq: plain 64-byte MMIO write, cannot be rejected. */
	iosubmit_cmds512(portal, desc->hw, 1);
} else {
	/* Shared wq: ENQCMDS may fail; retries handled by idxd_enqcmds(). */
	rc = idxd_enqcmds(wq, portal, desc->hw);
	if (rc < 0) {
		percpu_ref_put(&wq->wq_active);
		/* abort operation frees the descriptor */
		if (ie)
			llist_abort_desc(wq, ie, desc);
		return rc;
	}
}
/*
 * Extraneous boilerplate carried over from the extraction source (a web
 * page disclaimer, translated from German): "The information on this
 * website has been carefully compiled to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the
 * information provided is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 * This text is not part of the driver source and should be removed.
 */