/* Per-command context kept for each outstanding admin command. */
struct fun_cmd_ctx {
	fun_admin_callback_t cb;	/* callback to invoke on completion */
	void *cb_data;			/* user data provided to callback */
	int cpu;			/* CPU where the cmd's tag was allocated */
};
/* Check that CSTS RDY has the expected value. Then write a new value to the CC * register and wait for CSTS RDY to match the new CC ENABLE state.
*/ staticint fun_update_cc_enable(struct fun_dev *fdev, unsignedint initial_rdy)
{ int rc = fun_check_csts_rdy(fdev, initial_rdy);
/* NOTE(review): the code below does NOT belong to fun_update_cc_enable -- it
 * is the interior of the admin-CQE completion handler spliced in here by a
 * broken extraction (note also the fused "staticint"/"unsignedint" tokens
 * above). The identifiers info/entry/rsp_common/cid/cmd_ctx/cpu are not
 * declared in this scope; recover the original function bodies before
 * relying on this code.
 */
/* A CQE whose sqhd is 0xffff is an unsolicited async event rather than a
 * reply to a pending command; hand it to the adminq event callback if one
 * is registered. */
if (info->sqhd == cpu_to_be16(0xffff)) {
dev_dbg(fdev->dev, "adminq event"); if (fdev->adminq_cb)
fdev->adminq_cb(fdev, entry); return;
}
cid = be16_to_cpu(rsp_common->cid);
dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
rsp_common->op, rsp_common->ret);
/* Look up the issuing command's context by CID; cpu < 0 marks a slot with
 * no pending command. */
cmd_ctx = &fdev->cmd_ctx[cid]; if (cmd_ctx->cpu < 0) {
dev_err(fdev->dev, "admin CQE with CID=%u, op=%u does not match a pending command\n",
cid, rsp_common->op); return;
}
/* xchg() of cb_data presumably resolves the race with
 * fun_abandon_admin_cmd() so only one side sees the non-NULL pointer --
 * confirm against that function's full body. */
if (cmd_ctx->cb)
cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
/* Release the tag back to the sbitmap on the CPU it was allocated on. */
cpu = cmd_ctx->cpu;
cmd_ctx->cpu = -1;
sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
}
/* Return %true if the admin queue has stopped servicing commands as can be * detected through registers. This isn't exhaustive and may provide false * negatives.
*/ staticbool fun_adminq_stopped(struct fun_dev *fdev)
{
/* NOTE(review): body truncated by the extraction -- only the CSTS register
 * read survives; the checks on csts and the return statement are missing.
 * (Also note the fused "staticbool" token above.) */
u32 csts = readl(fdev->bar + NVME_REG_CSTS);
/* Submit an asynchronous admin command. Caller is responsible for implementing * any waiting or timeout. Upon command completion the callback @cb is called.
*/ int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
fun_admin_callback_t cb, void *cb_data, bool wait_ok)
{ struct fun_queue *funq = fdev->admin_q; unsignedint cmdsize = cmd->len8 * 8; struct fun_cmd_ctx *cmd_ctx; int tag, cpu, rc = 0;
/* cmd->len8 counts 8-byte units; reject commands larger than one SQE. */
if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2))) return -EMSGSIZE;
/* Get a free tag (acts as the command ID); optionally block until one
 * becomes available when @wait_ok is set. */
tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu); if (tag < 0) { if (!wait_ok) return -EAGAIN;
tag = fun_wait_for_tag(fdev, &cpu); if (tag < 0) return tag;
}
/* NOTE(review): truncated here -- the cmd_ctx setup, SQE copy, and doorbell
 * write that complete this function are missing from this chunk. */
/* Abandon a pending admin command by clearing the issuer's callback data. * Failure indicates that the command either has already completed or its * completion is racing with this call.
*/ staticbool fun_abandon_admin_cmd(struct fun_dev *fd, conststruct fun_admin_req_common *cmd, void *cb_data)
{
u16 cid = be16_to_cpu(cmd->cid); struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];
/* NOTE(review): truncated -- the atomic exchange of cmd_ctx->cb_data
 * against @cb_data and the boolean return are missing from this chunk.
 * (Also note the fused "staticbool"/"conststruct" tokens above.) */
/* Stop submission of new admin commands and wake up any processes waiting for
 * tags. Already submitted commands are left to complete or time out.
 *
 * Fixed: the extracted source had the fused token "staticvoid", which does
 * not compile.
 */
static void fun_admin_stop(struct fun_dev *fdev)
{
	/* Set the flag under the SQ lock; presumably submitters test
	 * suppress_cmds inside the same critical section, so none can miss
	 * it -- confirm against the submit path. */
	spin_lock(&fdev->admin_q->sq_lock);
	fdev->suppress_cmds = true;
	spin_unlock(&fdev->admin_q->sq_lock);

	/* Wake every process blocked waiting for a tag so it can observe the
	 * stopped state instead of sleeping forever. */
	sbitmap_queue_wake_all(&fdev->admin_sbq);
}
/* The callback for synchronous execution of admin commands. It copies the * command response to the caller's buffer and signals completion.
*/ staticvoid fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
{ conststruct fun_admin_rsp_common *rsp_common = rsp; struct fun_sync_cmd_ctx *ctx = cb_data;
if (!ctx) return; /* command issuer timed out and left */ if (ctx->rsp_buf) { unsignedint rsp_len = rsp_common->len8 * 8;
if (unlikely(rsp_len > ctx->rsp_len)) {
dev_err(fd->dev, "response for op %u is %uB > response buffer %uB\n",
rsp_common->op, rsp_len, ctx->rsp_len);
rsp_len = ctx->rsp_len;
}
memcpy(ctx->rsp_buf, rsp, rsp_len);
}
ctx->rsp_status = rsp_common->ret;
complete(&ctx->compl);
}
/* NOTE(review): orphaned interior of the synchronous admin-command submit
 * path -- its signature and the declarations of ret/ctx/timeout/
 * jiffies_left/cmdlen are missing from this chunk, and the function is cut
 * off before it returns. Recover the full function before relying on this. */
ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx, true); if (ret) return ret;
/* Fall back to the default admin-command timeout when none was given. */
if (!timeout)
timeout = FUN_ADMIN_CMD_TO_MS;
jiffies_left = wait_for_completion_timeout(&ctx.compl,
msecs_to_jiffies(timeout)); if (!jiffies_left) { /* The command timed out. Attempt to cancel it so we can return. * But if the command is in the process of completing we'll * wait for it.
*/ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
dev_err(fdev->dev, "admin command timed out: %*ph\n",
cmdlen, cmd);
/* A timeout may mean the queue itself has failed: stop further
 * submissions and report if the device no longer accepts commands. */
fun_admin_stop(fdev); /* see if the timeout was due to a queue failure */ if (fun_adminq_stopped(fdev))
dev_err(fdev->dev, "device does not accept admin commands\n");
/* Return the number of device resources of the requested type. */ int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
{ union { struct fun_admin_res_count_req req; struct fun_admin_res_count_rsp rsp;
/* NOTE(review): truncated -- the request initialization, the synchronous
 * submit, and the return of the counted value are missing from this chunk.
 * The union reuses one buffer for both request and response. */
} cmd; int rc;
/* Request that the instance of resource @res with the given id be deleted. */ int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res, unsignedint flags, u32 id)
{ struct fun_admin_generic_destroy_req req = {
.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
.destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
flags, id)
};
/* NOTE(review): fun_res_destroy is cut off here -- the submit call and its
 * return are missing. The lines below belong to a DIFFERENT function (queue
 * capability validation; cq_count/sq_count are not declared in this scope),
 * spliced in by the extraction. */
/* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the * device must provide additional queues.
*/ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth) return -EINVAL;
/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */ staticint fun_alloc_irqs(struct pci_dev *pdev, unsignedint min_vecs)
{ int vecs, num_msix = pci_msix_vec_count(pdev);
if (num_msix < 0) return num_msix; if (min_vecs > num_msix) return -ERANGE;
vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX); if (vecs > 0) {
dev_info(&pdev->dev, "Allocated %d IRQ vectors of %d requested\n",
vecs, num_msix);
} else {
dev_err(&pdev->dev, "Unable to allocate at least %u IRQ vectors\n",
min_vecs);
} return vecs;
}
/* Allocate and initialize the IRQ manager state.
 *
 * Fixed: the extracted source had the fused token "staticint", which does
 * not compile.
 *
 * Return: 0 on success, -ENOMEM if the IRQ bitmap cannot be allocated.
 */
static int fun_alloc_irq_mgr(struct fun_dev *fdev)
{
	fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
	if (!fdev->irq_map)
		return -ENOMEM;

	spin_lock_init(&fdev->irqmgr_lock);
	/* mark IRQ 0 allocated, it is used by the admin queue */
	__set_bit(0, fdev->irq_map);
	fdev->irqs_avail = fdev->num_irqs - 1;
	return 0;
}
/* Reserve @nirqs of the currently available IRQs and return their indices. */ int fun_reserve_irqs(struct fun_dev *fdev, unsignedint nirqs, u16 *irq_indices)
{ unsignedint b, n = 0; int err = -ENOSPC;
if (!nirqs) return 0;
spin_lock(&fdev->irqmgr_lock); if (nirqs > fdev->irqs_avail) goto unlock;
/* NOTE(review): truncated -- the bitmap scan that fills @irq_indices, the
 * "unlock" label, and the return are missing from this chunk. (Also note
 * the fused "unsignedint" tokens above.) */
/* Check and try to get the device into a proper state for initialization,
 * i.e., CSTS.RDY = CC.EN = 0.
 *
 * Fixed: the extracted source had the fused token "staticint", which does
 * not compile.
 *
 * Return: 0 on success, or the error from fun_wait_ready()/
 *	fun_disable_ctrl().
 */
static int sanitize_dev(struct fun_dev *fdev)
{
	int rc;

	/* First get RDY to agree with the current EN. Give RDY the opportunity
	 * to complete a potential recent EN change.
	 */
	rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
	if (rc)
		return rc;

	/* Next, reset the device if EN is currently 1. */
	if (fdev->cc_reg & NVME_CC_ENABLE)
		rc = fun_disable_ctrl(fdev);

	return rc;
}
/* Undo the device initialization of fun_dev_enable(). */ void fun_dev_disable(struct fun_dev *fdev)
{ struct pci_dev *pdev = to_pci_dev(fdev->dev);
/* NOTE(review): fun_dev_disable is truncated after this declaration; its
 * teardown body is missing from this chunk. */
/* Perform basic initialization of a device, including * - PCI config space setup and BAR0 mapping * - interrupt management initialization * - 1 admin queue setup * - determination of some device limits, such as number of queues.
*/ int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev, conststruct fun_dev_params *areq, constchar *name)
{ int rc;
/* NOTE(review): fun_dev_enable is likewise truncated -- only the
 * declaration of rc survives here. (Also note the fused
 * "conststruct"/"constchar" tokens in the signature above.) */
/* NOTE(review): the German text below is website boilerplate that leaked into
 * this file during extraction; it is not part of the driver. It is wrapped in
 * a comment so the file remains compilable. Translation: "The information on
 * this website was carefully compiled to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax highlighting and the
 * measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */