/*
 * Zero out a range of SBALs in a circular queue.
 * @sbal: array of SBAL pointers (one full QDIO queue)
 * @first: index of the first SBAL to clear
 * @cnt: number of consecutive SBALs to clear
 *
 * Fix: "staticvoid" was a fused extraction artifact of "static void".
 */
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		/* indices wrap around the ring of QDIO_MAX_BUFFERS_PER_Q */
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/*
 * zfcp_qdio_account - update request-queue utilization statistics.
 * @qdio: pointer to struct zfcp_qdio
 *
 * This needs to be called prior to updating the queue fill level.
 * Accumulates (SBALs in use) * (time span) into req_q_util so the
 * average queue utilization can be derived later.
 *
 * Fix: "staticinlinevoid" / "unsignedlonglong" were fused extraction
 * artifacts of "static inline void" / "unsigned long long".
 */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	/* scale TOD clock delta down by 2^12 (TOD format to microseconds) */
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}
staticvoid zfcp_qdio_int_req(struct ccw_device *cdev, unsignedint qdio_err, int queue_no, int idx, int count, unsignedlong parm)
{ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
/* * go through all SBALs from input queue currently * returned by QDIO layer
*/ for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/* * put SBALs back to response queue
*/ if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 *
 * NOTE(review): the body below appears truncated by extraction — it sets
 * the storage-block type on the request's current SBALE, but the loop
 * over @sg, the return statement and the closing brace are missing.
 * Restore from the upstream source before building.
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, struct scatterlist *sg)
{ struct qdio_buffer_element *sbale;
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= q_req->sbtype;
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 *
 * NOTE(review): truncated by extraction — the evaluation of @ret
 * (success / timeout / interrupted paths) and the final return are
 * missing below.  Restore from the upstream source.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{ long ret;
/* wait up to 5s for zfcp_qdio_sbal_check() to report a free SBAL */
ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
/* the adapter may have gone down while we were waiting */
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return -EIO;
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 *
 * NOTE(review): the statement that actually submits the request and
 * assigns @retval (presumably the QDIO output-queue call) is missing
 * from this extract, so @retval is read uninitialized in the branch
 * below; the end of the function is also absent.  Restore from the
 * upstream source.
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{ int retval;
u8 sbal_number = q_req->sbal_number;
/*
 * This should actually be a spin_lock_bh(stat_lock), to protect against
 * Request Queue completion processing in tasklet context.
 * But we can't do so (and are safe), as we always get called with IRQs
 * disabled by spin_lock_irq[save](req_q_lock).
 */
lockdep_assert_irqs_disabled();
/* account utilization before the fill level changes */
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
if (unlikely(retval)) { /* Failed to submit the IO, roll back our modifications. */
atomic_add(sbal_number, &qdio->req_q_free);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
sbal_number); return retval;
}
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 * qdio_allocate
 *
 * NOTE(review): "staticint" below is a fused extraction artifact of
 * "static int".  The function is truncated: the success return and the
 * free_req_q / free_res_q unwind labels targeted by the gotos are
 * missing from this extract.  Restore from the upstream source.
 */
staticint zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{ int ret;
/* request queue buffers */
ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q); if (ret) return -ENOMEM;
/* response queue buffers; unwind request queue on failure */
ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q); if (ret) goto free_req_q;
init_waitqueue_head(&qdio->req_q_wq);
/* one input queue, one output queue */
ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1); if (ret) goto free_res_q;
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 *
 * NOTE(review): this extract fuses the start of zfcp_qdio_close with the
 * tail of a different function (an open/setup routine, judging by the
 * failed_qdio/failed_establish labels, the use of an undeclared @cdev,
 * and "return 0" / "return -EIO" inside a void function).  The middle of
 * both functions is missing; restore both from the upstream source.
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{ struct zfcp_adapter *adapter = qdio->adapter; int idx, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return;
/*
 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
 * during qdio_shutdown().
 */
spin_lock_irq(&qdio->req_q_lock);
atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
/* NOTE(review): from here on the code belongs to the setup routine */
if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio;
/* set index of first available SBALS / number of available SBALS */
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
/* Enable processing for Request Queue completions: */
tasklet_enable(&qdio->request_tasklet);
/* Enable processing for QDIO interrupts: */
tasklet_enable(&qdio->irq_tasklet);
/* This results in a qdio_start_irq(): */
tasklet_schedule(&qdio->irq_tasklet);
zfcp_qdio_shost_update(adapter, qdio);
return 0;
failed_qdio:
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&cdev->dev, "Setting up the QDIO connection to the FCP adapter failed\n"); return -EIO;
}
/*
 * zfcp_qdio_destroy - tear down qdio state for an adapter.
 * @qdio: pointer to struct zfcp_qdio; NULL makes this a no-op.
 *
 * NOTE(review): truncated by extraction — only the NULL guard is
 * visible; the actual teardown (presumably freeing the queues) and the
 * closing brace are missing.  Restore from the upstream source.
 */
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{ if (!qdio) return;
/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int ret;

	/* logging was already requested during this shutdown cycle */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	ret = ccw_device_siosl(adapter->ccw_device);
	if (ret)
		return;

	/* remember that the trigger has been issued */
	atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, &adapter->status);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.