/** * do_siga_output - perform SIGA-w/wt function * @schid: subchannel id or in case of QEBSM the subchannel token * @mask: which output queues to process * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer * @fc: function code to perform * @aob: asynchronous operation block * * Returns condition code. * Note: For IQDC unicast queues only the highest priority queue is processed.
*/ staticinlineint do_siga_output(unsignedlong schid, unsignedlong mask, unsignedint *bb, unsignedlong fc,
dma64_t aob)
{ int cc;
/** * qdio_do_eqbs - extract buffer states for QEBSM * @q: queue to manipulate * @state: state of the extracted buffers * @start: buffer number to start at * @count: count of buffers to examine * @auto_ack: automatically acknowledge buffers * * Returns the number of successfully extracted equal buffer states. * Stops processing if a state is different from the last buffers state.
*/ staticint qdio_do_eqbs(struct qdio_q *q, unsignedchar *state, int start, int count, int auto_ack)
{ int tmp_count = count, tmp_start = start, nr = q->nr; unsignedint ccq = 0;
qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
switch (ccq) { case 0: case 32: /* all done, or next buffer state different */ return count - tmp_count; case 96: /* not all buffers processed */
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
tmp_count); return count - tmp_count; case 97: /* no buffer processed */
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); goto again; default:
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
q->first_to_check, count, q->irq_ptr->int_parm); return 0;
}
}
/** * qdio_do_sqbs - set buffer states for QEBSM * @q: queue to manipulate * @state: new state of the buffers * @start: first buffer number to change * @count: how many buffers to change * * Returns the number of successfully changed buffers. * Does retrying until the specified count of buffer states is set or an * error occurs.
*/ staticint qdio_do_sqbs(struct qdio_q *q, unsignedchar state, int start, int count)
{ unsignedint ccq = 0; int tmp_count = count, tmp_start = start; int nr = q->nr;
qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
switch (ccq) { case 0: case 32: /* all done, or active buffer adapter-owned */
WARN_ON_ONCE(tmp_count); return count - tmp_count; case 96: /* not all buffers processed */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
qperf_inc(q, sqbs_partial); goto again; default:
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
q->first_to_check, count, q->irq_ptr->int_parm); return 0;
}
}
/* * Returns number of examined buffers and their common state in *state. * Requested number of buffers-to-examine must be > 0.
*/ staticinlineint get_buf_states(struct qdio_q *q, unsignedint bufnr, unsignedchar *state, unsignedint count, int auto_ack)
{ unsignedchar __state = 0; int i = 1;
if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
/* Bail out early if there is no work on the queue: */ if (__state & SLSB_OWNER_CU) goto out;
for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* stop if next state differs from initial state: */ if (q->slsb.val[bufnr] != __state) break;
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */ staticinlineint set_buf_states(struct qdio_q *q, int bufnr, unsignedchar state, int count)
{ int i;
if (is_qebsm(q)) return qdio_do_sqbs(q, state, bufnr, count);
/* Ensure that all preceding changes to the SBALs are visible: */
mb();
for (i = 0; i < count; i++) {
WRITE_ONCE(q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
cc = do_siga_input(schid, q->mask, fc); if (unlikely(cc))
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); return (cc) ? -EIO : 0;
}
int debug_get_buf_state(struct qdio_q *q, unsignedint bufnr, unsignedchar *state)
{ if (qdio_need_siga_sync(q->irq_ptr))
qdio_siga_sync_q(q); return get_buf_state(q, bufnr, state, 0);
}
/*
 * Return all buffers in the current acknowledgment batch to the adapter
 * and reset the batch tracking, so the card sees we stopped polling.
 * No-op if there is no outstanding batch.
 */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);

	q->u.in.batch_count = 0;
}
switch (state) { case SLSB_P_INPUT_PRIMED:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
count);
inbound_handle_work(q, start, count, is_qebsm(q)); if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count); return count; case SLSB_P_INPUT_ERROR:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
count);
*error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
inbound_handle_work(q, start, count, false); if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count); return count; case SLSB_CU_INPUT_EMPTY: if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
q->nr, start); return 0; case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: /* We should never see this state, throw a WARN: */ default:
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, "found state %#x at index %u on queue %u\n",
state, start, q->nr); return 0;
}
}
switch (state) { case SLSB_P_OUTPUT_PENDING:
*error = QDIO_ERROR_SLSB_PENDING;
fallthrough; case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count); return count; case SLSB_P_OUTPUT_ERROR:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
q->nr, count);
*error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count); return count; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr); return 0; case SLSB_P_OUTPUT_HALTED: return 0; case SLSB_P_OUTPUT_NOT_INIT: /* We should never see this state, throw a WARN: */ default:
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, "found state %#x at index %u on queue %u\n",
state, start, q->nr); return 0;
}
}
/* zfcp wants this: */ if (irq_ptr->nr_input_qs)
first_to_check = irq_ptr->input_qs[0]->first_to_check;
irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
first_to_check, 0, irq_ptr->int_parm);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); /* * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. * Therefore we call the LGR detection function here.
*/
lgr_info_log();
}
staticint qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (cstat) goto error; if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) goto error; if (dcc == 1) return -EAGAIN; if (!(dstat & DEV_STAT_DEV_END)) goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); return 0;
/** * qdio_get_ssqd_desc - get qdio subchannel description * @cdev: ccw device to get description for * @data: where to store the ssqd * * Returns 0 or an error code. The results of the chsc are stored in the * specified structure.
*/ int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data)
{ struct subchannel_id schid;
/** * qdio_shutdown - shut down a qdio subchannel * @cdev: associated ccw device * @how: use halt or clear to shutdown
*/ int qdio_shutdown(struct ccw_device *cdev, int how)
{ struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct subchannel_id schid; int rc;
mutex_lock(&irq_ptr->setup_mutex); /* * Subchannel was already shot down. We cannot prevent being called * twice since cio may trigger a shutdown asynchronously.
*/ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
mutex_unlock(&irq_ptr->setup_mutex); return 0;
}
/* * Indicate that the device is going down.
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/* * Allocate a page for the chsc calls in qdio_establish. * Must be pre-allocated since a zfcp recovery will call * qdio_establish. In case of low memory and swap on a zfcp disk * we may not be able to allocate memory otherwise.
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); if (!irq_ptr->chsc_page) goto err_chsc;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr->qdr) goto err_qdr;
rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs); if (rc) goto err_queues;
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0, or the return value of qdio_siga_input() when the device
 * requires a SIGA-r after returning buffers.
 *
 * NOTE(review): this variant only adjusts the polling-batch bookkeeping
 * and does not visibly reset the SLSB states of the returned buffers —
 * confirm against the callers that this is intended.
 */
static int handle_inbound(struct qdio_q *q, int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	if (qdio_need_siga_in(q->irq_ptr))
		return qdio_siga_input(q);

	return 0;
}
/** * qdio_add_bufs_to_input_queue - process buffers on an Input Queue * @cdev: associated ccw_device for the qdio subchannel * @q_nr: queue number * @bufnr: buffer number * @count: how many buffers to process
*/ int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsignedint q_nr, unsignedint bufnr, unsignedint count)
{ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL;
/** * qdio_add_bufs_to_output_queue - process buffers on an Output Queue * @cdev: associated ccw_device for the qdio subchannel * @q_nr: queue number * @bufnr: buffer number * @count: how many buffers to process * @aob: asynchronous operation block
*/ int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsignedint q_nr, unsignedint bufnr, unsignedint count, struct qaob *aob)
{ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL;
/** * qdio_start_irq - enable interrupt processing for the device * @cdev: associated ccw_device for the qdio subchannel * * Return codes * 0 - success * 1 - irqs not started since new data is available
*/ int qdio_start_irq(struct ccw_device *cdev)
{ struct qdio_q *q; struct qdio_irq *irq_ptr = cdev->private->qdio_data; unsignedint i;
if (!irq_ptr) return -ENODEV;
for_each_input_queue(irq_ptr, q, i)
qdio_stop_polling(q);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.