/*
 * Process NOOP request result for a single path.
 *
 * Classify the per-path outcome: on success mark the path operational in
 * the subchannel's verified path mask, on timeout/access errors record the
 * path in the corresponding failure mask. Any other error aborts the whole
 * verification. Otherwise continue with the next path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		/* NOOP completed - path is operational. */
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		/* No interrupt received within the timeout. */
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		/* Path is not operational. */
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}
/* * Create channel program to perform SET PGID on a single path.
*/ staticvoid spid_build_cp(struct ccw_device *cdev, u8 fn)
{ struct ccw_request *req = &cdev->private->req; struct ccw1 *cp = cdev->private->dma_area->iccws; int i = pathmask_to_pos(req->lpm); struct pgid *pgid = &cdev->private->dma_area->pgid[i];
/*
 * Perform establish/resign SET PGID on a single path.
 *
 * Select the next path that still needs a PGID state change and start a
 * SET PGID channel program on it: ESTABLISH on operational paths, RESIGN
 * on non-operational ones, with the multipath function bit added when
 * multipathing is enabled. When no path is left, either start a PGID
 * wipeout (if a previous SPID may have partially completed) or finish
 * verification.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	/* Succeed if at least one path was verified as operational. */
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
/* * Process SET PGID request result for a single path.
*/ staticvoid spid_callback(struct ccw_device *cdev, void *data, int rc)
{ struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req;
switch (rc) { case 0:
sch->vpm |= req->lpm & sch->opm; break; case -ETIME:
cdev->private->flags.pgid_unknown = 1;
cdev->private->path_noirq_mask |= req->lpm; break; case -EACCES:
cdev->private->path_notoper_mask |= req->lpm; break; case -EOPNOTSUPP: if (cdev->private->flags.mpath) { /* Try without multipathing. */
cdev->private->flags.mpath = 0; goto out_restart;
} /* Try without pathgrouping. */
cdev->private->flags.pgroup = 0; goto out_restart; default: goto err;
}
req->lpm >>= 1;
spid_do(cdev); return;
/* Set bits for paths which are already in the target state. */ for (i = 0; i < 8; i++) {
lpm = 0x80 >> i; if ((cdev->private->pgid_valid_mask & lpm) == 0) continue;
pgid = &cdev->private->dma_area->pgid[i]; if (sch->opm & lpm) { if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED) continue;
} else { if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED) continue;
} if (cdev->private->flags.mpath) { if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH) continue;
} else { if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) continue;
}
donepm |= lpm;
}
return donepm;
}
/*
 * Copy the given PGID into the per-path PGID slot of each of the eight
 * possible channel paths, so that all paths use the same path-group ID.
 */
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}
/* * Process SENSE PGID data and report result.
*/ staticvoid snid_done(struct ccw_device *cdev, int rc)
{ struct ccw_dev_id *id = &cdev->private->dev_id; struct subchannel *sch = to_subchannel(cdev->dev.parent); struct pgid *pgid; int mismatch = 0;
u8 reserved = 0;
u8 reset = 0;
u8 donepm;
/* * Create channel program to perform a SENSE PGID on a single path.
*/ staticvoid snid_build_cp(struct ccw_device *cdev)
{ struct ccw_request *req = &cdev->private->req; struct ccw1 *cp = cdev->private->dma_area->iccws; int i = pathmask_to_pos(req->lpm);
out_nopath: if (cdev->private->pgid_valid_mask)
ret = 0; elseif (cdev->private->path_noirq_mask)
ret = -ETIME; else
ret = -EACCES;
snid_done(cdev, ret);
}
/* * Process SENSE PGID request result for single path.
*/ staticvoid snid_callback(struct ccw_device *cdev, void *data, int rc)
{ struct ccw_request *req = &cdev->private->req;
switch (rc) { case 0:
cdev->private->pgid_valid_mask |= req->lpm; break; case -ETIME:
cdev->private->flags.pgid_unknown = 1;
cdev->private->path_noirq_mask |= req->lpm; break; case -EACCES:
cdev->private->path_notoper_mask |= req->lpm; break; default: goto err;
} /* Continue on the next path. */
req->lpm >>= 1;
snid_do(cdev); return;
/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}
/*
 * Process disband SET PGID request result.
 *
 * On success, clear the device's multipath flag and, if necessary, commit
 * a matching single-path configuration to the subchannel so device and
 * channel state stay consistent. Log the outcome and report completion.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		/* rc reflects the config commit result from here on. */
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}
/** * ccw_device_disband_start - disband pathgroup * @cdev: ccw device * * Execute a SET PGID channel program on @cdev to disband a previously * established pathgroup. When finished, call ccw_device_disband_done with * a return code specifying the result.
*/ void ccw_device_disband_start(struct ccw_device *cdev)
{ struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req;
u8 fn;
/** * ccw_device_stlck_start - perform unconditional release * @cdev: ccw device * @data: data pointer to be passed to ccw_device_stlck_done * @buf1: data pointer used in channel program * @buf2: data pointer used in channel program * * Execute a channel program on @cdev to release an existing PGID reservation.
*/ staticvoid ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1, void *buf2)
{ struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.