/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and * the bus itself. Of course we only have one channel * subsystem driver and one channel system per machine, but
* we still use the abstraction. T.R. says it's a good idea. */ staticint
ccw_bus_match (struct device * dev, conststruct device_driver * drv)
{ struct ccw_device *cdev = to_ccwdev(dev); conststruct ccw_driver *cdrv = to_ccwdrv(drv); conststruct ccw_device_id *ids = cdrv->ids, *found;
if (!ids) return 0;
found = ccw_device_id_match(ids, &cdev->id); if (!found) return 0;
cdev->id.driver_info = found->driver_info;
return 1;
}
/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 const struct ccw_device_id *id, const char *suffix)
{
	int len;

	/* Control-unit part is always present. */
	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;
	/* Device type/model may be zero; emit an empty dt/dm pair then. */
	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
/* Set up environment variables for ccw device uevent. Return 0 on success,
* non-zero otherwise. */ staticint ccw_uevent(conststruct device *dev, struct kobj_uevent_env *env)
{ conststruct ccw_device *cdev = to_ccwdev(dev); conststruct ccw_device_id *id = &(cdev->id); int ret; char modalias_buf[30];
/* CU_TYPE= */
ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type); if (ret) return ret;
/* CU_MODEL= */
ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model); if (ret) return ret;
/* The next two can be zero, that's ok for us */ /* DEV_TYPE= */
ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type); if (ret) return ret;
/* DEV_MODEL= */
ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model); if (ret) return ret;
timer_setup(&recovery_timer, recovery_func, 0);
ret = bus_register(&ccw_bus_type); if (ret) return ret;
ret = css_driver_register(&io_subchannel_driver); if (ret)
bus_unregister(&ccw_bus_type);
/** * ccw_device_set_offline() - disable a ccw device for I/O * @cdev: target ccw device * * This function calls the driver's set_offline() function for @cdev, if * given, and then disables @cdev. * Returns: * %0 on success and a negative error value on failure. * Context: * enabled, ccw device lock not held
*/ int ccw_device_set_offline(struct ccw_device *cdev)
{ struct subchannel *sch; int ret, state;
if (!cdev) return -ENODEV; if (!cdev->online || !cdev->drv) return -EINVAL;
if (cdev->drv->set_offline) {
ret = cdev->drv->set_offline(cdev); if (ret != 0) return ret;
}
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
cdev->online = 0; /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
} do {
ret = ccw_device_offline(cdev); if (!ret) break;
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device " "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno); if (ret != -EBUSY) goto error;
state = cdev->private->state;
spin_unlock_irq(cdev->ccwlock);
io_subchannel_quiesce(sch);
spin_lock_irq(cdev->ccwlock);
cdev->private->state = state;
} while (ret == -EBUSY);
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED)); /* Inform the user if set offline failed. */ if (cdev->private->state == DEV_STATE_BOXED) {
pr_warn("%s: The device entered boxed state while being set offline\n",
dev_name(&cdev->dev));
} elseif (cdev->private->state == DEV_STATE_NOT_OPER) {
pr_warn("%s: The device stopped operating while being set offline\n",
dev_name(&cdev->dev));
} /* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev); return 0;
error:
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
spin_unlock_irq(cdev->ccwlock); /* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev); return -ENODEV;
}
/** * ccw_device_set_online() - enable a ccw device for I/O * @cdev: target ccw device * * This function first enables @cdev and then calls the driver's set_online() * function for @cdev, if given. If set_online() returns an error, @cdev is * disabled again. * Returns: * %0 on success and a negative error value on failure. * Context: * enabled, ccw device lock not held
*/ int ccw_device_set_online(struct ccw_device *cdev)
{ int ret; int ret2;
if (!cdev) return -ENODEV; if (cdev->online || !cdev->drv) return -EINVAL; /* Hold on to an extra reference while device is online. */ if (!get_device(&cdev->dev)) return -ENODEV;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev); if (ret) {
spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, " "device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno); /* Give up online reference since onlining failed. */
put_device(&cdev->dev); return ret;
} /* Wait until a final state is reached */ while (!dev_fsm_final_state(cdev)) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
spin_lock_irq(cdev->ccwlock);
} /* Check if online processing was successful */ if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
spin_unlock_irq(cdev->ccwlock); /* Inform the user that set online failed. */ if (cdev->private->state == DEV_STATE_BOXED) {
pr_warn("%s: Setting the device online failed because it is boxed\n",
dev_name(&cdev->dev));
} elseif (cdev->private->state == DEV_STATE_NOT_OPER) {
pr_warn("%s: Setting the device online failed because it is not operational\n",
dev_name(&cdev->dev));
} /* Give up online reference since onlining failed. */
put_device(&cdev->dev); return -ENODEV;
}
spin_unlock_irq(cdev->ccwlock); if (cdev->drv->set_online)
ret = cdev->drv->set_online(cdev); if (ret) goto rollback;
rollback:
spin_lock_irq(cdev->ccwlock); /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
ret2 = ccw_device_offline(cdev); if (ret2) goto error;
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED)); /* Give up online reference since onlining failed. */
put_device(&cdev->dev); return ret;
error:
CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, " "device 0.%x.%04x\n",
ret2, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
spin_unlock_irq(cdev->ccwlock); /* Give up online reference since onlining failed. */
put_device(&cdev->dev); return ret;
}
/* Prevent conflict between multiple on-/offline processing requests. */ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; /* Prevent conflict between internal I/Os and on-/offline processing. */ if (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
ret = -EAGAIN; goto out;
} /* Prevent conflict between pending work and on-/offline processing.*/ if (work_pending(&cdev->private->todo_work)) {
ret = -EAGAIN; goto out;
} if (!strncmp(buf, "force\n", count)) {
force = 1;
i = 1;
ret = 0;
} else {
force = 0;
ret = kstrtoul(buf, 16, &i);
} if (ret) goto out;
device_lock(dev); switch (i) { case 0:
ret = online_store_handle_offline(cdev); break; case 1:
ret = online_store_handle_online(cdev, force); break; default:
ret = -EINVAL;
}
device_unlock(dev);
if (ccw_device_is_orphan(cdev)) return sysfs_emit(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: return sysfs_emit(buf, "boxed\n"); case DEV_STATE_DISCONNECTED: case DEV_STATE_DISCONNECTED_SENSE_ID: case DEV_STATE_NOT_OPER:
sch = to_subchannel(dev->parent); if (!sch->lpm) return sysfs_emit(buf, "no path\n"); else return sysfs_emit(buf, "no device\n"); default: /* All other states considered fine. */ return sysfs_emit(buf, "good\n");
}
}
/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	/* bus_find_device() takes a reference on the returned device. */
	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
/*
 * Detach the current driver from the ccw device and try to rebind it,
 * e.g. after sense id information changed. Holds reg_mutex to serialize
 * against concurrent registration/deregistration.
 */
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		/* -ENODEV here would mean the device vanished underneath us. */
		WARN_ON(ret == -ENODEV);
	}
	mutex_unlock(&cdev->reg_mutex);
}
/* Need to allocate a new ccw device. */
cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { /* OK, we did everything we could... */
css_sch_device_unregister(sch); return;
} /* Start recognition for the new ccw device. */
io_subchannel_recog(cdev, sch);
}
sch = to_subchannel(cdev->dev.parent); /* * Check if subchannel is still registered. It may have become * unregistered if a machine check hit us after finishing * device recognition but before the register work could be * queued.
*/ if (!device_is_registered(&sch->dev)) goto out_err;
css_update_ssd_info(sch); /* * io_subchannel_register() will also be called after device * recognition has been done for a boxed device (which will already * be registered). We need to reprobe since we may now have sense id * information.
*/
mutex_lock(&cdev->reg_mutex); if (device_is_registered(&cdev->dev)) { if (!cdev->drv) {
ret = device_reprobe(&cdev->dev); if (ret) /* We can't do much here. */
CIO_MSG_EVENT(0, "device_reprobe() returned" " %d for 0.%x.%04x\n", ret,
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
}
adjust_init_count = 0; goto out;
} /* make it known to the system */
ret = device_add(&cdev->dev); if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
spin_lock_irqsave(&sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(&sch->lock, flags);
mutex_unlock(&cdev->reg_mutex); /* Release initial device reference. */
put_device(&cdev->dev); goto out_err;
}
out:
cdev->private->flags.recog_done = 1;
mutex_unlock(&cdev->reg_mutex);
wake_up(&cdev->private->wait_q);
out_err: if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
/* * subchannel recognition done. Called from the state machine.
*/ void
io_subchannel_recog_done(struct ccw_device *cdev)
{ if (css_init_done == 0) {
cdev->private->flags.recog_done = 1; return;
} switch (cdev->private->state) { case DEV_STATE_BOXED: /* Device did not respond in time. */ case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1; /* Remove device found not operational. */
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq); break; case DEV_STATE_OFFLINE: /* * We can't register the device in interrupt context so * we schedule a work item.
*/
ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER); break;
}
}
staticvoid io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{ /* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* * Note: We always return 0 so that we bind to the device even on error. * This is needed so that our remove function is called on unregister.
*/ staticint io_subchannel_probe(struct subchannel *sch)
{ struct io_subchannel_private *io_priv; struct ccw_device *cdev; int rc;
if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group); if (rc)
CIO_MSG_EVENT(0, "Failed to create io subchannel " "attributes for subchannel " "0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc); /* * The console subchannel already has an associated ccw_device. * Register it and exit.
*/
cdev = sch_get_cdev(sch);
rc = device_add(&cdev->dev); if (rc) { /* Release online reference. */
put_device(&cdev->dev); goto out_schedule;
} if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq); return 0;
}
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch); if (rc) goto out_schedule;
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group); if (rc) goto out_schedule; /* Allocate I/O subchannel private data. */
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); if (!io_priv) goto out_schedule;
/* Timer callback for the device recovery timer. */
static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
if (rc == -ENODEV) { /* Not operational. */ if (!cdev) return IO_SCH_UNREG; if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG;
}
/* Avoid unregistering subchannels without working device. */ if (rc == -EACCES) { if (!cdev) return IO_SCH_NOP; if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG_CDEV; return IO_SCH_ORPH_CDEV;
}
/* Operational. */ if (!cdev) return IO_SCH_ATTACH; if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG_ATTACH; return IO_SCH_ORPH_ATTACH;
} if ((sch->schib.pmcw.pam & sch->opm) == 0) { if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) return IO_SCH_UNREG_CDEV; return IO_SCH_DISC;
} if (device_is_disconnected(cdev)) return IO_SCH_REPROBE; if (cdev->online) return IO_SCH_VERIFY; if (cdev->private->state == DEV_STATE_NOT_OPER) return IO_SCH_UNREG_ATTACH; return IO_SCH_NOP;
}
/** * io_subchannel_sch_event - process subchannel event * @sch: subchannel * @process: non-zero if function is called in process context * * An unspecified event occurred for this subchannel. Adjust data according * to the current operational state of the subchannel and device. Return * zero when the event has been handled sufficiently or -EAGAIN when this * function should be called again in process context.
*/ staticint io_subchannel_sch_event(struct subchannel *sch, int process)
{ unsignedlong flags; struct ccw_device *cdev; struct ccw_dev_id dev_id; enum io_sch_action action; int rc = -EAGAIN;
spin_lock_irqsave(&sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; if (work_pending(&sch->todo_work)) goto out_unlock;
cdev = sch_get_cdev(sch); if (cdev && work_pending(&cdev->private->todo_work)) goto out_unlock;
action = sch_get_action(sch);
CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
sch->schid.ssid, sch->schid.sch_no, process,
action); /* Perform immediate actions while holding the lock. */ switch (action) { case IO_SCH_REPROBE: /* Trigger device recognition. */
ccw_device_trigger_reprobe(cdev);
rc = 0; goto out_unlock; case IO_SCH_VERIFY: /* Trigger path verification. */
io_subchannel_verify(sch);
rc = 0; goto out_unlock; case IO_SCH_DISC:
ccw_device_set_disconnected(cdev);
rc = 0; goto out_unlock; case IO_SCH_ORPH_UNREG: case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev); break; case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: if (!cdev) break; if (cdev->private->state == DEV_STATE_SENSE_ID) { /* * Note: delayed work triggered by this event * and repeated calls to sch_event are synchronized * by the above check for work_pending(cdev).
*/
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
} else
ccw_device_set_notoper(cdev); break; case IO_SCH_NOP:
rc = 0; goto out_unlock; default: break;
}
spin_unlock_irqrestore(&sch->lock, flags); /* All other actions require process context. */ if (!process) goto out; /* Handle attached ccw device. */ switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: /* Move ccw device to orphanage. */
rc = ccw_device_move_to_orph(cdev); if (rc) goto out; break; case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(&sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(&sch->lock, flags); /* Unregister ccw device. */
ccw_device_unregister(cdev); break; default: break;
} /* Handle subchannel. */ switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_UNREG:
css_sch_device_unregister(sch); break; case IO_SCH_ORPH_ATTACH: case IO_SCH_UNREG_ATTACH: case IO_SCH_ATTACH:
dev_id.ssid = sch->schid.ssid;
dev_id.devno = sch->schib.pmcw.dev;
cdev = get_ccwdev_by_dev_id(&dev_id); if (!cdev) {
sch_create_and_recog_new_device(sch); break;
}
rc = ccw_device_move_to_sch(cdev, sch); if (rc) { /* Release reference from get_ccwdev_by_dev_id() */
put_device(&cdev->dev); goto out;
}
spin_lock_irqsave(&sch->lock, flags);
ccw_device_trigger_reprobe(cdev);
spin_unlock_irqrestore(&sch->lock, flags); /* Release reference from get_ccwdev_by_dev_id() */
put_device(&cdev->dev); break; default: break;
} return 0;
/* Note: we interpret class 0 in this context as an uninitialized
* field since it translates to a non-I/O interrupt class. */ if (cdrv->int_class != 0)
cdev->private->int_class = cdrv->int_class; else
cdev->private->int_class = IRQIO_CIO;
}
#ifdef CONFIG_CCW_CONSOLE
/*
 * Enable the console ccw device for I/O. Runs early during boot, so it
 * busy-waits (ccw_device_wait_idle) instead of sleeping on the wait queue.
 */
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}
/** * ccw_device_wait_idle() - busy wait for device to become idle * @cdev: ccw device * * Poll until activity control is zero, that is, no function or data * transfer is pending/active. * Called with device lock being held.
*/ void ccw_device_wait_idle(struct ccw_device *cdev)
{ struct subchannel *sch = to_subchannel(cdev->dev.parent);
while (1) {
cio_tsch(sch); if (sch->schib.scsw.cmd.actl == 0) break;
udelay(100);
}
} #endif
/** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by * @bus_id: bus id of the device to be searched * * This function searches all devices owned by @cdrv for a device with a bus * id matching @bus_id. * Returns: * If a match is found, its reference count of the found device is increased * and it is returned; else %NULL is returned.
*/ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, constchar *bus_id)
{ struct device *dev;
dev = driver_find_device_by_name(&cdrv->driver, bus_id);
/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		/* Undo the binding state set above before failing. */
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	/*
	 * BUG FIX: the success return was truncated away, letting control
	 * fall off the end of a non-void function (undefined behavior).
	 */
	return 0;
}
/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *  %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	/* Attach the driver to the ccw bus before handing to the core. */
	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}
/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
priv = container_of(work, struct ccw_device_private, todo_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent); /* Find out todo. */
spin_lock_irq(cdev->ccwlock);
todo = priv->todo;
priv->todo = CDEV_TODO_NOTHING;
CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
priv->dev_id.ssid, priv->dev_id.devno, todo);
spin_unlock_irq(cdev->ccwlock); /* Perform todo. */ switch (todo) { case CDEV_TODO_ENABLE_CMF:
cmf_reenable(cdev); break; case CDEV_TODO_REBIND:
ccw_device_do_unbind_bind(cdev); break; case CDEV_TODO_REGISTER:
io_subchannel_register(cdev); break; case CDEV_TODO_UNREG_EVAL: if (!sch_is_pseudo_sch(sch))
css_schedule_eval(sch->schid);
fallthrough; case CDEV_TODO_UNREG:
spin_lock_irq(&sch->lock);
sch_set_cdev(sch, NULL);
spin_unlock_irq(&sch->lock);
ccw_device_unregister(cdev); break; default: break;
} /* Release workqueue ref. */
put_device(&cdev->dev);
}
/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	/* Higher-priority (or equal) work already pending - nothing to do. */
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
/** * ccw_device_siosl() - initiate logging * @cdev: ccw device * * This function is used to invoke model-dependent logging within the channel * subsystem.
*/ int ccw_device_siosl(struct ccw_device *cdev)
{ struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
 * NOTE(review): unrelated trailing text (a German website disclaimer) was
 * appended to this C source, which would break compilation. It is preserved
 * here, translated to English, as a comment; it is almost certainly an
 * extraction/paste artifact and should be removed from the file:
 *
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */