// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. * * This file is released under the GPL.
*/
/*
 * Per-open-file state for the dm control device.
 *
 * poll will wait until the global event number is greater than
 * this value.
 */
struct dm_file {
	volatile unsigned int global_event_nr;
};
/*
 *---------------------------------------------------------------
 * The ioctl interface needs to be able to look up devices by
 * name or uuid.
 *---------------------------------------------------------------
 */
struct hash_cell {
	struct rb_node name_node;
	struct rb_node uuid_node;
	bool name_set;
	bool uuid_set;

	/*
	 * NOTE(review): the members below were truncated by extraction and
	 * reconstructed from their uses elsewhere in this file (hc->name,
	 * hc->uuid, hc->md, hc->new_map).
	 */
	char *name;
	char *uuid;
	struct mapped_device *md;
	struct dm_table *new_map;
};
/* *--------------------------------------------------------------- * Code for looking up a device by name *---------------------------------------------------------------
*/ staticstruct hash_cell *__get_name_cell(constchar *str)
{ struct rb_node *n = name_rb_tree.rb_node;
while (n) { struct hash_cell *hc = container_of(n, struct hash_cell, name_node); int c;
c = strcmp(hc->name, str); if (!c) {
dm_get(hc->md); return hc;
}
n = c >= 0 ? n->rb_left : n->rb_right;
}
/* * The kdev_t and uuid of a device can never change once it is * initially inserted.
*/ staticint dm_hash_insert(constchar *name, constchar *uuid, struct mapped_device *md)
{ struct hash_cell *cell, *hc;
/* * Allocate the new cells.
*/
cell = alloc_cell(name, uuid, md); if (!cell) return -ENOMEM;
/* * Insert the cell into both hash tables.
*/
down_write(&_hash_lock);
hc = __get_name_cell(name); if (hc) {
dm_put(hc->md); goto bad;
}
__link_name(cell);
if (uuid) {
hc = __get_uuid_cell(uuid); if (hc) {
__unlink_name(cell);
dm_put(hc->md); goto bad;
}
__link_uuid(cell);
}
dm_get(md);
mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(md, cell);
mutex_unlock(&dm_hash_cells_mutex);
up_write(&_hash_lock);
/*
 * NOTE(review): the lines below are an interior fragment of
 * dm_hash_remove_all()/__hash_remove() — the function header and the
 * enclosing retry loop were lost when this file was extracted.  The code
 * is kept byte-for-byte; it is not compilable on its own.
 */
/* remove from the dev trees */
__unlink_name(hc);
__unlink_uuid(hc);
mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
mutex_unlock(&dm_hash_cells_mutex);
/* Fire a table event on the live table (if any) before releasing it. */
table = dm_get_live_table(hc->md, &srcu_idx); if (table)
dm_table_event(table);
dm_put_live_table(hc->md, srcu_idx);
table = NULL; if (hc->new_map)
table = hc->new_map;
dm_put(hc->md);
free_cell(hc);
if (t) {
dm_sync_table(md);
dm_table_destroy(t);
}
dm_ima_measure_on_device_remove(md, true);
dm_put(md); if (likely(keep_open_devices))
dm_destroy(md); else
dm_destroy_immediate(md);
/* * Some mapped devices may be using other mapped * devices, so repeat until we make no further * progress. If a new mapped device is created * here it will also get removed.
*/ goto retry;
}
up_write(&_hash_lock);
if (dev_skipped)
DMWARN("remove_all left %d open device(s)", dev_skipped);
}
/*
 * Set the uuid of a hash_cell that isn't already set.
 */
static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
{
	/*
	 * dm_hash_cells_mutex serialises this update against readers of
	 * hc->uuid (e.g. dm_copy_name_and_uuid below).
	 */
	mutex_lock(&dm_hash_cells_mutex);
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	__link_uuid(hc);
}
/* * Changes the name of a hash_cell and returns the old name for * the caller to free.
*/ staticchar *__change_cell_name(struct hash_cell *hc, char *new_name)
{ char *old_name;
/* * Rename and move the name cell.
*/
__unlink_name(hc);
old_name = hc->name;
/*
 * NOTE(review): interior fragment of dm_hash_rename() — the function header
 * (apparently taking the rename ioctl's param, the new name/uuid string
 * 'new'/'new_data' and a change_uuid flag) and its tail were lost when this
 * file was extracted.  Kept byte-for-byte.
 */
/* * Is new free ?
*/ if (change_uuid)
hc = __get_uuid_cell(new); else
hc = __get_name_cell(new);
if (hc) {
DMERR("Unable to change %s on mapped device %s to one that already exists: %s",
change_uuid ? "uuid" : "name",
param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data); return ERR_PTR(-EBUSY);
}
/* * Is there such a device as 'old' ?
*/
hc = __get_name_cell(param->name); if (!hc) {
DMERR("Unable to rename non-existent device, %s to %s%s",
param->name, change_uuid ? "uuid " : "", new);
up_write(&_hash_lock);
kfree(new_data); return ERR_PTR(-ENXIO);
}
/* * Does this device already have a uuid?
*/ if (change_uuid && hc->uuid) {
DMERR("Unable to change uuid of mapped device %s to %s " "because uuid is already set to %s",
param->name, new, hc->uuid);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data); return ERR_PTR(-EINVAL);
}
if (change_uuid)
__set_cell_uuid(hc, new_data); else
old_name = __change_cell_name(hc, new_data);
/* * Wake up any dm event waiters.
*/
table = dm_get_live_table(hc->md, &srcu_idx); if (table)
dm_table_event(table);
dm_put_live_table(hc->md, srcu_idx);
if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
/*
 *---------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------
 */

/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.
 */
typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size);
/*
 * NOTE(review): the lines below are fused fragments of list_devices()
 * (sizing loop over the name tree), list_version_get_info() (buffer space
 * check) and list_versions() (iterate targets) — the surrounding function
 * bodies were lost when this file was extracted.  Kept byte-for-byte.
 */
/* * Loop through all the devices working out how much * space we need.
*/ for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
hc = container_of(n, struct hash_cell, name_node); if (!filter_device(hc, param->name, param->uuid)) continue;
needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
needed += align_val(sizeof(uint32_t) * 2); if (param->flags & DM_UUID_FLAG && hc->uuid)
needed += align_val(strlen(hc->uuid) + 1);
}
/* Check space - it might have changed since the first iteration */ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) {
info->flags = DM_BUFFER_FULL_FLAG; return;
}
if (info->old_vers)
info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers);
if (name) {
tt = dm_get_target_type(name); if (!tt) return -EINVAL;
}
/* * Loop through all the devices working out how much * space we need.
*/ if (!tt)
dm_target_iterate(list_version_get_needed, &needed); else
list_version_get_needed(tt, &needed);
/* * Now loop through filling out the names & versions.
*/ if (!tt)
dm_target_iterate(list_version_get_info, &iter_info); else
list_version_get_info(tt, &iter_info);
param->flags |= iter_info.flags;
staticint check_name(constchar *name)
{ if (strchr(name, '/')) {
DMERR("device name cannot contain '/'"); return -EINVAL;
}
if (strcmp(name, DM_CONTROL_NODE) == 0 ||
strcmp(name, ".") == 0 ||
strcmp(name, "..") == 0) {
DMERR("device name cannot be \"%s\", \".\", or \"..\"", DM_CONTROL_NODE); return -EINVAL;
}
return 0;
}
/*
 * NOTE(review): dm_get_inactive_table() below is truncated by extraction —
 * its body breaks off after the hash lookup and the remaining lines belong
 * to __dev_status() (filling in param->flags/dev/open_count).  Kept
 * byte-for-byte.
 */
/* * On successful return, the caller must not attempt to acquire * _hash_lock without first calling dm_put_live_table, because dm_table_destroy * waits for this dm_put_live_table and could be called under this lock.
*/ staticstruct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
{ struct hash_cell *hc; struct dm_table *table = NULL;
/* increment rcu count, we don't care about the table pointer */
dm_get_live_table(md, srcu_idx);
down_read(&_hash_lock);
hc = dm_get_mdptr(md); if (!hc) {
DMERR("device has been removed from the dev hash table."); goto out;
}
/* NOTE(review): from here on the lines are a fragment of __dev_status(). */
if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
if (dm_suspended_internally_md(md))
param->flags |= DM_INTERNAL_SUSPEND_FLAG;
if (dm_test_deferred_remove_flag(md))
param->flags |= DM_DEFERRED_REMOVE;
param->dev = huge_encode_dev(disk_devt(disk));
/* * Yes, this will be out of date by the time it gets back * to userland, but it is still very useful for * debugging.
*/
param->open_count = dm_open_count(md);
/*
 * Create a new mapped device and insert it into the name/uuid
 * lookup tables (DM_DEV_CREATE ioctl).
 *
 * Returns 0 on success (with *param filled in by __dev_status),
 * a negative errno otherwise.
 */
static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	/* Userspace asked for a specific, persistent minor number. */
	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		dm_put(md);
		dm_destroy(md);
		return r;
	}

	/* A freshly created device cannot have an inactive table yet. */
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	__dev_status(md, param);

	dm_put(md);

	return 0;
}
/* * Always use UUID for lookups if it's present, otherwise use name or dev.
*/ staticstruct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{ struct hash_cell *hc = NULL;
if (*param->uuid) { if (*param->name || param->dev) {
DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx",
param->uuid, param->name, (unsignedlonglong)param->dev); return NULL;
}
hc = __get_uuid_cell(param->uuid); if (!hc) return NULL;
} elseif (*param->name) { if (param->dev) {
DMERR("Invalid ioctl structure: name %s, dev %llx",
param->name, (unsignedlonglong)param->dev); return NULL;
}
hc = __get_name_cell(param->name); if (!hc) return NULL;
} elseif (param->dev) {
hc = __get_dev_cell(param->dev); if (!hc) return NULL;
} else return NULL;
/* * Sneakily write in both the name and the uuid * while we have the cell.
*/
strscpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid)
strscpy(param->uuid, hc->uuid, sizeof(param->uuid)); else
param->uuid[0] = '\0';
if (hc->new_map)
param->flags |= DM_INACTIVE_PRESENT_FLAG; else
param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
/*
 * NOTE(review): body fragment of dev_remove() — the function header was
 * lost when this file was extracted.  Kept byte-for-byte.
 */
down_write(&_hash_lock);
hc = __find_device_hash_cell(param);
if (!hc) {
DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock); return -ENXIO;
}
md = hc->md;
/* * Ensure the device is not open and nothing further can open it.
*/
r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); if (r) { if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
up_write(&_hash_lock);
dm_put(md); return 0;
}
DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
up_write(&_hash_lock);
dm_put(md); return r;
}
t = __hash_remove(hc);
up_write(&_hash_lock);
if (t) {
dm_sync_table(md);
dm_table_destroy(t);
}
param->flags &= ~DM_DEFERRED_REMOVE;
dm_ima_measure_on_device_remove(md, false);
if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
dm_destroy(md); return 0;
}
/*
 * Check a string doesn't overrun the chunk of
 * memory we copied from userland.
 *
 * Returns 0 if a NUL terminator is found before @end,
 * -EINVAL if the string runs off the end of the buffer.
 */
static int invalid_str(char *str, void *end)
{
	while ((void *) str < end)
		if (!*str++)
			return 0;

	/* NOTE(review): tail reconstructed — original was cut off by extraction. */
	return -EINVAL;
}
/*
 * NOTE(review): fused fragments of dev_rename()/do_resume() — the function
 * headers and intermediate code (table swap, suspend handling) were lost
 * when this file was extracted.  Kept byte-for-byte.
 */
hc = __find_device_hash_cell(param); if (!hc) {
DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock); return -ENXIO;
}
if (dm_table_get_mode(new_map) & BLK_OPEN_WRITE)
set_disk_ro(dm_disk(md), 0); else
set_disk_ro(dm_disk(md), 1);
}
if (dm_suspended_md(md)) {
r = dm_resume(md); if (!r) {
dm_ima_measure_on_device_resume(md, new_map ? true : false);
if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent))
param->flags |= DM_UEVENT_GENERATED_FLAG;
}
}
/* * Since dm_swap_table synchronizes RCU, nobody should be in * read-side critical section already.
*/ if (old_map)
dm_table_destroy(old_map);
if (!r)
__dev_status(md, param);
dm_put(md); return r;
}
/*
 * Set or unset the suspension state of a device.
 * If the device already is in the requested state we just return its status.
 */
static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	if (param->flags & DM_SUSPEND_FLAG)
		return do_suspend(param);

	return do_resume(param);
}
/*
 * Copies device info back to user space, used by
 * the create and info ioctls.
 */
static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	__dev_status(md, param);

	/* Drop the reference find_device took on the mapped device. */
	dm_put(md);

	return 0;
}
/*
 * NOTE(review): retrieve_status() below is truncated — the output buffer
 * setup, the per-target dm_target_spec filling and the function tail were
 * lost when this file was extracted.  Kept byte-for-byte.
 */
/* * Build up the status struct for each target
*/ staticvoid retrieve_status(struct dm_table *table, struct dm_ioctl *param, size_t param_size)
{ unsignedint i, num_targets; struct dm_target_spec *spec; char *outbuf, *outptr;
status_type_t type;
size_t remaining, len, used = 0; unsignedint status_flags = 0;
if (param->flags & DM_STATUS_TABLE_FLAG)
type = STATUSTYPE_TABLE; elseif (param->flags & DM_IMA_MEASUREMENT_FLAG)
type = STATUSTYPE_IMA; else
type = STATUSTYPE_INFO;
/* Get all the target info */
num_targets = table->num_targets; for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i);
size_t l;
remaining = len - (outptr - outbuf); if (remaining <= sizeof(struct dm_target_spec)) {
param->flags |= DM_BUFFER_FULL_FLAG; break;
}
/* Get the status/table string from the target driver */ if (ti->type->status) { if (param->flags & DM_NOFLUSH_FLAG)
status_flags |= DM_STATUS_NOFLUSH_FLAG;
ti->type->status(ti, type, status_flags, outptr, remaining);
} else
outptr[0] = '\0';
l = strlen(outptr) + 1; if (l == remaining) {
param->flags |= DM_BUFFER_FULL_FLAG; break;
}
outptr += l;
used = param->data_start + (outptr - outbuf);
/*
 * NOTE(review): fused fragments below — dev_wait() is missing its table
 * retrieval and 'out' label; dev_arm_poll() is cut off after its first
 * line and the remaining lines are the tail of get_mode().  Kept
 * byte-for-byte.
 */
/* * Wait for a device to report an event
*/ staticint dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size)
{ int r = 0; struct mapped_device *md; struct dm_table *table; int srcu_idx;
md = find_device(param); if (!md) return -ENXIO;
/* * Wait for a notification event
*/ if (dm_wait_event(md, param->event_nr)) {
r = -ERESTARTSYS; goto out;
}
/* * The userland program is going to want to know what * changed to trigger the event, so we may as well tell * him and save an ioctl.
*/
__dev_status(md, param);
/* * Remember the global event number and make it possible to poll * for further events.
*/ staticint dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size)
{ struct dm_file *priv = filp->private_data;
if (param->flags & DM_READONLY_FLAG)
mode = BLK_OPEN_READ;
return mode;
}
/*
 * NOTE(review): next_target() below is truncated — the tail that computes
 * *spec/*target_params and returns 0 was lost when this file was
 * extracted.  Kept byte-for-byte.
 */
staticint next_target(struct dm_target_spec *last, uint32_t next, constchar *end, struct dm_target_spec **spec, char **target_params)
{
static_assert(__alignof__(struct dm_target_spec) <= 8, "struct dm_target_spec must not require more than 8-byte alignment");
/* * Number of bytes remaining, starting with last. This is always * sizeof(struct dm_target_spec) or more, as otherwise *last was * out of bounds already.
*/
size_t remaining = end - (char *)last;
/* * There must be room for both the next target spec and the * NUL-terminator of the target itself.
*/ if (remaining - sizeof(struct dm_target_spec) <= next) {
DMERR("Target spec extends beyond end of parameters"); return -EINVAL;
}
if (next % __alignof__(struct dm_target_spec)) {
DMERR("Next dm_target_spec (offset %u) is not %zu-byte aligned",
next, __alignof__(struct dm_target_spec)); return -EINVAL;
}
/*
 * NOTE(review): fused fragments of table_load() (missing header, error
 * labels and tail) and the start of table_clear() — lost when this file
 * was extracted.  Kept byte-for-byte.
 */
r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) goto err;
/* Protect md->type and md->queue against concurrent table loads. */
dm_lock_md_type(md);
r = populate_table(t, param, param_size); if (r) goto err_unlock_md_type;
dm_ima_measure_on_table_load(t, STATUSTYPE_IMA);
immutable_target_type = dm_get_immutable_target_type(md); if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t)) &&
!dm_table_get_wildcard_target(t)) {
DMERR("can't replace immutable target type %s",
immutable_target_type->name);
r = -EINVAL; goto err_unlock_md_type;
}
if (dm_get_md_type(md) == DM_TYPE_NONE) { /* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t); if (r) {
DMERR("unable to set up device queue for new table."); goto err_unlock_md_type;
}
} elseif (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
dm_get_md_type(md), dm_table_get_type(t));
r = -EINVAL; goto err_unlock_md_type;
}
dm_unlock_md_type(md);
/* stage inactive table */
down_write(&_hash_lock);
hc = dm_get_mdptr(md); if (!hc) {
DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock);
r = -ENXIO; goto err_destroy_table;
}
if (hc->new_map)
old_map = hc->new_map;
hc->new_map = t;
up_write(&_hash_lock);
/* NOTE(review): from here the lines belong to a different function (table_clear). */
hc = __find_device_hash_cell(param); if (!hc) {
DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
up_write(&_hash_lock); return -ENXIO;
}
/*
 * NOTE(review): table_status() below is cut off after its local
 * declarations — the body was lost when this file was extracted.
 */
/* * Return the status of a device as a text string for each * target.
*/ staticint table_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{ struct mapped_device *md; struct dm_table *table; int srcu_idx;
/*
 * Process device-mapper dependent messages.  Messages prefixed with '@'
 * are processed by the DM core.  All others are delivered to the target.
 * Returns a number <= 1 if message was processed by device mapper.
 * Returns 2 if message should be delivered to the target.
 */
static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
			  char *result, unsigned int maxlen)
{
	int r;

	if (**argv != '@')
		return 2; /* no '@' prefix, deliver to target */

	if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
		if (argc != 1) {
			DMERR("Invalid arguments for @cancel_deferred_remove");
			return -EINVAL;
		}
		return dm_cancel_deferred_remove(md);
	}

	/* Delegate any other '@' message to the statistics code. */
	r = dm_stats_message(md, argc, argv, result, maxlen);
	if (r < 2)
		return r;

	DMERR("Unsupported message sent to DM core: %s", argv[0]);
	return -EINVAL;
}
/*
 * NOTE(review): target_message() below is truncated — the device lookup,
 * tmsg validation and the 'out'/'out_argv'/'out_table' cleanup labels were
 * lost when this file was extracted.  Kept byte-for-byte.
 */
/* * Pass a message to the target that's at the supplied device offset.
*/ staticint target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
{ int r, argc; char **argv; struct mapped_device *md; struct dm_table *table; struct dm_target *ti; struct dm_target_msg *tmsg = (void *) param + param->data_start;
size_t maxlen; char *result = get_result_buffer(param, param_size, &maxlen); int srcu_idx;
r = dm_split_args(&argc, &argv, tmsg->message); if (r) {
DMERR("Failed to split target message parameters"); goto out;
}
if (!argc) {
DMERR("Empty message received.");
r = -EINVAL; goto out_argv;
}
r = message_for_md(md, argc, argv, result, maxlen); if (r <= 1) goto out_argv;
table = dm_get_live_table(md, &srcu_idx); if (!table) goto out_table;
if (dm_deleting_md(md)) {
r = -ENXIO; goto out_table;
}
ti = dm_table_find_target(table, tmsg->sector); if (!ti) {
DMERR("Target message sector outside device.");
r = -EINVAL;
} elseif (ti->type->message)
r = ti->type->message(ti, argc, argv, result, maxlen); else {
DMERR("Target type does not support messages");
r = -EINVAL;
}
if (r == 1) {
param->flags |= DM_DATA_OUT_FLAG; if (dm_message_test_buffer_overflow(result, maxlen))
param->flags |= DM_BUFFER_FULL_FLAG; else
param->data_size = param->data_start + strlen(result) + 1;
r = 0;
}
dm_put(md); return r;
}
/*
 * The ioctl parameter block consists of two parts, a dm_ioctl struct
 * followed by a data buffer.  This flag is set if the second part,
 * which has a variable size, is not used by the function processing
 * the ioctl.
 */
#define IOCTL_FLAGS_NO_PARAMS		1
/* Set if completing the ioctl should bump the global event number. */
#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT	2
/*
 * NOTE(review): lookup_ioctl() below is truncated — most of the _ioctls[]
 * dispatch table and the lookup logic were lost when this file was
 * extracted.  Kept byte-for-byte.
 */
/* *--------------------------------------------------------------- * Implementation of open/close/ioctl on the special char device. *---------------------------------------------------------------
*/ static ioctl_fn lookup_ioctl(unsignedint cmd, int *ioctl_flags)
{ staticconststruct { int cmd; int flags;
ioctl_fn fn;
} _ioctls[] = {
{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
{DM_LIST_DEVICES_CMD, 0, list_devices},
/*
 * NOTE(review): check_version() below is truncated — the version
 * comparison and the copy-out of the kernel's interface version were lost
 * when this file was extracted.  Kept byte-for-byte.
 */
/* * As well as checking the version compatibility this always * copies the kernel interface version out.
*/ staticint check_version(unsignedint cmd, struct dm_ioctl __user *user, struct dm_ioctl *kernel_params)
{ int r = 0;
/* Make certain version is first member of dm_ioctl struct */
BUILD_BUG_ON(offsetof(struct dm_ioctl, version) != 0);
if (copy_from_user(kernel_params->version, user->version, sizeof(kernel_params->version))) return -EFAULT;
/*
 * NOTE(review): fused fragments of copy_params() (buffer allocation and
 * copy-in) and validate_params() (name/uuid sanity checks) — the function
 * headers and tails were lost when this file was extracted.  Kept
 * byte-for-byte.
 */
/* * Use __GFP_HIGH to avoid low memory issues when a device is * suspended and the ioctl is needed to resume it. * Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
dmi = kvmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH);
if (!dmi) { if (secure_data && clear_user(user, param_kernel->data_size)) return -EFAULT; return -ENOMEM;
}
*param_flags |= DM_PARAMS_MALLOC;
/* Copy from param_kernel (which was already copied from user) */
memcpy(dmi, param_kernel, minimum_data_size);
if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
param_kernel->data_size - minimum_data_size)) goto bad;
data_copied: /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, param_kernel->data_size)) goto bad;
/* NOTE(review): from here the lines belong to validate_params(). */
if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) {
DMERR("name not supplied when creating device"); return -EINVAL;
}
} elseif (*param->uuid && *param->name) {
DMERR("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL;
}
/*
 * Top-level dispatcher for ioctls on the dm control device: checks
 * permissions and interface version, copies the parameter block in,
 * runs the per-command handler and copies the results back out.
 *
 * NOTE(review): the original text had '&' mis-encoded as '¶' and was
 * missing the lookup_ioctl() call and the 'out:' cleanup label; those
 * parts are reconstructed below — confirm against upstream dm-ioctl.c.
 */
static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	int ioctl_flags;
	int param_flags;
	unsigned int cmd;
	struct dm_ioctl *param;
	ioctl_fn fn = NULL;
	size_t input_param_size;
	struct dm_ioctl param_kernel;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (_IOC_TYPE(command) != DM_IOCTL)
		return -ENOTTY;

	cmd = _IOC_NR(command);

	/*
	 * Check the interface version passed in.  This also
	 * writes out the kernel's interface version.
	 */
	r = check_version(cmd, user, &param_kernel);
	if (r)
		return r;

	/*
	 * Nothing more to do for the version command.
	 */
	if (cmd == DM_VERSION_CMD)
		return 0;

	fn = lookup_ioctl(cmd, &ioctl_flags);
	if (!fn) {
		DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
		return -ENOTTY;
	}

	/*
	 * Copy the parameters into kernel space.
	 */
	r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
	if (r)
		return r;

	input_param_size = param->data_size;
	r = validate_params(cmd, param);
	if (r)
		goto out;

	param->data_size = offsetof(struct dm_ioctl, data);
	r = fn(file, param, input_param_size);

	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);

	if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
		dm_issue_global_event();

	/*
	 * Copy the results back to userland.
	 */
	if (!r && copy_to_user(user, param, param->data_size))
		r = -EFAULT;

out:
	free_params(param, input_param_size, param_flags);
	return r;
}
/**
 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
 * @md: Pointer to mapped_device
 * @name: Buffer (size DM_NAME_LEN) for name
 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
 *
 * Returns 0 on success, -ENXIO if @md is NULL or is not in the hash table.
 */
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
{
	int r = 0;
	struct hash_cell *hc;

	if (!md)
		return -ENXIO;

	/* dm_hash_cells_mutex guards hc->name/hc->uuid against rename. */
	mutex_lock(&dm_hash_cells_mutex);
	hc = dm_get_mdptr(md);
	if (!hc) {
		r = -ENXIO;
		goto out;
	}

	if (name)
		strcpy(name, hc->name);
	if (uuid)
		strcpy(uuid, hc->uuid ? : "");

out:
	/* NOTE(review): unlock + return reconstructed — original tail lost in extraction. */
	mutex_unlock(&dm_hash_cells_mutex);
	return r;
}
/**
 * dm_early_create - create a mapped device in early boot.
 *
 * @dmi: Contains main information of the device mapping to be created.
 * @spec_array: array of pointers to struct dm_target_spec. Describes the
 *              mapping table of the device.
 * @target_params_array: array of strings with the parameters to a specific
 *                       target.
 *
 * Instead of having the struct dm_target_spec and the parameters for every
 * target embedded at the end of struct dm_ioctl (as performed in a normal
 * ioctl), pass them as arguments, so the caller doesn't need to serialize them.
 * The size of the spec_array and target_params_array is given by
 * @dmi->target_count.
 * This function is supposed to be called in early boot, so locking mechanisms
 * to protect against concurrent loads are not required.
 */
int __init dm_early_create(struct dm_ioctl *dmi, struct dm_target_spec **spec_array,
			   char **target_params_array)
{
	int r, m = DM_ANY_MINOR;
	struct dm_table *t, *old_map;
	struct mapped_device *md;
	unsigned int i;

	if (!dmi->target_count)
		return -EINVAL;

	r = check_name(dmi->name);
	if (r)
		return r;

	if (dmi->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(dmi->dev));

	/* alloc dm device */
	r = dm_create(m, &md);
	if (r)
		return r;

	/* hash insert */
	r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
	if (r)
		goto err_destroy_dm;

	/* alloc table */
	r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
	if (r)
		goto err_hash_remove;

	/* add targets */
	for (i = 0; i < dmi->target_count; i++) {
		r = dm_table_add_target(t, spec_array[i]->target_type,
					(sector_t) spec_array[i]->sector_start,
					(sector_t) spec_array[i]->length,
					target_params_array[i]);
		if (r) {
			DMERR("error adding target to table");
			goto err_destroy_table;
		}
	}

	/* finish table */
	r = dm_table_complete(t);
	if (r)
		goto err_destroy_table;

	/* setup md->queue to reflect md's type (may block) */
	r = dm_setup_md_queue(md, t);
	if (r) {
		DMERR("unable to set up device queue for new table.");
		goto err_destroy_table;
	}

	/* Set new map */
	dm_suspend(md, 0);
	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		r = PTR_ERR(old_map);
		goto err_destroy_table;
	}
	set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));

	/* resume device */
	r = dm_resume(md);
	if (r)
		goto err_destroy_table;

	DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);
	dm_put(md);
	return 0;

	/*
	 * NOTE(review): error unwinding below reconstructed — the 'err_*'
	 * labels referenced by the gotos above were lost when this file was
	 * extracted.  Confirm against upstream dm-ioctl.c.
	 */
err_destroy_table:
	dm_table_destroy(t);
err_hash_remove:
	down_write(&_hash_lock);
	(void) __hash_remove(__get_name_cell(dmi->name));
	up_write(&_hash_lock);
	/* release reference from __get_name_cell */
	dm_put(md);
err_destroy_dm:
	dm_put(md);
	dm_destroy(md);
	return r;
}
/*
 * NOTE(review): the text below is boilerplate from the web page this file
 * was extracted from; it is not part of the source.  Original German,
 * translated: "The information on this website has been compiled carefully
 * and to the best of our knowledge.  However, no guarantee is given as to
 * the completeness, correctness, or quality of the information provided.
 * Note: the syntax colouring and the measurement are still experimental."
 */