// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 ****************************************************************************/
tf = target_core_get_fabric(name); if (!tf) {
pr_debug("target_core_register_fabric() trying autoload for %s\n",
name);
		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load local fabric modules when the following
		 * is called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for
		 * modules with mkdir(2) system calls with known TCM fabric
		 * modules.
*/
if (!strncmp(name, "iscsi", 5)) { /* * Automatically load the LIO Target fabric module when the * following is called: * * mkdir -p $CONFIGFS/target/iscsi
*/
ret = request_module("iscsi_target_mod"); if (ret < 0) {
pr_debug("request_module() failed for" " iscsi_target_mod.ko: %d\n", ret); return ERR_PTR(-EINVAL);
}
} elseif (!strncmp(name, "loopback", 8)) { /* * Automatically load the tcm_loop fabric module when the * following is called: * * mkdir -p $CONFIGFS/target/loopback
*/
ret = request_module("tcm_loop"); if (ret < 0) {
pr_debug("request_module() failed for" " tcm_loop.ko: %d\n", ret); return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
}
if (!tf) {
pr_debug("target_core_get_fabric() failed for %s\n",
name); return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" " %s\n", tf->tf_ops->fabric_name); /* * On a successful target_core_get_fabric() look, the returned * struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&tf->tf_wwn_cit);
configname = config_item_name(&dev->dev_group.cg_item); if (strlen(configname) >= INQUIRY_MODEL_LEN) {
pr_warn("dev[%p]: Backstore name '%s' is too long for " "INQUIRY_MODEL, truncating to 15 characters\n", dev,
configname);
} /* * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1) * here without potentially breaking existing setups, so continue to * truncate one byte shorter than what can be carried in INQUIRY.
*/
strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
ret = kstrtou32(page, 0, &val); if (ret < 0) return ret;
if (val != TARGET_UA_INTLCK_CTRL_CLEAR
&& val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
&& val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
pr_err("Illegal value %d\n", val); return -EINVAL;
}
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device" " UA_INTRLCK_CTRL while export_count is %d\n",
da->da_dev, da->da_dev->export_count); return -EINVAL;
}
da->emulate_ua_intlck_ctrl = val;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
da->da_dev, val); return count;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while" " export_count is %d\n",
da->da_dev, da->da_dev->export_count); return -EINVAL;
}
da->emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
da->da_dev, flag ? "Enabled" : "Disabled");
return count;
}
staticint target_try_configure_unmap(struct se_device *dev, constchar *config_opt)
{ if (!dev->transport->configure_unmap) {
pr_err("Generic Block Discard not supported\n"); return -ENOSYS;
}
if (!target_dev_configured(dev)) {
pr_err("Generic Block Discard setup for %s requires device to be configured\n",
config_opt); return -ENODEV;
}
if (!dev->transport->configure_unmap(dev)) {
pr_err("Generic Block Discard setup for %s failed\n",
config_opt); return -ENOSYS;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret;
/* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice().
*/ if (flag && !da->max_unmap_block_desc_count) {
ret = target_try_configure_unmap(dev, "emulate_tpu"); if (ret) return ret;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret;
/* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice().
*/ if (flag && !da->max_unmap_block_desc_count) {
ret = target_try_configure_unmap(dev, "emulate_tpws"); if (ret) return ret;
}
ret = kstrtou32(page, 0, &flag); if (ret < 0) return ret;
if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
pr_err("Illegal value %d for pi_prot_type\n", flag); return -EINVAL;
} if (flag == 2) {
pr_err("DIF TYPE2 protection currently not supported\n"); return -ENOSYS;
} if (da->hw_pi_prot_type) {
pr_warn("DIF protection enabled on underlying hardware," " ignoring\n"); return count;
} if (!dev->transport->init_prot || !dev->transport->free_prot) { /* 0 is only allowed value for non-supporting backends */ if (flag == 0) return count;
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name); return -ENOSYS;
} if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n"); return -ENODEV;
} if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device PROT type while" " export_count is %d\n", dev, dev->export_count); return -EINVAL;
}
da->pi_prot_type = flag;
if (flag && !old_prot) {
ret = dev->transport->init_prot(dev); if (ret) {
da->pi_prot_type = old_prot;
da->pi_prot_verify = (bool) da->pi_prot_type; return ret;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret;
if (!flag) return count;
if (!dev->transport->format_prot) {
pr_err("DIF protection format not supported by backend %s\n",
dev->transport->name); return -ENOSYS;
} if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n"); return -ENODEV;
} if (dev->export_count) {
pr_err("dev[%p]: Unable to format SE Device PROT type while" " export_count is %d\n", dev, dev->export_count); return -EINVAL;
}
ret = dev->transport->format_prot(dev); if (ret) return ret;
pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); return count;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret; if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to set force_pr_aptpl while" " export_count is %d\n",
da->da_dev, da->da_dev->export_count); return -EINVAL;
}
ret = kstrtobool(page, &flag); if (ret < 0) return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device" " unmap_zeroes_data while export_count is %d\n",
da->da_dev, da->da_dev->export_count); return -EINVAL;
} /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_configure_device().
*/ if (flag && !da->max_unmap_block_desc_count) {
ret = target_try_configure_unmap(dev, "unmap_zeroes_data"); if (ret) return ret;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
da->da_dev, flag); return count;
}
/* * Note, this can only be called on unexported SE Device Object.
*/ static ssize_t queue_depth_store(struct config_item *item, constchar *page, size_t count)
{ struct se_dev_attrib *da = to_attrib(item); struct se_device *dev = da->da_dev;
u32 val; int ret;
ret = kstrtou32(page, 0, &val); if (ret < 0) return ret;
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while" " export_count is %d\n",
dev, dev->export_count); return -EINVAL;
} if (!val) {
pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev); return -EINVAL;
}
ret = kstrtou32(page, 0, &val); if (ret < 0) return ret;
if (da->da_dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size" " while export_count is %d\n",
da->da_dev, da->da_dev->export_count); return -EINVAL;
}
if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
pr_err("dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n",
da->da_dev, val); return -EINVAL;
}
da->block_size = val;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
da->da_dev, val); return count;
}
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 *
 * NULL-terminated table consumed by configfs when registering a backend's
 * dev_attrib group.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	&attr_emulate_rsoc,
	&attr_submit_type,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 *
 * NULL-terminated table consumed by configfs when registering a
 * passthrough backend's dev_attrib group.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	&attr_alua_support,
	&attr_pgr_support,
	&attr_submit_type,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
/*
 * pr related dev_attrib attributes for devices passing through CDBs,
 * but allowing in core pr emulation.
 *
 * NULL-terminated table; appended to passthrough_attrib_attrs when the
 * backend opts in to core persistent-reservation emulation.
 */
struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
	&attr_enforce_pr_isids,
	&attr_force_pr_aptpl,
	NULL,
};
EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
static ssize_t target_check_inquiry_data(char *buf)
{
size_t len; int i;
len = strlen(buf);
/* * SPC 4.3.1: * ASCII data fields shall contain only ASCII printable characters * (i.e., code values 20h to 7Eh) and may be terminated with one or * more ASCII null (00h) characters.
*/ for (i = 0; i < len; i++) { if (buf[i] < 0x20 || buf[i] > 0x7E) {
pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n"); return -EINVAL;
}
}
len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
} if (len < 0 || len > INQUIRY_VENDOR_LEN) {
pr_err("Emulated T10 Vendor Identification exceeds" " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN) "\n"); return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0) return ret;
/* * Check to see if any active exports exist. If they do exist, fail * here as changing this information on the fly (underneath the * initiator side OS dependent multipath code) could cause negative * effects.
*/ if (dev->export_count) {
pr_err("Unable to set T10 Vendor Identification while" " active %d exports exist\n", dev->export_count); return -EINVAL;
}
len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
} if (len < 0 || len > INQUIRY_MODEL_LEN) {
pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
__stringify(INQUIRY_MODEL_LEN) "\n"); return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0) return ret;
/* * Check to see if any active exports exist. If they do exist, fail * here as changing this information on the fly (underneath the * initiator side OS dependent multipath code) could cause negative * effects.
*/ if (dev->export_count) {
pr_err("Unable to set T10 Model while active %d exports exist\n",
dev->export_count); return -EINVAL;
}
len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
} if (len < 0 || len > INQUIRY_REVISION_LEN) {
pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
__stringify(INQUIRY_REVISION_LEN) "\n"); return -EOVERFLOW;
}
ret = target_check_inquiry_data(stripped);
if (ret < 0) return ret;
/* * Check to see if any active exports exist. If they do exist, fail * here as changing this information on the fly (underneath the * initiator side OS dependent multipath code) could cause negative * effects.
*/ if (dev->export_count) {
pr_err("Unable to set T10 Revision while active %d exports exist\n",
dev->export_count); return -EINVAL;
}
/* * The IEEE COMPANY_ID field should contain a 24-bit canonical * form OUI assigned by the IEEE.
*/
ret = kstrtou32(page, 0, &val); if (ret < 0) return ret;
if (val >= 0x1000000) return -EOVERFLOW;
/* * Check to see if any active exports exist. If they do exist, fail * here as changing this information on the fly (underneath the * initiator side OS dependent multipath code) could cause negative * effects.
*/ if (dev->export_count) {
pr_err("Unable to set Company ID while %u exports exist\n",
dev->export_count); return -EINVAL;
}
t10_wwn->company_id = val;
pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
t10_wwn->company_id);
return count;
}
/*
 * VPD page 0x80 Unit serial
 *
 * configfs show handler: prints the emulated T10 VPD Unit Serial Number
 * for this wwn group into @page.
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	/* unit_serial is a char array; it decays to the same pointer. */
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		       to_t10_wwn(item)->unit_serial);
}
/* * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial * from the struct scsi_device level firmware, do not allow * VPD Unit Serial to be emulated. * * Note this struct scsi_device could also be emulating VPD * information from its drivers/scsi LLD. But for now we assume * it is doing 'the right thing' wrt a world wide unique * VPD Unit Serial Number that OS dependent multipath can depend on.
*/ if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
pr_err("Underlying SCSI device firmware provided VPD" " Unit Serial, ignoring request\n"); return -EOPNOTSUPP;
}
if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
pr_err("Emulated VPD Unit Serial exceeds" " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); return -EOVERFLOW;
} /* * Check to see if any active $FABRIC_MOD exports exist. If they * do exist, fail here as changing this information on the fly * (underneath the initiator side OS dependent multipath code) * could cause negative effects.
*/ if (dev->export_count) {
pr_err("Unable to set VPD Unit Serial while" " active %d $FABRIC_MOD exports exist\n",
dev->export_count); return -EINVAL;
}
/* * This currently assumes ASCII encoding for emulated VPD Unit Serial. * * Also, strip any newline added from the userspace * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, "%s", strstrip(buf));
dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" " %s\n", dev->t10_wwn.unit_serial);
lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n"); return -EINVAL;
}
memcpy(buf, page, count); /* * Any ALUA logical unit alias besides "NULL" means we will be * making a new group association.
*/ if (strcmp(strstrip(buf), "NULL")) { /* * core_alua_get_lu_gp_by_name() will increment reference to * struct t10_alua_lu_gp. This reference is released with * core_alua_get_lu_gp_by_name below().
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); if (!lu_gp_new) return -ENODEV;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp; if (lu_gp) { /* * Clearing an existing lu_gp association, and replacing * with NULL
*/ if (!lu_gp_new) {
pr_debug("Target_Core_ConfigFS: Releasing %s/%s" " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.