// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * Filename: target_core_iblock.c * * This file contains the Storage Engine <-> Linux BlockIO transport * specific functions. * * (c) Copyright 2003-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> *
******************************************************************************/
/* * Enable write same emulation for IBLOCK and use 0xFFFF as * the smaller WRITE_SAME(10) only has a two-byte block count.
*/
max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd); if (max_write_zeroes_sectors)
dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors; else
dev->dev_attrib.max_write_same_len = 0xFFFF;
if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd); if (!bi) return 0;
switch (bi->csum_type) { case BLK_INTEGRITY_CSUM_IP:
pr_err("IBLOCK export of blk_integrity: %s not supported\n",
blk_integrity_profile_name(bi));
ret = -ENOSYS; goto out_blkdev_put; case BLK_INTEGRITY_CSUM_CRC: if (bi->flags & BLK_INTEGRITY_REF_TAG)
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; else
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; break; default: break;
}
/* * Each se_device has a per cpu work this can be run from. We * shouldn't have multiple threads on the same cpu calling this * at the same time.
*/
ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()]; if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags)) return NULL;
if (!refcount_dec_and_test(&ibr->pending)) return;
if (blk_status == BLK_STS_RESV_CONFLICT)
status = SAM_STAT_RESERVATION_CONFLICT; elseif (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION; else
status = SAM_STAT_GOOD;
if (bio->bi_status) {
pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); /* * Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic();
}
bio_put(bio);
iblock_complete_cmd(cmd, blk_status);
}
staticstruct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
blk_opf_t opf)
{ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); struct bio *bio;
/* * Only allocate as many vector entries as the bio code allows us to, * we'll loop later on until we have handled the whole request.
*/
bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
GFP_NOIO, &ib_dev->ibd_bio_set); if (!bio) {
pr_err("Unable to allocate memory for bio\n"); return NULL;
}
/*
 * Pop and submit every bio on @list under a single block-layer plug so
 * merging still happens for fabric drivers that issue one bio at a time.
 */
static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}
/*
 * Completion handler for the cache-flush bio issued by
 * iblock_execute_sync_cache().  bi_private carries the se_cmd to complete,
 * or NULL when the SYNCHRONIZE CACHE was already completed early (Immed=1).
 */
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
	bio->bi_end_io = iblock_end_io_flush;
	/* Only hook the cmd up for completion if we didn't complete it above. */
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}
buf = kmap(sg_page(sg)) + sg->offset; if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* * Fall back to block_execute_write_same() slow-path if * incoming WRITE_SAME payload does not contain zeros.
*/
not_zero = memchr_inv(buf, 0x00, cmd->data_length);
kunmap(sg_page(sg));
if (not_zero) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = blkdev_issue_zeroout(bdev,
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)),
GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); if (ret) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with IBLOCK" " backends not supported\n"); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
if (!cmd->t_data_nents) return TCM_INVALID_CDB_FIELD;
if (data_direction == DMA_TO_DEVICE) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
/* * Set bits to indicate WRITE_ODIRECT so we are not throttled * by WBT.
*/
opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; /* * Force writethrough using REQ_FUA if a volatile write cache * is not enabled, or if initiator set the Force Unit Access bit.
*/
miter_dir = SG_MITER_TO_SG; if (bdev_fua(ib_dev->ibd_bd)) { if (cmd->se_cmd_flags & SCF_FUA)
opf |= REQ_FUA; elseif (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
} else {
opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG;
}
if (!sgl_nents) {
refcount_set(&ibr->pending, 1);
iblock_complete_cmd(cmd, BLK_STS_OK); return 0;
}
bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf); if (!bio) goto fail_free_ibr;
bio_list_init(&list);
bio_list_add(&list, bio);
refcount_set(&ibr->pending, 2);
bio_cnt = 1;
if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
miter_dir);
for_each_sg(sgl, sg, sgl_nents, i) { /* * XXX: if the length the device accepts is shorter than the * length of the S/G list entry this will cause and * endless loop. Better hope no driver uses huge pages.
*/ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) { if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
rc = iblock_alloc_bip(cmd, bio, &prot_miter); if (rc) goto fail_put_bios;
}
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list);
bio_cnt = 0;
}
bio = iblock_get_bio(cmd, block_lba, sg_num, opf); if (!bio) goto fail_put_bios;
if (!ops) {
pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
switch (sa) { case PRO_REGISTER: case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: if (!ops->pr_register) {
pr_err("block device does not support pr_register.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/* The block layer pr ops always enables aptpl */ if (!aptpl)
pr_info("APTPL not set by initiator, but will be used.\n");
ret = ops->pr_register(bdev, key, sa_key,
sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY); break; case PRO_RESERVE: if (!ops->pr_reserve) {
pr_err("block_device does not support pr_reserve.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0); break; case PRO_CLEAR: if (!ops->pr_clear) {
pr_err("block_device does not support pr_clear.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
ret = ops->pr_clear(bdev, key); break; case PRO_PREEMPT: case PRO_PREEMPT_AND_ABORT: if (!ops->pr_clear) {
pr_err("block_device does not support pr_preempt.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
ret = ops->pr_preempt(bdev, key, sa_key,
scsi_pr_type_to_block(type),
sa == PRO_PREEMPT_AND_ABORT); break; case PRO_RELEASE: if (!ops->pr_clear) {
pr_err("block_device does not support pr_pclear.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
staticvoid iblock_pr_report_caps(unsignedchar *param_data)
{
u16 len = 8;
put_unaligned_be16(len, ¶m_data[0]); /* * When using the pr_ops passthrough method we only support exporting * the device through one target port because from the backend module * level we can't see the target port config. As a result we only * support registration directly from the I_T nexus the cmd is sent * through and do not set ATP_C here. * * The block layer pr_ops do not support passing in initiators so * we don't set SIP_C here.
*/ /* PTPL_C: Persistence across Target Power Loss bit */
param_data[2] |= 0x01; /* * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so * set the TMV: Task Mask Valid bit.
*/
param_data[3] |= 0x80; /* * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
*/
param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */ /* * PTPL_A: Persistence across Target Power Loss Active bit. The block * layer pr ops always enables this so report it active.
*/
param_data[3] |= 0x01; /* * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
*/
param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
}
if (!ops) {
pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
if (!ops->pr_read_keys) {
pr_err("Block device does not support read_keys.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/* * We don't know what's under us, but dm-multipath will register every * path with the same key, so start off with enough space for 16 paths. * which is not a lot of memory and should normally be enough.
*/
paths = 16;
retry:
len = 8 * paths;
keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL); if (!keys) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
keys->num_keys = paths; if (!ops->pr_read_keys(bdev, keys)) { if (keys->num_keys > paths) {
kfree(keys);
paths *= 2; goto retry;
}
} else {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto free_keys;
}
ret = TCM_NO_SENSE;
put_unaligned_be32(keys->generation, ¶m_data[0]); if (!keys->num_keys) {
put_unaligned_be32(0, ¶m_data[4]); goto free_keys;
}
if (!ops) {
pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
if (!ops->pr_read_reservation) {
pr_err("Block device does not support read_keys.\n"); return TCM_UNSUPPORTED_SCSI_OPCODE;
}
if (ops->pr_read_reservation(bdev, &rsv)) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
put_unaligned_be32(rsv.generation, ¶m_data[0]); if (!block_pr_type_to_scsi(rsv.type)) {
put_unaligned_be32(0, ¶m_data[4]); return TCM_NO_SENSE;
}
put_unaligned_be32(16, ¶m_data[4]);
if (cmd->data_length < 16) return TCM_NO_SENSE;
put_unaligned_be64(rsv.key, ¶m_data[8]);
if (cmd->data_length < 22) return TCM_NO_SENSE;
param_data[21] = block_pr_type_to_scsi(rsv.type);
return TCM_NO_SENSE;
}
/*
 * Dispatch a PERSISTENT RESERVE IN service action @sa to the matching
 * block-layer pr_ops helper, writing the response into @param_data.
 */
static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
					   unsigned char *param_data)
{
	sense_reason_t ret = TCM_NO_SENSE;

	switch (sa) {
	case PRI_REPORT_CAPABILITIES:
		iblock_pr_report_caps(param_data);
		break;
	case PRI_READ_KEYS:
		ret = iblock_pr_read_keys(cmd, param_data);
		break;
	case PRI_READ_RESERVATION:
		ret = iblock_pr_read_reservation(cmd, param_data);
		break;
	default:
		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * NOTE(review): the trailing "return ret;" and closing brace were
	 * truncated in this chunk; restored per the upstream implementation.
	 */
	return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.