/*
 * Platform neutral description of a scsi request -
 * this remains the same across the write regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define STORVSC_MAX_CMD_LEN			0x10

/* Sense buffer size is the same for all versions since Windows 8 */
#define STORVSC_SENSE_BUFFER_SIZE		0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14

/*
 * The storage protocol version is determined during the
 * initial exchange with the host. It will indicate which
 * storage functionality is available in the host.
 */
static int vmstor_proto_version;
/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	u16 major_minor;

	/*
	 * Revision number is auto-incremented whenever this file is changed
	 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
	 * definitely indicate incompatibility--but it does indicate mismatched
	 * builds.
	 * This is only used on the windows side. Just set it to 0.
	 */
	u16 revision;
} __packed;
/*
 * SRB status codes and masks. In the 8-bit field, the two high order bits
 * are flags, while the remaining 6 bits are an integer status code. The
 * definitions here include only the subset of the integer status codes that
 * are tested for in this driver.
 */
#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_QUEUE_FROZEN		0x40
/* Module parameters tuning queue/channel behavior; see MODULE_PARM_DESC text. */
module_param(storvsc_max_hw_queues, uint, 0644);
MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");

module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");

/* Channel-selection low-water mark, in percent of available ring space. */
static int ring_avail_percent_lowater = 10;
module_param(ring_avail_percent_lowater, int, S_IRUGO);
MODULE_PARM_DESC(ring_avail_percent_lowater,
		"Select a channel if available ring size > this in percent");
/*
 * Timeout in seconds for all devices managed by this driver.
 */
static const int storvsc_timeout = 180;
/* * Each unique Port/Path/Target represents 1 channel ie scsi * controller. In reality, the pathid, targetid is always 0 * and the port is set by us
*/ unsignedint port_number; unsignedchar path_id; unsignedchar target_id;
/* * Max I/O, the device can support.
*/
u32 max_transfer_bytes; /* * Number of sub-channels we will open.
*/
u16 num_sc; struct vmbus_channel **stor_chns; /* * Mask of CPUs bound to subchannels.
*/ struct cpumask alloced_cpus; /* * Serializes modifications of stor_chns[] from storvsc_do_io() * and storvsc_change_target_cpu().
*/
spinlock_t lock; /* Used for vsc/vsp channel reset process */ struct storvsc_cmd_request init_request; struct storvsc_cmd_request reset_request; /* * Currently active port and node names for FC devices.
*/
u64 node_name;
u64 port_name; #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) struct fc_rport *rport; #endif
};
	/* NOTE(review): tail of a host-scan work handler; its header is above this chunk. */
	host = host_device->host;
	/*
	 * Before scanning the host, first check to see if any of the
	 * currently known devices have been hot removed. We issue a
	 * "unit ready" command against all currently known devices.
	 * This I/O will result in an error for devices that have been
	 * removed. As part of handling the I/O error, we remove the device.
	 *
	 * When a LUN is added or removed, the host sends us a signal to
	 * scan the host. Thus we are forced to discover the LUNs that
	 * may have been removed this way.
	 */
	mutex_lock(&host->scan_mutex);
	shost_for_each_device(sdev, host)
		scsi_test_unit_ready(sdev, 1, 1, NULL);
	mutex_unlock(&host->scan_mutex);
	/*
	 * Now scan the host to discover LUNs that may have been added.
	 */
	scsi_scan_host(host);
}
	/* NOTE(review): tail of a LUN-removal work handler; header above this chunk. */
	if (sdev) {
		/* Device was found: remove it and drop the lookup reference. */
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	/* Drop the host reference taken when this work item was queued. */
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}
/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we might receive an unsolicited message
 * from the host, we have implemented a mechanism to gurantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

	/*
	 * NOTE(review): interior of a target-CPU rebalance routine (likely
	 * storvsc_change_target_cpu); its header is not in this chunk.
	 */
	/* See storvsc_do_io() -> get_og_chn(). */
	spin_lock_irqsave(&stor_device->lock, flags);

	/*
	 * Determines if the storvsc device has other channels assigned to
	 * the "old" CPU to update the alloced_cpus mask and the stor_chns
	 * array.
	 */
	if (device->channel != channel && device->channel->target_cpu == old) {
		cur_chn = device->channel;
		old_is_alloced = true;
		goto old_is_alloced;
	}
	list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
		if (cur_chn == channel)
			continue;
		if (cur_chn->target_cpu == old) {
			old_is_alloced = true;
			goto old_is_alloced;
		}
	}

old_is_alloced:
	if (old_is_alloced)
		WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
	else
		cpumask_clear_cpu(old, &stor_device->alloced_cpus);

	/* "Flush" the stor_chns array. */
	for_each_possible_cpu(cpu) {
		if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
					cpu, &stor_device->alloced_cpus))
			WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
	}
	/* NOTE(review): tail of the VMBus request-ID generator; header above this chunk. */
	if (rqst_addr == VMBUS_RQST_INIT)
		return VMBUS_RQST_INIT;
	if (rqst_addr == VMBUS_RQST_RESET)
		return VMBUS_RQST_RESET;

	/*
	 * Cannot return an ID of 0, which is reserved for an unsolicited
	 * message from Hyper-V.
	 */
	return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
}
	/* NOTE(review): tail of a sub-channel creation callback; header above this chunk. */
	/* Add the sub-channel to the array of available channels. */
	stor_device->stor_chns[new_sc->target_cpu] = new_sc;
	cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
}
/*
 * Request creation of the sub-channels offered by the host and wait for
 * the host's completion. Sub-channel open/use is then driven by
 * vmbus_process_offer() via channel->sc_creation_callback.
 */
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
	struct device *dev = &device->device;
	struct storvsc_device *stor_device;
	int num_sc;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	/*
	 * If the number of CPUs is artificially restricted, such as
	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
	 * sub-channels >= the number of CPUs. These sub-channels
	 * should not be created. The primary channel is already created
	 * and assigned to one CPU, so check against # CPUs - 1.
	 */
	num_sc = min((int)(num_online_cpus() - 1), max_chns);
	if (!num_sc)
		return;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	/*
	 * NOTE(review): 'request' and 'vstor_packet' are used below without a
	 * visible initialization in this chunk (upstream assigns request =
	 * &stor_device->init_request and builds a CREATE_SUB_CHANNELS packet
	 * here) — confirm against the full file before relying on this body.
	 */
	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_INIT,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
		return;
	}

	t = wait_for_completion_timeout(&request->wait_event,
					storvsc_timeout * HZ);
	if (t == 0) {
		dev_err(dev, "Failed to create sub-channel: timed out\n");
		return;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
			vstor_packet->operation, vstor_packet->status);
		return;
	}

	/*
	 * We need to do nothing here, because vmbus_process_offer()
	 * invokes channel->sc_creation_callback, which will open and use
	 * the sub-channel(s).
	 */
}
/*
 * Cache the currently active FC port and node world-wide names from a
 * host FCHBA_DATA packet into the per-device state, selecting the
 * primary or secondary pair based on the primary_active flag.
 */
static void cache_wwn(struct storvsc_device *stor_device,
		      struct vstor_packet *vstor_packet)
{
	/*
	 * Cache the currently active port and node ww names.
	 */
	if (vstor_packet->wwn_packet.primary_active) {
		stor_device->node_name =
			wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn);
		stor_device->port_name =
			wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn);
	} else {
		stor_device->node_name =
			wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn);
		stor_device->port_name =
			wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn);
	}
}
	/*
	 * NOTE(review): discontinuous interior of the vsc/vsp protocol
	 * negotiation routine — the loop body below appears truncated
	 * (braces do not balance within this chunk); confirm against the
	 * full file.
	 *
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret)
		return ret;
	/*
	 * Query host supported protocol version.
	 */
	for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
		/* reuse the packet for version range supported */
		memset(vstor_packet, 0, sizeof(struct vstor_packet));
		vstor_packet->operation =
			VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;

		/*
		 * The revision number is only used in Windows; set it to 0.
		 */
		vstor_packet->version.revision = 0;
		ret = storvsc_execute_vstor_op(device, request, false);
		if (ret != 0)
			return ret;

		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO)
			return -EINVAL;

		if (vstor_packet->status == 0) {
			vmstor_proto_version = protocol_version[i];

	/* (chunk resumes later in the same function) */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret != 0)
		return ret;

	/*
	 * Check to see if multi-channel support is there.
	 * Hosts that implement protocol version of 5.1 and above
	 * support multi-channel.
	 */
	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;

	/*
	 * Allocate state to manage the sub-channels.
	 * We allocate an array based on the number of CPU ids. This array
	 * is initially sparsely populated for the CPUs assigned to channels:
	 * primary + sub-channels. As I/Os are initiated by different CPUs,
	 * the slots for all online CPUs are populated to evenly distribute
	 * the load across all channels.
	 */
	stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *),
					 GFP_KERNEL);
	if (stor_device->stor_chns == NULL)
		return -ENOMEM;
	/* NOTE(review): interior of the SRB error handler; header above this chunk. */
	switch (SRB_STATUS(vm_srb->srb_status)) {
	case SRB_STATUS_ERROR:
	case SRB_STATUS_ABORTED:
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_INTERNAL_ERROR:
	case SRB_STATUS_TIMEOUT:
	case SRB_STATUS_SELECTION_TIMEOUT:
	case SRB_STATUS_BUS_RESET:
	case SRB_STATUS_DATA_OVERRUN:
		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
			/* Check for capacity change */
			if ((asc == 0x2a) && (ascq == 0x9)) {
				process_err_fn = storvsc_device_scan;
				/* Retry the I/O that triggered this. */
				set_host_byte(scmnd, DID_REQUEUE);
				goto do_work;
			}

			/*
			 * Check for "Operating parameters have changed"
			 * due to Hyper-V changing the VHD/VHDX BlockSize
			 * when adding/removing a differencing disk. This
			 * causes discard_granularity to change, so do a
			 * rescan to pick up the new granularity. We don't
			 * want scsi_report_sense() to output a message
			 * that a sysadmin wouldn't know what to do with.
			 */
			if ((asc == 0x3f) && (ascq != 0x03) &&
					(ascq != 0x0e)) {
				process_err_fn = storvsc_device_scan;
				set_host_byte(scmnd, DID_REQUEUE);
				goto do_work;
			}

			/*
			 * Otherwise, let upper layer deal with the
			 * error when sense message is present
			 */
			return;
		}

		/*
		 * If there is an error; offline the device since all
		 * error recovery strategies would have already been
		 * deployed on the host side. However, if the command
		 * were a pass-through command deal with it appropriately.
		 */
		switch (scmnd->cmnd[0]) {
		case ATA_16:
		case ATA_12:
			set_host_byte(scmnd, DID_PASSTHROUGH);
			break;
		/*
		 * On some Hyper-V hosts TEST_UNIT_READY command can
		 * return SRB_STATUS_ERROR. Let the upper level code
		 * deal with it based on the sense information.
		 */
		case TEST_UNIT_READY:
			break;
		default:
			set_host_byte(scmnd, DID_ERROR);
		}
		return;

	case SRB_STATUS_INVALID_LUN:
		set_host_byte(scmnd, DID_NO_CONNECT);
		process_err_fn = storvsc_remove_lun;
		goto do_work;

	}
	return;

do_work:
	/*
	 * We need to schedule work to process this error; schedule it.
	 */
	wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
	if (!wrk) {
		set_host_byte(scmnd, DID_BAD_TARGET);
		return;
	}
	/* NOTE(review): interior of the command-completion path; header above this chunk. */
	if (scmnd->result) {
		/* Decode and optionally log the sense data for failed commands. */
		sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
			scsi_print_sense_hdr(scmnd->device, "storvsc",
					     &sense_hdr);
	}

	if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
		storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
				     sense_hdr.ascq);
		/*
		 * The Windows driver set data_transfer_length on
		 * SRB_STATUS_DATA_OVERRUN. On other errors, this value
		 * is untouched. In these cases we set it to 0.
		 */
		if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
			data_transfer_length = 0;
	}
	/* NOTE(review): interior of the I/O completion handler; header above this chunk. */
	/*
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 * MAINTENANCE_IN is not supported by HyperV FC passthrough
	 *
	 * Setup srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and off-line the device in that case.
	 */

	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;

	/*
	 * Copy over the sense_info_length, but limit to the known max
	 * size if Hyper-V returns a bad value.
	 */
	stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE,
		vstor_packet->vm_srb.sense_info_length);

	if (vstor_packet->vm_srb.scsi_status != 0 ||
	    vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
		/*
		 * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
		 * return errors when detecting devices using TEST_UNIT_READY,
		 * and logging these as errors produces unhelpful noise.
		 */
		int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
			STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
		/* NOTE(review): interior of the channel receive callback; header above this chunk. */
		if (rqst_id == VMBUS_RQST_INIT) {
			request = &stor_device->init_request;
		} else if (rqst_id == VMBUS_RQST_RESET) {
			request = &stor_device->reset_request;
		} else {
			/* Hyper-V can send an unsolicited message with ID of 0 */
			if (rqst_id == 0) {
				/*
				 * storvsc_on_receive() looks at the vstor_packet in the message
				 * from the ring buffer.
				 *
				 * - If the operation in the vstor_packet is COMPLETE_IO, then
				 *   we call storvsc_on_io_completion(), and dereference the
				 *   guest memory address. Make sure we don't call
				 *   storvsc_on_io_completion() with a guest memory address
				 *   that is zero if Hyper-V were to construct and send such
				 *   a bogus packet.
				 *
				 * - If the operation in the vstor_packet is FCHBA_DATA, then
				 *   we call cache_wwn(), and access the data payload area of
				 *   the packet (wwn_packet); however, there is no guarantee
				 *   that the packet is big enough to contain such area.
				 *   Future-proof the code by rejecting such a bogus packet.
				 */
				if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
				    packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
					dev_err(&device->device, "Invalid packet with ID of 0\n");
					continue;
				}
			} else {
				struct scsi_cmnd *scmnd;
	/* NOTE(review): interior of the device teardown path; header above this chunk. */
	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */
	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	hv_set_drvdata(device, NULL);

	/* Close the channel */
	vmbus_close(device->channel);
/*
 * NOTE(review): this comment belongs to get_og_chn(); the code that follows
 * is the channel-selection interior of storvsc_do_io(). The chunk is
 * discontinuous — confirm against the full file.
 *
 * Our channel array is sparsely populated and we
 * initiated I/O on a processor/hw-q that does not
 * currently have a designated channel. Fix this.
 * The strategy is simple:
 * I. Ensure NUMA locality
 * II. Distribute evenly (best effort)
 */

	request->device = device;
	/*
	 * Select an appropriate channel to send the request out.
	 */
	/* See storvsc_change_target_cpu(). */
	outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
	if (outgoing_channel != NULL) {
		if (outgoing_channel->target_cpu == q_num) {
			/*
			 * Ideally, we want to pick a different channel if
			 * available on the same NUMA node.
			 */
			node_mask = cpumask_of_node(cpu_to_node(q_num));
			for_each_cpu_wrap(tgt_cpu,
				 &stor_device->alloced_cpus, q_num + 1) {
				if (!cpumask_test_cpu(tgt_cpu, node_mask))
					continue;
				if (tgt_cpu == q_num)
					continue;
				channel = READ_ONCE(
					stor_device->stor_chns[tgt_cpu]);
				if (channel == NULL)
					continue;
				if (hv_get_avail_to_write_percent(
							&channel->outbound)
						> ring_avail_percent_lowater) {
					outgoing_channel = channel;
					goto found_channel;
				}
			}

			/*
			 * All the other channels on the same NUMA node are
			 * busy. Try to use the channel on the current CPU
			 */
			if (hv_get_avail_to_write_percent(
						&outgoing_channel->outbound)
					> ring_avail_percent_lowater)
				goto found_channel;

			/*
			 * If we reach here, all the channels on the current
			 * NUMA node are busy. Try to find a channel in
			 * other NUMA nodes
			 */
			for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
				if (cpumask_test_cpu(tgt_cpu, node_mask))
					continue;
				channel = READ_ONCE(
					stor_device->stor_chns[tgt_cpu]);
				if (channel == NULL)
					continue;
				if (hv_get_avail_to_write_percent(
							&channel->outbound)
						> ring_avail_percent_lowater) {
					outgoing_channel = channel;
					goto found_channel;
				}
			}
		}
	} else {
		/* Slot empty: take the lock and designate a channel for this hw queue. */
		spin_lock_irqsave(&stor_device->lock, flags);
		outgoing_channel = stor_device->stor_chns[q_num];
		if (outgoing_channel != NULL) {
			spin_unlock_irqrestore(&stor_device->lock, flags);
			goto found_channel;
		}
		outgoing_channel = get_og_chn(stor_device, q_num);
		spin_unlock_irqrestore(&stor_device->lock, flags);
	}
/* NOTE(review): the tail of this function (its return) lies past this chunk. */
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Set blist flag to permit the reading of the VPD pages even when
	 * the target may claim SPC-2 compliance. MSFT targets currently
	 * claim SPC-2 compliance while they implement post SPC-2 features.
	 * With this flag we can correctly handle WRITE_SAME_16 issues.
	 *
	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
	 * still supports REPORT LUN.
	 */
	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;

	/*
	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
	 * if the device is a MSFT virtual device. If the host is
	 * WIN10 or newer, allow write_same.
	 */
	if (!strncmp(sdevice->vendor, "Msft", 4)) {
		switch (vmstor_proto_version) {
		case VMSTOR_PROTO_VERSION_WIN8:
		case VMSTOR_PROTO_VERSION_WIN8_1:
			sdevice->scsi_level = SCSI_SPC_3;
			break;
		}

		if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
			sdevice->no_write_same = 0;
	}
	/* NOTE(review): interior of the BIOS-geometry helper; header above this chunk. */
	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;
	/* NOTE(review): tail of the host reset handler; header above this chunk. */
	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_RESET,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event,
					storvsc_timeout * HZ);
	if (t == 0)
		return TIMEOUT_ERROR;

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and return to us
	 * There is a potential race here where the host may be in
	 * the process of responding when we return from here.
	 * Just wait for all in-transit packets to be accounted for
	 * before we return from here.
	 */
	storvsc_wait_to_drain(stor_device);

	return SUCCESS;
}
/* * The host guarantees to respond to each command, although I/O latencies might * be unbounded on Azure. Reset the timer unconditionally to give the host a * chance to perform EH.
*/ staticenum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{ return SCSI_EH_RESET_TIMER;
}
	/* NOTE(review): tail of the legacy-host command filter; header above this chunk. */
	switch (scsi_op) {
	/* the host does not handle WRITE_SAME, log accident usage */
	case WRITE_SAME:
	/*
	 * smartd sends this command and the host does not handle
	 * this. So, don't send it.
	 */
	case SET_WINDOW:
		set_host_byte(scmnd, DID_ERROR);
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}
	/* NOTE(review): discontinuous interior of queuecommand; header above this chunk. */
	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
		/*
		 * On legacy hosts filter unimplemented commands.
		 * Future hosts are expected to correctly handle
		 * unsupported commands. Furthermore, it is
		 * possible that some of the currently
		 * unsupported commands maybe supported in
		 * future versions of the host.
		 */
		if (!storvsc_scsi_cmd_ok(scmnd)) {
			scsi_done(scmnd);
			return 0;
		}
	}

	/* Setup the cmd request */
	cmd_request->cmd = scmnd;

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_DATA_IN;
		break;
	case DMA_NONE:
		vm_srb->data_in = UNKNOWN_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
		break;
	default:
		/*
		 * This is DMA_BIDIRECTIONAL or something else we are never
		 * supposed to see here.
		 */
		WARN(1, "Unexpected data direction: %d\n",
		     scmnd->sc_data_direction);
		return -EINVAL;
	}

	sg_count = scsi_dma_map(scmnd);
	if (sg_count < 0) {
		ret = SCSI_MLQUEUE_DEVICE_BUSY;
		goto err_free_payload;
	}

		/* (chunk resumes inside the sg-list mapping branch) */
		for_each_sg(sgl, sg, sg_count, j) {
			/*
			 * Init values for the current sgl entry. hvpfns_to_add
			 * is in units of Hyper-V size pages. Handling the
			 * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
			 * values of sgl->offset that are larger than PAGE_SIZE.
			 * Such offsets are handled even on other than the first
			 * sgl entry, provided they are a multiple of PAGE_SIZE.
			 */
			hvpfn = HVPFN_DOWN(sg_dma_address(sg));
			hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
						 sg_dma_len(sg)) - hvpfn;

			/*
			 * Fill the next portion of the PFN array with
			 * sequential Hyper-V PFNs for the contiguous physical
			 * memory described by the sgl entry. The end of the
			 * last sgl should be reached at the same time that
			 * the PFN array is filled.
			 */
			while (hvpfns_to_add--)
				payload->range.pfn_array[i++] = hvpfn++;
		}
	}
	/*
	 * NOTE(review): discontinuous interior of the probe routine — the
	 * 'default:' label below belongs to a switch whose head is not in
	 * this chunk; confirm against the full file.
	 *
	 * We support sub-channels for storage on SCSI and FC controllers.
	 * The number of sub-channels offered is based on the number of
	 * VCPUs in the guest.
	 */
	if (!dev_is_ide)
		max_sub_channels =
			(num_cpus - 1) / storvsc_vcpus_per_sub_channel;

	default:
		host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_IDE_MAX_TARGETS;
		host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
		break;
	}
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/*
	 * Any reasonable Hyper-V configuration should provide
	 * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE,
	 * protecting it from any weird value.
	 */
	max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
	if (is_fc)
		max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);

	/* max_hw_sectors_kb */
	host->max_sectors = max_xfer_bytes >> 9;
	/*
	 * There are 2 requirements for Hyper-V storvsc sgl segments,
	 * based on which the below calculation for max segments is
	 * done:
	 *
	 * 1. Except for the first and last sgl segment, all sgl segments
	 *    should be align to HV_HYP_PAGE_SIZE, that also means the
	 *    maximum number of segments in a sgl can be calculated by
	 *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
	 *
	 * 2. Except for the first and last, each entry in the SGL must
	 *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
	 */
	host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
	/*
	 * For non-IDE disks, the host supports multiple channels.
	 * Set the number of HW queues we are supporting.
	 */
	if (!dev_is_ide) {
		if (storvsc_max_hw_queues > num_present_cpus) {
			storvsc_max_hw_queues = 0;
			storvsc_log(device, STORVSC_LOGGING_WARN,
				"Resetting invalid storvsc_max_hw_queues value to default.\n");
		}
		if (storvsc_max_hw_queues)
			host->nr_hw_queues = storvsc_max_hw_queues;
		else
			host->nr_hw_queues = num_present_cpus;
	}

	/*
	 * Set the error handler work queue.
	 */
	host_dev->handle_error_wq =
			alloc_ordered_workqueue("storvsc_error_wq_%d",
						0,
						host->host_no);
	if (!host_dev->handle_error_wq) {
		ret = -ENOMEM;
		goto err_out2;
	}
	INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out3;

err_out2:
	/*
	 * Once we have connected with the host, we would need to
	 * invoke storvsc_dev_remove() to rollback this state and
	 * this call also frees up the stor_device; hence the jump around
	 * err_out1 label.
	 */
	storvsc_dev_remove(device);
	goto err_out0;
/* NOTE(review): the tail of this function (final return) lies past this chunk. */
static int __init storvsc_drv_init(void)
{
	int ret;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
	max_outstanding_req_per_channel =
		((aligned_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64),
		sizeof(u64)));

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	/* FC transport template must exist before the driver registers. */
	fc_transport_template = fc_attach_transport(&fc_transport_functions);
	if (!fc_transport_template)
		return -ENODEV;
#endif

	ret = vmbus_driver_register(&storvsc_drv);

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	/* Roll back the FC transport template if registration failed. */
	if (ret)
		fc_release_transport(fc_transport_template);
#endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.