/* Debug flag for libiscsi_tcp; runtime-writable via the module parameter. */
static int iscsi_dbg_libtcp;
module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");
/*
 * Scatterlist handling: inside the iscsi_segment, we
 * remember an index into the scatterlist, and set data/size
 * to the current scatterlist entry. For highmem pages, we
 * kmap as needed.
 *
 * Note that the page is unmapped when we return from
 * TCP's data_ready handler, so we may end up mapping and
 * unmapping the same page repeatedly. The whole reason
 * for this is that we shouldn't keep the page mapped
 * outside the softirq.
 */
/** * iscsi_tcp_segment_init_sg - init indicated scatterlist entry * @segment: the buffer object * @sg: scatterlist * @offset: byte offset into that sg entry * * This function sets up the segment so that subsequent * data is copied to the indicated sg entry, at the given * offset.
*/ staticinlinevoid
iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, struct scatterlist *sg, unsignedint offset)
{
segment->sg = sg;
segment->sg_offset = offset;
segment->size = min(sg->length - offset,
segment->total_size - segment->total_copied);
segment->data = NULL;
}
/** * iscsi_tcp_segment_map - map the current S/G page * @segment: iscsi_segment * @recv: 1 if called from recv path * * We only need to possibly kmap data if scatter lists are being used, * because the iscsi passthrough and internal IO paths will never use high * mem pages.
*/ staticvoid iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
{ struct scatterlist *sg;
if (segment->data != NULL || !segment->sg) return;
/* * We always map for the recv path. * * If the page count is greater than one it is ok to send * to the network layer's zero copy send path. If not we * have to go the slow sendmsg path. * * Same goes for slab pages: skb_can_coalesce() allows * coalescing neighboring slab objects into a single frag which * triggers one of hardened usercopy checks.
*/ if (!recv && sendpage_ok(sg_page(sg))) return;
if (recv) {
segment->atomic_mapped = true;
segment->sg_mapped = kmap_atomic(sg_page(sg));
} else {
segment->atomic_mapped = false; /* the xmit path can sleep with the page mapped so use kmap */
segment->sg_mapped = kmap(sg_page(sg));
}
/**
 * iscsi_tcp_segment_done - check whether the segment is complete
 * @tcp_conn: iscsi tcp connection
 * @segment: iscsi segment to check
 * @recv: set to one of this is called from the recv path
 * @copied: number of bytes copied
 *
 * Check if we're done receiving this segment. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 *
 * NOTE(review): this copy appears truncated -- the leading per-call byte
 * accounting (updating segment->copied from @copied) and the final
 * "segment finished" return path are missing, and "unsignedint" is a
 * fused token that will not compile. Restore from a pristine source
 * before building.
 */
int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, int recv, unsigned copied)
{
	unsignedint pad;

	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_segment_unmap(segment);

	/* Do we have more scatterlist entries? */
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
		      segment->total_copied, segment->total_size);
	if (segment->total_copied < segment->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
					  0);
		iscsi_tcp_segment_map(segment, recv);
		BUG_ON(segment->size == 0);
		return 0;
	}

	/* Do we need to handle padding? */
	if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
		pad = iscsi_padding(segment->total_copied);
		if (pad != 0) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "consume %d pad bytes\n", pad);
			/* Extend the segment so the pad bytes are consumed
			 * transparently into the scratch pad buffer. */
			segment->total_size += pad;
			segment->size = pad;
			segment->data = segment->padbuf;
			return 0;
		}
	}

	/*
	 * Set us up for transferring the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (segment->crcp) {
		put_unaligned_le32(~*segment->crcp, segment->digest);
		iscsi_tcp_segment_splice_digest(segment,
				recv ? segment->recv_digest : segment->digest);
		return 0;
	}
/**
 * iscsi_tcp_segment_recv - copy data to segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to copy to
 * @ptr: data pointer
 * @len: amount of data available
 *
 * This function copies up to @len bytes to the
 * given buffer, and returns the number of bytes
 * consumed, which can actually be less than @len.
 *
 * If CRC is enabled, the function will update the CRC while copying.
 * Combining these two operations doesn't buy us a lot (yet),
 * but in the future we could implement combined copy+crc,
 * just way we do for network layer checksums.
 *
 * NOTE(review): truncated fragment -- the loop body that performs the
 * actual copy into segment->data and the function's closing are
 * missing, and "staticint"/"constvoid"/"unsignedint" are fused tokens
 * that will not compile. Restore from a pristine source.
 */
staticint
iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, constvoid *ptr, unsignedint len)
{
	unsignedint copy = 0, copied = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
		if (copied == len) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "copied %d bytes\n", len);
			break;
		}
/**
 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
 * @tcp_conn: iscsi connection to prep for
 *
 * This function always passes NULL for the crcp argument, because when this
 * function is called we do not yet know the final size of the header and want
 * to delay the digest processing until we know that.
 */
void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "(%s)\n",
		      tcp_conn->iscsi_conn->hdrdgst_en ?
		      "digest enabled" : "digest disabled");
	/* Point the receive segment at the BHS buffer; the header-done
	 * callback loops back for any AHS and the digest later. */
	iscsi_segment_init_linear(&tcp_conn->in.segment,
				  tcp_conn->in.hdr_buf,
				  sizeof(struct iscsi_hdr),
				  iscsi_tcp_hdr_recv_done, NULL);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
/*
 * Handle incoming reply to any other type of command
 *
 * NOTE(review): this region is badly garbled. The head (through the
 * iscsi_complete_pdu() call) matches iscsi_tcp_data_recv_done(), but it
 * then runs straight into the body of what is upstream
 * iscsi_tcp_r2t_rsp() -- the identifiers rhdr, session, task, tcp_task,
 * r2t, data_length and data_offset are used without any declaration
 * here -- and finally into the tail of a data-in completion path. The
 * "staticint" and "elseif" tokens are fused and will not compile.
 * Restore the individual functions from a pristine source.
 */
staticint
iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	int rc = 0;

	/* Reject the PDU if the data digest does not verify. */
	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
		return ISCSI_ERR_DATA_DGST;

	rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
				conn->data, tcp_conn->in.datalen);
	if (rc)
		return rc;

	/* NOTE(review): start of fused R2T-response fragment. */

	/*
	 * lib iscsi will update this in the completion handling if there
	 * is status.
	 */
	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);

	spin_lock(&session->back_lock);
	task = iscsi_itt_to_ctask(conn, hdr->itt);
	if (!task) {
		spin_unlock(&session->back_lock);
		return ISCSI_ERR_BAD_ITT;
	} elseif (task->sc->sc_data_direction != DMA_TO_DEVICE) {
		spin_unlock(&session->back_lock);
		return ISCSI_ERR_PROTO;
	}

	/*
	 * A bad target might complete the cmd before we have handled R2Ts
	 * so get a ref to the task that will be dropped in the xmit path.
	 */
	if (task->state != ISCSI_TASK_RUNNING) {
		spin_unlock(&session->back_lock);
		/* Let the path that got the early rsp complete it */
		return 0;
	}

	task->last_xfer = jiffies;
	if (!iscsi_get_task(task)) {
		spin_unlock(&session->back_lock);
		/* Let the path that got the early rsp complete it */
		return 0;
	}

	tcp_conn = conn->dd_data;
	rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
	/* fill-in new R2T associated with the task */
	iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr);
	spin_unlock(&session->back_lock);

	if (tcp_conn->in.datalen) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2t with datalen %d\n",
				  tcp_conn->in.datalen);
		rc = ISCSI_ERR_DATALEN;
		goto put_task;
	}

	if (session->state != ISCSI_STATE_LOGGED_IN) {
		iscsi_conn_printk(KERN_INFO, conn,
				  "dropping R2T itt %d in recovery.\n",
				  task->itt);
		rc = 0;
		goto put_task;
	}

	data_length = be32_to_cpu(rhdr->data_length);
	if (data_length == 0) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with zero data len\n");
		rc = ISCSI_ERR_DATALEN;
		goto put_task;
	}

	if (data_length > session->max_burst)
		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
			      "burst %u. Attempting to execute request.\n",
			      data_length, session->max_burst);

	data_offset = be32_to_cpu(rhdr->data_offset);
	if (data_offset + data_length > task->sc->sdb.length) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with data len %u at offset %u "
				  "and total length %d\n", data_length,
				  data_offset, task->sc->sdb.length);
		rc = ISCSI_ERR_DATALEN;
		goto put_task;
	}

	spin_lock(&tcp_task->pool2queue);
	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
	if (!rc) {
		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
				  "Target has sent more R2Ts than it "
				  "negotiated for or driver has leaked.\n");
		spin_unlock(&tcp_task->pool2queue);
		rc = ISCSI_ERR_PROTO;
		goto put_task;
	}

	/* NOTE(review): start of fused data-in completion tail. */

	if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
		return ISCSI_ERR_DATA_DGST;

	/* check for non-exceptional status */
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		if (rc)
			return rc;
	}

	/* Re-arm for the next PDU header. */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;
}
/**
 * iscsi_tcp_hdr_dissect - process PDU header
 * @conn: iSCSI connection
 * @hdr: PDU header
 *
 * This function analyzes the header of the PDU received,
 * and performs several sanity checks. If the PDU is accompanied
 * by data, the receive buffer is set up to copy the incoming data
 * to the correct location.
 *
 * NOTE(review): garbled fragment. The opening of the switch (opcode)
 * statement and the declarations of rx_crcp, tcp_task and sdb are
 * missing, there is an unmatched closing brace before the
 * __iscsi_complete_pdu() call, and "staticint" is a fused token that
 * will not compile. Restore from a pristine source before building.
 */
staticint
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_task *task;

	/*
	 * Setup copy of Data-In into the struct scsi_cmnd
	 * Scatterlist case:
	 * We set up the iscsi_segment to point to the next
	 * scatterlist entry to copy to. As we go along,
	 * we move on to the next scatterlist entry and
	 * update the digest per-entry.
	 */
	if (conn->datadgst_en &&
	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
		rx_crcp = tcp_conn->rx_crcp;
	ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
		      "offset=%d, datalen=%d)\n",
		      tcp_task->data_offset,
		      tcp_conn->in.datalen);
	task->last_xfer = jiffies;
	rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
				   sdb->table.sgl,
				   sdb->table.nents,
				   tcp_task->data_offset,
				   tcp_conn->in.datalen,
				   iscsi_tcp_process_data_in,
				   rx_crcp);
	spin_unlock(&conn->session->back_lock);
	return rc;
	}
	rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
	spin_unlock(&conn->session->back_lock);
	break;
	case ISCSI_OP_SCSI_CMD_RSP:
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	case ISCSI_OP_R2T:
		/* R2T PDUs never carry AHS data. */
		if (ahslen) {
			rc = ISCSI_ERR_AHSLEN;
			break;
		}
		rc = iscsi_tcp_r2t_rsp(conn, hdr);
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "iscsi_tcp: received buffer of "
					  "len %u but conn buffer is only %u "
					  "(opcode %0x)\n",
					  tcp_conn->in.datalen,
					  ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		/* If there's data coming in with the response,
		 * receive it to the connection's buffer.
		 */
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		fallthrough;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	if (rc == 0) {
		/* Anything that comes with data should have
		 * been handled above. */
		if (tcp_conn->in.datalen)
			return ISCSI_ERR_PROTO;
		iscsi_tcp_hdr_recv_prep(tcp_conn);
	}

	return rc;
}
/**
 * iscsi_tcp_hdr_recv_done - process PDU header
 * @tcp_conn: iSCSI TCP connection
 * @segment: the buffer segment being processed
 *
 * This is the callback invoked when the PDU header has
 * been received. If the header is followed by additional
 * header segments, we go back for more data.
 *
 * NOTE(review): truncated fragment -- the header-digest verification,
 * the call into iscsi_tcp_hdr_dissect() and the closing of the function
 * are missing, and "staticint"/"unsignedint" are fused tokens that will
 * not compile. Restore from a pristine source.
 */
staticint
iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr;

	/* Check if there are additional header segments
	 * *prior* to computing the digest, because we
	 * may need to go back to the caller for more.
	 */
	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
		/* Bump the header length - the caller will
		 * just loop around and get the AHS for us, and
		 * call again. */
		unsignedint ahslen = hdr->hlength << 2;

		/* Make sure we don't overflow */
		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
			return ISCSI_ERR_AHSLEN;

	/* We're done processing the header. See if we're doing
	 * header digests; if so, set up the recv_digest buffer
	 * and go back for more. */
	if (conn->hdrdgst_en &&
	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
		if (segment->digest_len == 0) {
			/*
			 * Even if we offload the digest processing we
			 * splice it in so we can increment the skb/segment
			 * counters in preparation for the data segment.
			 */
			iscsi_tcp_segment_splice_digest(segment,
							segment->recv_digest);
			return 0;
		}
/**
 * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
 * @tcp_conn: iscsi tcp conn
 *
 * returns non zero if we are currently processing or setup to process
 * a header.
 */
inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
{
	/* The header segment is identified by its completion callback. */
	return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
/**
 * iscsi_tcp_recv_skb - Process skb
 * @conn: iscsi connection
 * @skb: network buffer with header and/or data segment
 * @offset: offset in skb
 * @offloaded: bool indicating if transfer was offloaded
 * @status: iscsi TCP status result
 *
 * Will return status of transfer in @status. And will return
 * number of bytes copied.
 *
 * NOTE(review): truncated/fused fragment -- the skb_seq_read() consume
 * loop and the function's closing are missing, and the trailing
 * "if (!sc)" block (using undeclared sc/task) belongs to a different
 * function that was spliced in here. "unsignedint" is a fused token
 * that will not compile. Restore from a pristine source.
 */
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, unsignedint offset, bool offloaded, int *status)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_segment *segment = &tcp_conn->in.segment;
	struct skb_seq_state seq;
	unsignedint consumed = 0;
	int rc = 0;

	ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
	/*
	 * Update for each skb instead of pdu, because over slow networks a
	 * data_in's data could take a while to read in. We also want to
	 * account for r2ts.
	 */
	conn->last_recv = jiffies;

	/* NOTE(review): fused fragment from another function below. */
	if (!sc) {
		/*
		 * mgmt tasks do not have a scatterlist since they come
		 * in from the iscsi interface.
		 */
		ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);
/**
 * iscsi_tcp_task_xmit - xmit normal PDU task
 * @task: iscsi command task
 *
 * We're expected to return 0 when everything was transmitted successfully,
 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
 * of error.
 *
 * NOTE(review): truncated/fused fragment -- the R2T transmit handling
 * after iscsi_tcp_get_curr_r2t() and the function's closing are missing
 * (the "flush:" label has no matching goto in this copy), and the tail
 * starting at the iscsi_conn_setup() call uses undeclared identifiers
 * (cls_conn, cls_session, dd_data_size, conn_idx) and belongs to a
 * different function (connection setup). Restore from a pristine
 * source.
 */
int iscsi_tcp_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	int rc = 0;

flush:
	/* Flush any pending data first. */
	rc = session->tt->xmit_pdu(task);
	if (rc < 0)
		return rc;

	/* mgmt command */
	if (!task->sc) {
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
		return 0;
	}

	/* Are we done already? */
	if (task->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;

	r2t = iscsi_tcp_get_curr_r2t(task);
	if (r2t == NULL) {
		/* Waiting for more R2Ts to arrive. */
		ISCSI_DBG_TCP(conn, "no R2Ts yet\n");
		return 0;
	}

	/* NOTE(review): fused connection-setup fragment below. */
	cls_conn = iscsi_conn_setup(cls_session,
				    sizeof(*tcp_conn) + dd_data_size, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	/*
	 * due to strange issues with iser these are not set
	 * in iscsi_conn_setup
	 */
	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
/*
 * NOTE(review): the text below is extraction residue from a web page (a
 * German-language disclaimer), not part of this source file. English
 * translation of the original: "The information on this web page was
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax highlighting and
 * the measurement are still experimental." Remove before building.
 */