/** * iucv_msg_length() - Returns the length of an iucv message. * @msg: Pointer to struct iucv_message, MUST NOT be NULL * * The function returns the length of the specified iucv message @msg of data * stored in a buffer and of data stored in the parameter list (PRMDATA). * * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket * data: * PRMDATA[0..6] socket data (max 7 bytes); * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) * * The socket data length is computed by subtracting the socket data length * value from 0xFF. * If the socket data len is greater 7, then PRMDATA can be used for special * notifications (see iucv_sock_shutdown); and further, * if the socket data len is > 7, the function returns 8. * * Use this function to allocate socket buffers to store iucv message data.
 */ staticinline size_t iucv_msg_length(struct iucv_message *msg)
{
size_t datalen;
/* NOTE(review): function body is truncated here -- the computation of
 * datalen (and the return statement) documented above is missing from
 * this file; restore it from the upstream af_iucv source. Also note the
 * fused "staticinline" token, which will not compile as-is.
 */
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/** * iucv_below_msglim() - function to check if messages can be sent * @sk: sock structure * * Returns true if the send queue length is lower than the message limit. * Always returns true if the socket is not connected (no iucv path for * checking the message limit).
*/ staticinlineint iucv_below_msglim(struct sock *sk)
{ struct iucv_sock *iucv = iucv_sk(sk);
/* Whoever resets the path pointer, must sever and free it. */ if (xchg(&iucv->path, NULL)) { if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
pr_iucv->path_sever(path, user_data);
} else
pr_iucv->path_sever(path, NULL);
iucv_path_free(path);
}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	/* reserve room for the transport header plus link-layer headroom */
	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway: temporarily lift
		 * SEND_SHUTDOWN and restore it after the send below
		 */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
/* Close an IUCV socket */ staticvoid iucv_sock_close(struct sock *sk)
{ struct iucv_sock *iucv = iucv_sk(sk); unsignedlong timeo; int err = 0;
lock_sock(sk);
/* state machine: each case falls through to the teardown work of the next */
switch (sk->sk_state) { case IUCV_LISTEN:
iucv_sock_cleanup_listen(sk); break;
case IUCV_CONNECTED: if (iucv->transport == AF_IUCV_TRANS_HIPER) {
/* HIPER transport: notify the peer before disconnecting */
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
fallthrough;
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
/* NOTE(review): iucv_sock_close() is truncated here -- the linger wait,
 * the IUCV_CLOSING/default cases, the path severing, and the closing
 * brace are missing; the next comment block starts a different function.
 */
/** * iucv_send_iprm() - Send socket data in parameter list of an iucv message. * @path: IUCV path * @msg: Pointer to a struct iucv_message * @skb: The socket data to send, skb->len MUST BE <= 7 * * Send the socket data in the parameter list in the iucv message * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter * list and the socket data len at index 7 (last byte). * See also iucv_msg_length(). * * Returns the error code from the iucv_message_send() call.
 */ staticint iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, struct sk_buff *skb)
{
u8 prmdata[8];
/* NOTE(review): body is truncated -- the copy of skb->data into prmdata,
 * the 0xff - skb->len length byte, and the message-send call described in
 * the kernel-doc above are missing from this file.
 */
/* NOTE(review): orphaned fragment -- this cmsg scan (it uses cmsg, txmsg,
 * err and "goto out" without any visible declarations) belongs to a
 * sendmsg-style function whose opening is missing from this file. It
 * validates an SCM_IUCV_TRGCLS ancillary message and copies the iucv
 * target class into the outgoing message header.
 */
switch (cmsg->cmsg_type) { case SCM_IUCV_TRGCLS: if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
err = -EINVAL; goto out;
}
/* set iucv message target class */
memcpy(&txmsg.class,
(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
break;
default:
/* any other ancillary message type is rejected */
err = -EINVAL; goto out;
}
}
/* NOTE(review): fragment of a sendmsg-style function -- sk, iucv, len,
 * headroom, linear, noblock, msg and the fail/out labels are declared in
 * code not visible in this file.
 */
/* allocate one skb for each iucv message: * this is fine for SOCK_SEQPACKET (unless we want to support * segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
headroom = sizeof(struct af_iucv_trans_hdr) +
LL_RESERVED_SPACE(iucv->hs_dev);
linear = min(len, PAGE_SIZE - headroom);
} else { if (len < PAGE_SIZE) {
linear = len;
} else { /* In nonlinear "classic" iucv skb, * reserve space for iucv_array
*/
headroom = sizeof(struct iucv_array) *
(MAX_SKB_FRAGS + 1);
linear = PAGE_SIZE - headroom;
}
}
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
noblock, &err, 0); if (!skb) goto out; if (headroom)
skb_reserve(skb, headroom);
/* fix up skb accounting: total length = linear part + paged frags */
skb_put(skb, linear);
skb->len = len;
skb->data_len = len - linear;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); if (err) goto fail;
/* wait if outstanding messages for iucv path has reached */
timeo = sock_sndtimeo(sk, noblock);
err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); if (err) goto fail;
/* return -ECONNRESET if the socket is no longer connected */ if (sk->sk_state != IUCV_CONNECTED) {
err = -ECONNRESET; goto fail;
}
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
IUCV_SKB_CB(skb)->tag = txmsg.tag;
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
/* HIPER: account the message before handing it to the hs layer */
atomic_inc(&iucv->msg_sent);
err = afiucv_hs_send(&txmsg, sk, skb, 0); if (err) {
atomic_dec(&iucv->msg_sent); goto out;
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
atomic_inc(&iucv->skbs_in_xmit);
/* NOTE(review): the IPRM-eligibility check and the iucv_send_iprm()
 * call that set err before these err tests are missing from this file.
 */
/* on success: there is no message_complete callback */ /* for an IPRMDATA msg; remove skb from send queue */ if (err == 0) {
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
consume_skb(skb);
}
/* this error should never happen since the */ /* IUCV_IPRMDATA path flag is set... sever path */ if (err == 0x15) {
pr_iucv->path_sever(iucv->path, NULL);
atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE; goto fail;
}
/* NOTE(review): fused "} elseif" below is not valid C and the enclosing
 * braces do not balance within this fragment -- text was lost here.
 */
} elseif (skb_is_nonlinear(skb)) { struct iucv_array *iba = (struct iucv_array *)skb->head; int i;
/* skip iucv_array lying in the headroom */
iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* NOTE(review): fragment of a receive-skb allocation helper (its
 * signature declaring len is missing from this file). It sizes the linear
 * part to at most one page, reserving headroom for an iucv_array when the
 * message must be nonlinear.
 */
if (len < PAGE_SIZE) {
headroom = 0;
linear = len;
} else {
headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
linear = PAGE_SIZE - headroom;
}
skb = alloc_skb_with_frags(headroom + linear, len - linear,
0, &err, GFP_ATOMIC | GFP_DMA);
WARN_ONCE(!skb, "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
len, err); if (skb) { if (headroom)
skb_reserve(skb, headroom);
/* fix up accounting so skb->len reflects the full message length */
skb_put(skb, linear);
skb->len = len;
skb->data_len = len - linear;
} return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message * * Locking: must be called with message_q.lock held
 */ staticvoid iucv_process_message(struct sock *sk, struct sk_buff *skb, struct iucv_path *path, struct iucv_message *msg)
{ int rc; unsignedint len;
len = iucv_msg_length(msg);
/* store msg target class in the second 4 bytes of skb ctrl buffer */ /* Note: the first 4 bytes are reserved for msg tag */
IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */ if ((msg->flags & IUCV_IPRMDATA) && len > 7) { if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
skb->data = NULL;
skb->len = 0;
}
} else { if (skb_is_nonlinear(skb)) { struct iucv_array *iba = (struct iucv_array *)skb->head; int i;
iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* NOTE(review): iucv_process_message() is truncated here -- the rest of
 * the iucv_array setup, the message receive, and the closing braces are
 * missing; the next comment block belongs to a recvmsg-style function.
 */
/* NOTE(review): fragment of a recvmsg-style function -- flags, err,
 * offset, rlen, copied, msg and the done/err_out labels are declared in
 * code not visible in this file.
 */
/* receive/dequeue next skb: * the function understands MSG_PEEK and, thus, does not dequeue skb * only refcount is increased.
*/
skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err;
}
offset = IUCV_SKB_CB(skb)->offset;
rlen = skb->len - offset; /* real length of skb */
copied = min_t(unsignedint, rlen, len); if (!rlen)
/* a zero-length skb signals shutdown from the peer */
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ if (sk->sk_type == SOCK_SEQPACKET) { if (copied < rlen)
msg->msg_flags |= MSG_TRUNC; /* each iucv message contains a complete record */
msg->msg_flags |= MSG_EOR;
}
/* create control message to store iucv msg target class: * get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class); if (err) goto err_out;
/* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) {
/* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM) { if (copied < rlen) {
IUCV_SKB_CB(skb)->offset = offset + copied;
skb_queue_head(&sk->sk_receive_queue, skb); goto done;
}
}
/* NOTE(review): continuation of the recvmsg fragment above -- still
 * inside the !(flags & MSG_PEEK) branch.
 */
consume_skb(skb); if (iucv->transport == AF_IUCV_TRANS_HIPER) {
/* flow control: the peer must never exceed our receive window */
atomic_inc(&iucv->msg_recv); if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
WARN_ON(1);
iucv_sock_close(sk); return -EFAULT;
}
}
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) {
IUCV_SKB_CB(rskb)->offset = 0; if (__sock_queue_rcv_skb(sk, rskb)) { /* handle rcv queue full */
skb_queue_head(&iucv->backlog_skb_q,
rskb); break;
}
rskb = skb_dequeue(&iucv->backlog_skb_q);
} if (skb_queue_empty(&iucv->backlog_skb_q)) { if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk); if (atomic_read(&iucv->msg_recv) >=
iucv->msglimit / 2) {
/* reopen the window; a send failure means the path is gone */
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); if (err) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
}
}
spin_unlock_bh(&iucv->message_q.lock);
}
done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen; if (flags & MSG_PEEK)
skb_unref(skb);
return copied;
err_out: if (!(flags & MSG_PEEK))
/* give the unread skb back to the queue before reporting the error */
skb_queue_head(&sk->sk_receive_queue, skb); else
skb_unref(skb);
/* NOTE(review): orphaned fragment -- this how/state validation (it uses
 * how, err and a "fail" label without visible declarations) belongs to a
 * shutdown-style socket operation whose signature is missing here.
 */
if ((how & ~SHUTDOWN_MASK) || !how) return -EINVAL;
lock_sock(sk); switch (sk->sk_state) { case IUCV_LISTEN: case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_CLOSED:
err = -ENOTCONN; goto fail; default: break;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		/* enable/disable transfer of socket data in the message
		 * parameter list (IUCV_IPRMDATA)
		 */
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			/* msglimit is only changeable before the connection
			 * is established; it must fit the 16-bit IUCV limit
			 */
			if (val < 1 || val > U16_MAX)
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		/* HIPER transport: largest payload that fits the device MTU;
		 * classic transport: effectively unlimited
		 */
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */ staticint iucv_callback_connreq(struct iucv_path *path,
u8 ipvmid[8], u8 ipuser[16])
{ unsignedchar user_data[16]; unsignedchar nuser_data[16]; unsignedchar src_name[8]; struct sock *sk, *nsk; struct iucv_sock *iucv, *niucv; int err;
memcpy(src_name, ipuser, 8);
EBCASC(src_name, 8); /* Find out if this path belongs to af_iucv. */
read_lock(&iucv_sk_list.lock);
iucv = NULL;
sk = NULL;
sk_for_each(sk, &iucv_sk_list.head) if (sk->sk_state == IUCV_LISTEN &&
!memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { /* * Found a listening socket with * src_name == ipuser[0-7].
*/
iucv = iucv_sk(sk); break;
}
read_unlock(&iucv_sk_list.lock); if (!iucv) /* No socket found, not one of our paths. */ return -EINVAL;
bh_lock_sock(sk);
/* Check if parent socket is listening */
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data)); if (sk->sk_state != IUCV_LISTEN) {
/* listener vanished between lookup and lock: refuse the path */
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path); goto fail;
}
/* Check for backlog size */ if (sk_acceptq_is_full(sk)) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path); goto fail;
}
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); if (!nsk) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path); goto fail;
}
/* NOTE(review): iucv_callback_connreq() is truncated here -- the setup of
 * the new child socket, the path_accept call, the fail label, and the
 * closing brace are missing from this file.
 */
/* called if the other communication side shuts down its RECV direction; * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */ staticvoid iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{ struct sock *sk = path->private;
/* NOTE(review): iucv_callback_shutdown() is truncated here -- the
 * lock/state-update/unlock body and the closing brace are missing.
 */
/* Create a new AF_IUCV socket for the given protocol/type. */
staticint iucv_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
{ struct sock *sk;
if (protocol && protocol != PF_IUCV) return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) { case SOCK_STREAM: case SOCK_SEQPACKET: /* currently, proto ops can handle both sk types */
sock->ops = &iucv_sock_ops; break; default: return -ESOCKTNOSUPPORT;
}
sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); if (!sk) return -ENOMEM;
/* NOTE(review): iucv_sock_create() is truncated here -- the socket
 * initialisation call, the final "return 0;", and the closing brace are
 * missing from this file.
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.