// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(ulpq->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}
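/* Illustrative example, not from the original source: a user message
 * fragmented across TSNs 100-102 flows through sctp_ulpq_tail_data()
 * as follows.  TSN 100 (FIRST_FRAG) and TSN 101 (MIDDLE_FRAG) are only
 * stored by sctp_ulpq_reasm(), nothing is deliverable yet, and the
 * function returns 0.  TSN 102 (LAST_FRAG) completes the message:
 * reassembly hands back an event with MSG_EOR set, ordering runs, the
 * event is delivered, and the function returns 1.
 */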
/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}
/* Clear the pd_mode and restart any pending messages waiting
 * for delivery.
 */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);

	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
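/* Illustrative example, not from the original source: sp->pd_mode is a
 * counter on the socket, while ulpq->pd_mode flags one association.
 * With associations A and B both in partial delivery, sp->pd_mode == 2.
 * When A exits PD, sctp_clear_pd() takes the else branch and moves only
 * A's events out of the lobby; when B exits too, the counter hits zero
 * and the whole lobby is spliced to sk_receive_queue in one shot.
 */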
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course _this_ is
	 * the association that is the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;

		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	sctp_queue_purge_ulpevents(skb_list);
	return 0;
}
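/* Illustrative example, not from the original source: the queue chosen
 * in sctp_ulpq_tail_event() depends on the partial-delivery state.
 * With no association in PD, everything goes to sk_receive_queue.
 * While _this_ association is in PD, notifications and unfragmented
 * messages wait in the lobby, but fragments of the partially delivered
 * message keep flowing to the receive queue until MSG_EOR clears PD.
 * If only some _other_ association is in PD, frag_interleave decides
 * between the receive queue and the lobby.
 */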
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
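/* Illustrative example, not from the original source: with the reasm
 * queue holding TSNs 5, 6 and 9, storing TSN 7 fails the tail
 * short-circuit (TSN_lt(9, 7) is false), so the walk runs until
 * TSN_lt(7, 9) breaks at 9 and the event is linked before it,
 * leaving the queue as 5, 6, 7, 9.
 */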
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			new->sk = f_frag->sk;

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we copied the skb, free the old one */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;
	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
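	/* Illustrative example, not from the original source: with the
	 * queue holding TSN 10 (FIRST), 11 (MIDDLE), 12 (MIDDLE) and
	 * 13 (LAST), the walk sets first_frag at 10 and advances
	 * next_tsn through 11, 12 and 13, jumping to 'found' at the
	 * LAST fragment.  If 12 were missing, the LAST case would reset
	 * first_frag and the message would stay queued until the hole
	 * is filled.
	 */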
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(asoc->base.net,
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message.	*/
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;
	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;
		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;
		default:
			return NULL;
		}
	}
	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	return retval;
}
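/* Illustrative example, not from the original source: with the reasm
 * queue holding TSNs 20 (FIRST), 21 (MIDDLE) and 23 (MIDDLE), the walk
 * in sctp_ulpq_retrieve_first() collects 20-21 and stops at the gap,
 * handing the ULP the leading run of a message whose tail is missing;
 * later fragments are then picked up by sctp_ulpq_retrieve_partial().
 */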
/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
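/* Illustrative example, not from the original source: if the reasm
 * queue holds fragments with TSNs 40, 41 and 44 and a FORWARD TSN
 * advances the cumulative point to 42, the walk above frees 40 and 41
 * (their message was abandoned by the sender) and stops at 44, which
 * lies beyond the new cumulative TSN point and may still complete.
 */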
/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid, ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}
	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;
	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
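/* Illustrative example, not from the original source: on a stream whose
 * next expected SSN is 3, an arriving chunk with SSN 5 is parked in the
 * lobby and sctp_ulpq_order() returns NULL.  When SSN 3 arrives it is
 * delivered directly, the expected SSN advances, and
 * sctp_ulpq_retrieve_ordered() appends whatever has become deliverable
 * (SSN 4 if queued, then 5).
 */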
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;
		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}
	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}
	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, &temp);
	}
}
/* Skip over an SSN. This is used during the processing of
 * Forward TSN chunk to skip over the abandoned ordered data
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;
	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
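/* Illustrative example, not from the original source: if stream 2 is
 * waiting for SSN 7 and a FORWARD TSN abandons its ordered data up to
 * SSN 9, sctp_ulpq_skip(ulpq, 2, 9) advances the expected SSN past 9,
 * and sctp_ulpq_reap_ordered() then delivers any lobby chunks on
 * stream 2 whose SSNs fall at or below the skipped point.
 */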
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
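/* Illustrative example, not from the original source: reneging walks a
 * queue from its tail (the newest, highest TSNs) so only data the peer
 * can retransmit is dropped.  If the tail event spans TSNs 90-92 via
 * its frag_list, with 1200 bytes of linear data per skb, one pass frees
 * 3600 bytes, removes TSNs 90-92 from the tsnmap (un-acking them), and
 * then checks whether 'needed' bytes have been recovered.
 */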
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);
	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}
	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			struct sk_buff_head temp;

			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
			sctp_ulpq_tail_event(ulpq, &temp);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
	    freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}
}
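/* Illustrative example, not from the original source: when a chunk of
 * 'needed' bytes arrives and no room is left, sctp_ulpq_renege() first
 * reneges out-of-order data from the lobby, then from the reassembly
 * queue.  If enough room is freed, the chunk is accepted through
 * sctp_ulpq_tail_data(); a return of 0 (message not yet deliverable)
 * triggers partial delivery, while 1 (a full message was delivered)
 * drains the reassembly queue instead.
 */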
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sctp_sock *sp;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
				       SCTP_PARTIAL_DELIVERY_EVENT))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}