/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode = 0;

        return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * the very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
        }

        return event_eor;
}
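
/* Note: the return value of sctp_ulpq_tail_data() reports what happened to
 * the chunk: 1 when a complete message (MSG_EOR) was handed to the ULP,
 * 0 when the data is still queued for reassembly or ordering, and -ENOMEM
 * when no event could be allocated.  sctp_ulpq_renege() uses this to decide
 * whether to start partial delivery or drain the reassembly queue.
 */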
/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_ is
         * the association that caused the partial delivery.
         */
        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sctp_sk(sk)->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                sctp_skb_list_tail(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}
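
/* Note: sctp_ulpq_tail_event() returns 1 when the event was queued for the
 * ULP (receive queue or partial-delivery lobby) and 0 when it was dropped,
 * e.g. because the socket is dead or the user is not subscribed to this
 * event type.
 */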
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                  struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
        struct sk_buff_head *queue, struct sk_buff *f_frag,
        struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragments
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* if we did unshare, then free the old skb and re-assign */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}
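
/* Note: the event returned by sctp_make_reassembled_event() wraps f_frag;
 * the remaining fragments stay chained on its frag_list, so freeing the
 * event releases the whole reassembled message.
 */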
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if user sets
         * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible partial delivery.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                             &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}
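
/* Worked example of the partial-delivery check above: with pd_point set
 * to 4096 and three in-order fragments of 1500 bytes each at the head of
 * the reasm queue, pd_len is 4500 >= pd_point, so those fragments are
 * handed up as one event without MSG_EOR and the association enters
 * partial delivery mode.
 */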
/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}
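
/* Note: sctp_ulpq_reasm() returns NULL while a fragmented message is still
 * incomplete, the full event (MSG_EOR set) once reassembly completes, or,
 * when the association is in partial delivery mode, the next in-order run
 * of fragments without MSG_EOR.
 */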
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;

                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, last_frag);
        return retval;
}
/*
 * Flush out stale fragments from the reassembly queue when processing
 * a forward tsn.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}
/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if ((event) && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid, cssn;

        sid = event->stream;
        in = &ulpq->asoc->ssnmap->in;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}
/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                    struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
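
/* Deliver 'event' in stream order: hand it back for delivery if it carries
 * the next expected SSN for its stream (pulling any queued successors out
 * of the lobby along with it), otherwise park it on the lobby and return
 * NULL.
 */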
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* see if this ssn has been marked by skipping */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * the very first SKB on the 'temp' list.
         */
        if (event) {
                /* see if we have more ordered that we can deliver */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}
/* Skip over an SSN. This is used during the processing of a
 * FORWARD TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}
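
/* Renege events from the tail of 'list' (newest data first) until 'needed'
 * bytes have been freed or the Cumulative TSN ACK Point is reached.
 * Returns the number of bytes actually freed.
 */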
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                                   struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        return freed;
        }

        return freed;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.   */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
                }
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                int retval;
                retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}