/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

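/* Data path in brief: each incoming DATA chunk is wrapped in a ULP
 * event, run through reassembly (for fragmented messages) and then
 * stream ordering before being handed to the socket.  Either stage
 * may hold the event on a queue and return NULL until the chunks
 * that complete it arrive.
 */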
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	sp->pd_mode = 0;
	if (!skb_queue_empty(&sp->pd_lobby)) {
		struct list_head *list;
		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
		INIT_LIST_HEAD(list);
		return 1;
	}
	return 0;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk);
}

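/* Queue selection in sctp_ulpq_tail_event() below, in brief:
 *
 *   socket not in PD mode          -> sk_receive_queue
 *   PD mode owned by this assoc    -> sk_receive_queue for data
 *                                     (MSG_EOR ends PD mode),
 *                                     pd_lobby for notifications
 *   PD mode owned by another assoc -> pd_lobby until PD is cleared
 */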
/* Add a new event for propagation to the ULP.  If the SKB of 'event'
 * is on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (!sctp_sk(sk)->pd_mode) {
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		if (event->msg_flags & MSG_NOTIFICATION) {
			queue = &sctp_sk(sk)->pd_lobby;
		} else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	} else {
		queue = &sctp_sk(sk)->pd_lobby;
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue,
							 struct sk_buff *f_frag,
							 struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			new->sk = f_frag->sk;

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
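	/* For example, with fragments queued as
	 *   FIRST(tsn 10), MIDDLE(11), MIDDLE(12), LAST(13)
	 * the walk below reaches LAST with next_tsn == 13 and reassembles
	 * TSNs 10-13 into one event; a hole (say a missing TSN 12) resets
	 * first_frag and the datagram stays queued.
	 */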
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

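/* Note: sctp_ulpq_retrieve_partial() is only called while partial
 * delivery is in progress (see sctp_ulpq_reasm() below); the head of
 * the reasm queue is then expected to be the continuation of the
 * message already being delivered, so only MIDDLE/LAST fragments are
 * acceptable here.
 */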
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

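/* Reassembly dispatch: outside partial delivery mode we try to complete
 * a whole datagram; once partial delivery is in progress we only pull
 * further fragments when the arriving TSN is at or below the cumulative
 * TSN ack point, since only an in-sequence fragment can extend the
 * message currently being delivered.
 */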
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this chunk's TSN is at or
		 * below the cumulative TSN ack point, i.e. it could be
		 * the next fragment to be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

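/* The lobby keeps out-of-order events sorted by stream ID, then SSN.
 * For example, events queued as (sid 0, ssn 2), (sid 0, ssn 5),
 * (sid 1, ssn 1) stay in that order, and a new (sid 0, ssn 3) is
 * inserted between the first two by sctp_ulpq_store_ordered() below.
 */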
/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

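/* Do ordered delivery for a single stream: deliver the event if it
 * carries the SSN the stream expects next, otherwise park it in the
 * lobby until the gap is filled (or skipped via FORWARD TSN).
 */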
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by a FORWARD TSN skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(&temp, pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}

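/* SSN skipping is driven by FORWARD TSN processing: the peer has told
 * us it will not retransmit the chunks carrying this SSN, so advance
 * the stream past it and reap anything in the lobby that has become
 * deliverable as a result.
 */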
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

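/* Partial delivery hands the start of a large, still-incomplete
 * message to the ULP so that the fragments already queued stop
 * pinning receive buffer space.  The socket-level pd_mode flag marks
 * that some association owns partial delivery; ulpq->pd_mode marks
 * that it is this one.
 */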
/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode?  */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
			return;
		}
	}
}

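/* Renege strategy: when the receive queue is empty, make room by
 * dropping queued events, starting from the tail of the ordering
 * lobby and then the newest TSNs in the reassembly queue (these are
 * the furthest from being deliverable).  If enough bytes are freed,
 * the incoming chunk is accepted and partial delivery of the first
 * queued message is attempted to relieve rwnd pressure.
 */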
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else {
		needed = SCTP_DEFAULT_MAXWINDOW;
	}

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}
902