xref: /linux/net/sctp/outqueue.c (revision 2d87650a3bf1b80f7d0d150ee1af3f8a89e5b7aa)
1 /* SCTP kernel implementation
2  * (C) Copyright IBM Corp. 2001, 2004
3  * Copyright (c) 1999-2000 Cisco, Inc.
4  * Copyright (c) 1999-2001 Motorola, Inc.
5  * Copyright (c) 2001-2003 Intel Corp.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions implement the sctp_outq class.   The outqueue handles
10  * bundling and queueing of outgoing SCTP chunks.
11  *
12  * This SCTP implementation is free software;
13  * you can redistribute it and/or modify it under the terms of
14  * the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This SCTP implementation is distributed in the hope that it
19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20  *                 ************************
21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22  * See the GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with GNU CC; see the file COPYING.  If not, see
26  * <http://www.gnu.org/licenses/>.
27  *
28  * Please send any bug reports or fixes you make to the
29  * email address(es):
30  *    lksctp developers <linux-sctp@vger.kernel.org>
31  *
32  * Written or modified by:
33  *    La Monte H.P. Yarroll <piggy@acm.org>
34  *    Karl Knutson          <karl@athena.chicago.il.us>
35  *    Perry Melange         <pmelange@null.cc.uic.edu>
36  *    Xingang Guo           <xingang.guo@intel.com>
37  *    Hui Huang 	    <hui.huang@nokia.com>
38  *    Sridhar Samudrala     <sri@us.ibm.com>
39  *    Jon Grimm             <jgrimm@us.ibm.com>
40  */
41 
42 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43 
44 #include <linux/types.h>
45 #include <linux/list.h>   /* For struct list_head */
46 #include <linux/socket.h>
47 #include <linux/ip.h>
48 #include <linux/slab.h>
49 #include <net/sock.h>	  /* For skb_set_owner_w */
50 
51 #include <net/sctp/sctp.h>
52 #include <net/sctp/sm.h>
53 
54 /* Declare internal functions here.  */
55 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
56 static void sctp_check_transmitted(struct sctp_outq *q,
57 				   struct list_head *transmitted_queue,
58 				   struct sctp_transport *transport,
59 				   union sctp_addr *saddr,
60 				   struct sctp_sackhdr *sack,
61 				   __u32 *highest_new_tsn);
62 
63 static void sctp_mark_missing(struct sctp_outq *q,
64 			      struct list_head *transmitted_queue,
65 			      struct sctp_transport *transport,
66 			      __u32 highest_new_tsn,
67 			      int count_of_newacks);
68 
69 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
70 
71 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
72 
73 /* Add data to the front of the queue. */
74 static inline void sctp_outq_head_data(struct sctp_outq *q,
75 					struct sctp_chunk *ch)
76 {
77 	list_add(&ch->list, &q->out_chunk_list);
78 	q->out_qlen += ch->skb->len;
79 }
80 
81 /* Take data from the front of the queue. */
82 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
83 {
84 	struct sctp_chunk *ch = NULL;
85 
86 	if (!list_empty(&q->out_chunk_list)) {
87 		struct list_head *entry = q->out_chunk_list.next;
88 
89 		ch = list_entry(entry, struct sctp_chunk, list);
90 		list_del_init(entry);
91 		q->out_qlen -= ch->skb->len;
92 	}
93 	return ch;
94 }
95 /* Add data chunk to the end of the queue. */
96 static inline void sctp_outq_tail_data(struct sctp_outq *q,
97 				       struct sctp_chunk *ch)
98 {
99 	list_add_tail(&ch->list, &q->out_chunk_list);
100 	q->out_qlen += ch->skb->len;
101 }
102 
103 /*
104  * SFR-CACC algorithm:
105  * D) If count_of_newacks is greater than or equal to 2
106  * and t was not sent to the current primary then the
107  * sender MUST NOT increment missing report count for t.
108  */
109 static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
110 				       struct sctp_transport *transport,
111 				       int count_of_newacks)
112 {
113 	if (count_of_newacks >= 2 && transport != primary)
114 		return 1;
115 	return 0;
116 }
117 
118 /*
119  * SFR-CACC algorithm:
120  * F) If count_of_newacks is less than 2, let d be the
121  * destination to which t was sent. If cacc_saw_newack
122  * is 0 for destination d, then the sender MUST NOT
123  * increment missing report count for t.
124  */
125 static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
126 				       int count_of_newacks)
127 {
128 	if (count_of_newacks < 2 &&
129 			(transport && !transport->cacc.cacc_saw_newack))
130 		return 1;
131 	return 0;
132 }
133 
134 /*
135  * SFR-CACC algorithm:
136  * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
137  * execute steps C, D, F.
138  *
139  * C has been implemented in sctp_outq_sack
140  */
141 static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
142 				     struct sctp_transport *transport,
143 				     int count_of_newacks)
144 {
145 	if (!primary->cacc.cycling_changeover) {
146 		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
147 			return 1;
148 		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
149 			return 1;
150 		return 0;
151 	}
152 	return 0;
153 }
154 
155 /*
156  * SFR-CACC algorithm:
157  * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
158  * than next_tsn_at_change of the current primary, then
159  * the sender MUST NOT increment missing report count
160  * for t.
161  */
162 static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
163 {
164 	if (primary->cacc.cycling_changeover &&
165 	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
166 		return 1;
167 	return 0;
168 }
169 
170 /*
171  * SFR-CACC algorithm:
172  * 3) If the missing report count for TSN t is to be
173  * incremented according to [RFC2960] and
174  * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
175  * then the sender MUST further execute steps 3.1 and
176  * 3.2 to determine if the missing report count for
177  * TSN t SHOULD NOT be incremented.
178  *
179  * 3.3) If 3.1 and 3.2 do not dictate that the missing
180  * report count for t should not be incremented, then
181  * the sender SHOULD increment missing report count for
182  * t (according to [RFC2960] and [SCTP_STEWART_2002]).
183  */
184 static inline int sctp_cacc_skip(struct sctp_transport *primary,
185 				 struct sctp_transport *transport,
186 				 int count_of_newacks,
187 				 __u32 tsn)
188 {
189 	if (primary->cacc.changeover_active &&
190 	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
191 	     sctp_cacc_skip_3_2(primary, tsn)))
192 		return 1;
193 	return 0;
194 }
195 
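/* Worked example with hypothetical values: suppose a changeover is
 * active (changeover_active = 1) but not cycling (cycling_changeover = 0),
 * the SACK newly acknowledged data on two destinations
 * (count_of_newacks = 2), and TSN t was sent to a transport other than
 * the current primary.  Step D (sctp_cacc_skip_3_1_d) then applies, so
 * sctp_cacc_skip() returns 1 and the missing report count for t is not
 * incremented.
 */
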
196 /* Initialize an existing sctp_outq.  This does the boring stuff.
197  * You still need to define handlers if you really want to DO
198  * something with this structure...
199  */
200 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
201 {
202 	memset(q, 0, sizeof(struct sctp_outq));
203 
204 	q->asoc = asoc;
205 	INIT_LIST_HEAD(&q->out_chunk_list);
206 	INIT_LIST_HEAD(&q->control_chunk_list);
207 	INIT_LIST_HEAD(&q->retransmit);
208 	INIT_LIST_HEAD(&q->sacked);
209 	INIT_LIST_HEAD(&q->abandoned);
210 
211 	q->empty = 1;
212 }
213 
214 /* Free the outqueue structure and any related pending chunks.
215  */
216 static void __sctp_outq_teardown(struct sctp_outq *q)
217 {
218 	struct sctp_transport *transport;
219 	struct list_head *lchunk, *temp;
220 	struct sctp_chunk *chunk, *tmp;
221 
222 	/* Throw away unacknowledged chunks. */
223 	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
224 			transports) {
225 		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
226 			chunk = list_entry(lchunk, struct sctp_chunk,
227 					   transmitted_list);
228 			/* Mark as part of a failed message. */
229 			sctp_chunk_fail(chunk, q->error);
230 			sctp_chunk_free(chunk);
231 		}
232 	}
233 
234 	/* Throw away chunks that have been gap ACKed.  */
235 	list_for_each_safe(lchunk, temp, &q->sacked) {
236 		list_del_init(lchunk);
237 		chunk = list_entry(lchunk, struct sctp_chunk,
238 				   transmitted_list);
239 		sctp_chunk_fail(chunk, q->error);
240 		sctp_chunk_free(chunk);
241 	}
242 
243 	/* Throw away any chunks in the retransmit queue. */
244 	list_for_each_safe(lchunk, temp, &q->retransmit) {
245 		list_del_init(lchunk);
246 		chunk = list_entry(lchunk, struct sctp_chunk,
247 				   transmitted_list);
248 		sctp_chunk_fail(chunk, q->error);
249 		sctp_chunk_free(chunk);
250 	}
251 
252 	/* Throw away any chunks that are in the abandoned queue. */
253 	list_for_each_safe(lchunk, temp, &q->abandoned) {
254 		list_del_init(lchunk);
255 		chunk = list_entry(lchunk, struct sctp_chunk,
256 				   transmitted_list);
257 		sctp_chunk_fail(chunk, q->error);
258 		sctp_chunk_free(chunk);
259 	}
260 
261 	/* Throw away any leftover data chunks. */
262 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
263 
264 		/* Mark as send failure. */
265 		sctp_chunk_fail(chunk, q->error);
266 		sctp_chunk_free(chunk);
267 	}
268 
269 	/* Throw away any leftover control chunks. */
270 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
271 		list_del_init(&chunk->list);
272 		sctp_chunk_free(chunk);
273 	}
274 }
275 
276 void sctp_outq_teardown(struct sctp_outq *q)
277 {
278 	__sctp_outq_teardown(q);
279 	sctp_outq_init(q->asoc, q);
280 }
281 
282 /* Free the outqueue structure and any related pending chunks.  */
283 void sctp_outq_free(struct sctp_outq *q)
284 {
285 	/* Throw away leftover chunks. */
286 	__sctp_outq_teardown(q);
287 }
288 
289 /* Put a new chunk in an sctp_outq.  */
290 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
291 {
292 	struct net *net = sock_net(q->asoc->base.sk);
293 	int error = 0;
294 
295 	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
296 		 chunk && chunk->chunk_hdr ?
297 		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
298 		 "illegal chunk");
299 
300 	/* If it is data, queue it up; otherwise, send it
301 	 * immediately.
302 	 */
303 	if (sctp_chunk_is_data(chunk)) {
304 		/* Is it OK to queue data chunks?  */
305 		/* From 9. Termination of Association
306 		 *
307 		 * When either endpoint performs a shutdown, the
308 		 * association on each peer will stop accepting new
309 		 * data from its user and only deliver data in queue
310 		 * at the time of sending or receiving the SHUTDOWN
311 		 * chunk.
312 		 */
313 		switch (q->asoc->state) {
314 		case SCTP_STATE_CLOSED:
315 		case SCTP_STATE_SHUTDOWN_PENDING:
316 		case SCTP_STATE_SHUTDOWN_SENT:
317 		case SCTP_STATE_SHUTDOWN_RECEIVED:
318 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
319 			/* Cannot send after transport endpoint shutdown */
320 			error = -ESHUTDOWN;
321 			break;
322 
323 		default:
324 			pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
325 				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
326 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
327 				 "illegal chunk");
328 
329 			sctp_outq_tail_data(q, chunk);
330 			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
331 				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
332 			else
333 				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
334 			q->empty = 0;
335 			break;
336 		}
337 	} else {
338 		list_add_tail(&chunk->list, &q->control_chunk_list);
339 		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
340 	}
341 
342 	if (error < 0)
343 		return error;
344 
345 	if (!q->cork)
346 		error = sctp_outq_flush(q, 0);
347 
348 	return error;
349 }
350 
351 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
352  * and the abandoned list are in ascending order.
353  */
354 static void sctp_insert_list(struct list_head *head, struct list_head *new)
355 {
356 	struct list_head *pos;
357 	struct sctp_chunk *nchunk, *lchunk;
358 	__u32 ntsn, ltsn;
359 	int done = 0;
360 
361 	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
362 	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
363 
364 	list_for_each(pos, head) {
365 		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
366 		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
367 		if (TSN_lt(ntsn, ltsn)) {
368 			list_add(new, pos->prev);
369 			done = 1;
370 			break;
371 		}
372 	}
373 	if (!done)
374 		list_add_tail(new, head);
375 }
376 
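/* Worked example with hypothetical values: inserting a chunk with TSN
 * 0x00000001 into a list already holding TSNs 0xFFFFFFFE and 0xFFFFFFFF
 * places it at the tail, because TSN_lt() performs a wrap-around-safe
 * serial number comparison and treats 0x00000001 as newer than
 * 0xFFFFFFFF.  A plain '<' on the raw values would wrongly put it at
 * the head.
 */
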
377 /* Mark all the eligible packets on a transport for retransmission.  */
378 void sctp_retransmit_mark(struct sctp_outq *q,
379 			  struct sctp_transport *transport,
380 			  __u8 reason)
381 {
382 	struct list_head *lchunk, *ltemp;
383 	struct sctp_chunk *chunk;
384 
385 	/* Walk through the specified transmitted queue.  */
386 	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
387 		chunk = list_entry(lchunk, struct sctp_chunk,
388 				   transmitted_list);
389 
390 		/* If the chunk is abandoned, move it to the abandoned list. */
391 		if (sctp_chunk_abandoned(chunk)) {
392 			list_del_init(lchunk);
393 			sctp_insert_list(&q->abandoned, lchunk);
394 
395 			/* If this chunk has not been previously acked,
396 			 * stop considering it 'outstanding'.  Our peer
397 			 * will most likely never see it since it will
398 			 * not be retransmitted.
399 			 */
400 			if (!chunk->tsn_gap_acked) {
401 				if (chunk->transport)
402 					chunk->transport->flight_size -=
403 							sctp_data_size(chunk);
404 				q->outstanding_bytes -= sctp_data_size(chunk);
405 				q->asoc->peer.rwnd += sctp_data_size(chunk);
406 			}
407 			continue;
408 		}
409 
410 		/* If we are doing retransmission due to a timeout or pmtu
411 		 * discovery, only the chunks that are not yet acked should
412 		 * be added to the retransmit queue.
413 		 */
414 		if ((reason == SCTP_RTXR_FAST_RTX  &&
415 			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
416 		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
417 			/* RFC 2960 6.2.1 Processing a Received SACK
418 			 *
419 			 * C) Any time a DATA chunk is marked for
420 			 * retransmission (via either T3-rtx timer expiration
421 			 * (Section 6.3.3) or via fast retransmit
422 			 * (Section 7.2.4)), add the data size of those
423 			 * chunks to the rwnd.
424 			 */
425 			q->asoc->peer.rwnd += sctp_data_size(chunk);
426 			q->outstanding_bytes -= sctp_data_size(chunk);
427 			if (chunk->transport)
428 				transport->flight_size -= sctp_data_size(chunk);
429 
430 			/* sctpimpguide-05 Section 2.8.2
431 			 * M5) If a T3-rtx timer expires, the
432 			 * 'TSN.Missing.Report' of all affected TSNs is set
433 			 * to 0.
434 			 */
435 			chunk->tsn_missing_report = 0;
436 
437 			/* If a chunk that is being used for RTT measurement
438 			 * has to be retransmitted, we cannot use this chunk
439 			 * anymore for RTT measurements. Reset rto_pending so
440 			 * that a new RTT measurement is started when a new
441 			 * data chunk is sent.
442 			 */
443 			if (chunk->rtt_in_progress) {
444 				chunk->rtt_in_progress = 0;
445 				transport->rto_pending = 0;
446 			}
447 
448 			chunk->resent = 1;
449 
450 			/* Move the chunk to the retransmit queue. The chunks
451 			 * on the retransmit queue are always kept in order.
452 			 */
453 			list_del_init(lchunk);
454 			sctp_insert_list(&q->retransmit, lchunk);
455 		}
456 	}
457 
458 	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
459 		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
460 		 transport->cwnd, transport->ssthresh, transport->flight_size,
461 		 transport->partial_bytes_acked);
462 }
463 
464 /* Mark all the eligible packets on a transport for retransmission and force
465  * one packet out.
466  */
467 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
468 		     sctp_retransmit_reason_t reason)
469 {
470 	struct net *net = sock_net(q->asoc->base.sk);
471 	int error = 0;
472 
473 	switch (reason) {
474 	case SCTP_RTXR_T3_RTX:
475 		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
476 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
477 		/* Update the retran path if the T3-rtx timer has expired for
478 		 * the current retran path.
479 		 */
480 		if (transport == transport->asoc->peer.retran_path)
481 			sctp_assoc_update_retran_path(transport->asoc);
482 		transport->asoc->rtx_data_chunks +=
483 			transport->asoc->unack_data;
484 		break;
485 	case SCTP_RTXR_FAST_RTX:
486 		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
487 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
488 		q->fast_rtx = 1;
489 		break;
490 	case SCTP_RTXR_PMTUD:
491 		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
492 		break;
493 	case SCTP_RTXR_T1_RTX:
494 		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
495 		transport->asoc->init_retries++;
496 		break;
497 	default:
498 		BUG();
499 	}
500 
501 	sctp_retransmit_mark(q, transport, reason);
502 
503 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
504 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
505 	 * following the procedures outlined in C1 - C5.
506 	 */
507 	if (reason == SCTP_RTXR_T3_RTX)
508 		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
509 
510 	/* Flush the queues only on timeout, since fast_rtx is only
511 	 * triggered during sack processing and the queue
512 	 * will be flushed at the end.
513 	 */
514 	if (reason != SCTP_RTXR_FAST_RTX)
515 		error = sctp_outq_flush(q, /* rtx_timeout */ 1);
516 
517 	if (error)
518 		q->asoc->base.sk->sk_err = -error;
519 }
520 
521 /*
522  * Transmit DATA chunks on the retransmit queue.  Upon return from
523  * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
524  * need to be transmitted by the caller.
525  * We assume that pkt->transport has already been set.
526  *
527  * The return value is a normal kernel error return value.
528  */
529 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
530 			       int rtx_timeout, int *start_timer)
531 {
532 	struct list_head *lqueue;
533 	struct sctp_transport *transport = pkt->transport;
534 	sctp_xmit_t status;
535 	struct sctp_chunk *chunk, *chunk1;
536 	int fast_rtx;
537 	int error = 0;
538 	int timer = 0;
539 	int done = 0;
540 
541 	lqueue = &q->retransmit;
542 	fast_rtx = q->fast_rtx;
543 
544 	/* This loop handles time-out retransmissions, fast retransmissions,
545 	 * and retransmissions due to the opening of the window.
546 	 *
547 	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
548 	 *
549 	 * E3) Determine how many of the earliest (i.e., lowest TSN)
550 	 * outstanding DATA chunks for the address for which the
551 	 * T3-rtx has expired will fit into a single packet, subject
552 	 * to the MTU constraint for the path corresponding to the
553 	 * destination transport address to which the retransmission
554 	 * is being sent (this may be different from the address for
555 	 * which the timer expires [see Section 6.4]). Call this value
556 	 * K. Bundle and retransmit those K DATA chunks in a single
557 	 * packet to the destination endpoint.
558 	 *
559 	 * [Just to be painfully clear, if we are retransmitting
560 	 * because a timeout just happened, we should send only ONE
561 	 * packet of retransmitted data.]
562 	 *
563 	 * For fast retransmissions we also send only ONE packet.  However,
564 	 * if we are just flushing the queue due to open window, we'll
565 	 * try to send as much as possible.
566 	 */
567 	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
568 		/* If the chunk is abandoned, move it to the abandoned list. */
569 		if (sctp_chunk_abandoned(chunk)) {
570 			list_del_init(&chunk->transmitted_list);
571 			sctp_insert_list(&q->abandoned,
572 					 &chunk->transmitted_list);
573 			continue;
574 		}
575 
576 		/* Make sure that Gap Acked TSNs are not retransmitted.  A
577 		 * simple approach is just to move such TSNs out of the
578 		 * way and into a 'transmitted' queue and skip to the
579 		 * next chunk.
580 		 */
581 		if (chunk->tsn_gap_acked) {
582 			list_move_tail(&chunk->transmitted_list,
583 				       &transport->transmitted);
584 			continue;
585 		}
586 
587 		/* If we are doing fast retransmit, ignore non-fast_retransmit
588 		 * chunks
589 		 */
590 		if (fast_rtx && !chunk->fast_retransmit)
591 			continue;
592 
593 redo:
594 		/* Attempt to append this chunk to the packet. */
595 		status = sctp_packet_append_chunk(pkt, chunk);
596 
597 		switch (status) {
598 		case SCTP_XMIT_PMTU_FULL:
599 			if (!pkt->has_data && !pkt->has_cookie_echo) {
600 				/* If this packet did not contain DATA then
601 				 * retransmission did not happen, so do it
602 				 * again.  We'll ignore the error here since
603 				 * control chunks are already freed so there
604 				 * is nothing we can do.
605 				 */
606 				sctp_packet_transmit(pkt);
607 				goto redo;
608 			}
609 
610 			/* Send this packet.  */
611 			error = sctp_packet_transmit(pkt);
612 
613 			/* If we are retransmitting, we should only
614 			 * send a single packet.
615 			 * Otherwise, try appending this chunk again.
616 			 */
617 			if (rtx_timeout || fast_rtx)
618 				done = 1;
619 			else
620 				goto redo;
621 
622 			/* Bundle next chunk in the next round.  */
623 			break;
624 
625 		case SCTP_XMIT_RWND_FULL:
626 			/* Send this packet. */
627 			error = sctp_packet_transmit(pkt);
628 
629 			/* Stop sending DATA as there is no more room
630 			 * at the receiver.
631 			 */
632 			done = 1;
633 			break;
634 
635 		case SCTP_XMIT_NAGLE_DELAY:
636 			/* Send this packet. */
637 			error = sctp_packet_transmit(pkt);
638 
639 			/* Stop sending DATA because of nagle delay. */
640 			done = 1;
641 			break;
642 
643 		default:
644 			/* The append was successful, so add this chunk to
645 			 * the transmitted list.
646 			 */
647 			list_move_tail(&chunk->transmitted_list,
648 				       &transport->transmitted);
649 
650 			/* Mark the chunk as ineligible for fast retransmit
651 			 * after it is retransmitted.
652 			 */
653 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
654 				chunk->fast_retransmit = SCTP_DONT_FRTX;
655 
656 			q->empty = 0;
657 			q->asoc->stats.rtxchunks++;
658 			break;
659 		}
660 
661 		/* Set the timer if there were no errors */
662 		if (!error && !timer)
663 			timer = 1;
664 
665 		if (done)
666 			break;
667 	}
668 
669 	/* If we are here due to a retransmit timeout or a fast
670 	 * retransmit and if there are any chunks left in the retransmit
671 	 * queue that could not fit in the PMTU sized packet, they need
672 	 * to be marked as ineligible for a subsequent fast retransmit.
673 	 */
674 	if (rtx_timeout || fast_rtx) {
675 		list_for_each_entry(chunk1, lqueue, transmitted_list) {
676 			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
677 				chunk1->fast_retransmit = SCTP_DONT_FRTX;
678 		}
679 	}
680 
681 	*start_timer = timer;
682 
683 	/* Clear fast retransmit hint */
684 	if (fast_rtx)
685 		q->fast_rtx = 0;
686 
687 	return error;
688 }
689 
690 /* Uncork the outqueue and flush any chunks queued while it was corked. */
691 int sctp_outq_uncork(struct sctp_outq *q)
692 {
693 	if (q->cork)
694 		q->cork = 0;
695 
696 	return sctp_outq_flush(q, 0);
697 }
698 
699 
700 /*
701  * Try to flush an outqueue.
702  *
703  * Description: Send everything in q which we legally can, subject to
704  * congestion limitations.
705  * Note: This function can be called from multiple contexts, so appropriate
706  * locking must be used.  Today we use the sock lock to protect
707  * this function.
708  */
709 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
710 {
711 	struct sctp_packet *packet;
712 	struct sctp_packet singleton;
713 	struct sctp_association *asoc = q->asoc;
714 	__u16 sport = asoc->base.bind_addr.port;
715 	__u16 dport = asoc->peer.port;
716 	__u32 vtag = asoc->peer.i.init_tag;
717 	struct sctp_transport *transport = NULL;
718 	struct sctp_transport *new_transport;
719 	struct sctp_chunk *chunk, *tmp;
720 	sctp_xmit_t status;
721 	int error = 0;
722 	int start_timer = 0;
723 	int one_packet = 0;
724 
725 	/* These transports have chunks to send. */
726 	struct list_head transport_list;
727 	struct list_head *ltransport;
728 
729 	INIT_LIST_HEAD(&transport_list);
730 	packet = NULL;
731 
732 	/*
733 	 * 6.10 Bundling
734 	 *   ...
735 	 *   When bundling control chunks with DATA chunks, an
736 	 *   endpoint MUST place control chunks first in the outbound
737 	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
738 	 *   within a SCTP packet in increasing order of TSN.
739 	 *   ...
740 	 */
741 
742 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
743 		/* RFC 5061, 5.3
744 		 * F1) This means that until such time as the ASCONF
745 		 * containing the add is acknowledged, the sender MUST
746 		 * NOT use the new IP address as a source for ANY SCTP
747 		 * packet except on carrying an ASCONF Chunk.
748 		 */
749 		if (asoc->src_out_of_asoc_ok &&
750 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
751 			continue;
752 
753 		list_del_init(&chunk->list);
754 
755 		/* Pick the right transport to use. */
756 		new_transport = chunk->transport;
757 
758 		if (!new_transport) {
759 			/*
760 			 * If we have a prior transport pointer, see if
761 			 * the destination address of the chunk
762 			 * matches the destination address of the
763 			 * current transport.  If not a match, then
764 			 * try to look up the transport with a given
765 			 * destination address.  We do this because
766 			 * after processing ASCONFs, we may have new
767 			 * transports created.
768 			 */
769 			if (transport &&
770 			    sctp_cmp_addr_exact(&chunk->dest,
771 						&transport->ipaddr))
772 					new_transport = transport;
773 			else
774 				new_transport = sctp_assoc_lookup_paddr(asoc,
775 								&chunk->dest);
776 
777 			/* if we still don't have a new transport, then
778 			 * use the current active path.
779 			 */
780 			if (!new_transport)
781 				new_transport = asoc->peer.active_path;
782 		} else if ((new_transport->state == SCTP_INACTIVE) ||
783 			   (new_transport->state == SCTP_UNCONFIRMED) ||
784 			   (new_transport->state == SCTP_PF)) {
785 			/* If the chunk is Heartbeat or Heartbeat Ack,
786 			 * send it to chunk->transport, even if it's
787 			 * inactive.
788 			 *
789 			 * 3.3.6 Heartbeat Acknowledgement:
790 			 * ...
791 			 * A HEARTBEAT ACK is always sent to the source IP
792 			 * address of the IP datagram containing the
793 			 * HEARTBEAT chunk to which this ack is responding.
794 			 * ...
795 			 *
796 			 * ASCONF_ACKs also must be sent to the source.
797 			 */
798 			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
799 			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
800 			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
801 				new_transport = asoc->peer.active_path;
802 		}
803 
804 		/* Are we switching transports?
805 		 * Take care of transport locks.
806 		 */
807 		if (new_transport != transport) {
808 			transport = new_transport;
809 			if (list_empty(&transport->send_ready)) {
810 				list_add_tail(&transport->send_ready,
811 					      &transport_list);
812 			}
813 			packet = &transport->packet;
814 			sctp_packet_config(packet, vtag,
815 					   asoc->peer.ecn_capable);
816 		}
817 
818 		switch (chunk->chunk_hdr->type) {
819 		/*
820 		 * 6.10 Bundling
821 		 *   ...
822 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
823 		 *   COMPLETE with any other chunks.  [Send them immediately.]
824 		 */
825 		case SCTP_CID_INIT:
826 		case SCTP_CID_INIT_ACK:
827 		case SCTP_CID_SHUTDOWN_COMPLETE:
828 			sctp_packet_init(&singleton, transport, sport, dport);
829 			sctp_packet_config(&singleton, vtag, 0);
830 			sctp_packet_append_chunk(&singleton, chunk);
831 			error = sctp_packet_transmit(&singleton);
832 			if (error < 0)
833 				return error;
834 			break;
835 
836 		case SCTP_CID_ABORT:
837 			if (sctp_test_T_bit(chunk)) {
838 				packet->vtag = asoc->c.my_vtag;
839 			}
840 		/* The following chunks are "response" chunks, i.e.
841 		 * they are generated in response to something we
842 		 * received.  If we are sending these, then we can
843 		 * send only 1 packet containing these chunks.
844 		 */
845 		case SCTP_CID_HEARTBEAT_ACK:
846 		case SCTP_CID_SHUTDOWN_ACK:
847 		case SCTP_CID_COOKIE_ACK:
848 		case SCTP_CID_COOKIE_ECHO:
849 		case SCTP_CID_ERROR:
850 		case SCTP_CID_ECN_CWR:
851 		case SCTP_CID_ASCONF_ACK:
852 			one_packet = 1;
853 			/* Fall through */
854 
855 		case SCTP_CID_SACK:
856 		case SCTP_CID_HEARTBEAT:
857 		case SCTP_CID_SHUTDOWN:
858 		case SCTP_CID_ECN_ECNE:
859 		case SCTP_CID_ASCONF:
860 		case SCTP_CID_FWD_TSN:
861 			status = sctp_packet_transmit_chunk(packet, chunk,
862 							    one_packet);
863 			if (status  != SCTP_XMIT_OK) {
864 				/* put the chunk back */
865 				list_add(&chunk->list, &q->control_chunk_list);
866 			} else {
867 				asoc->stats.octrlchunks++;
868 				/* PR-SCTP C5) If a FORWARD TSN is sent, the
869 				 * sender MUST assure that at least one T3-rtx
870 				 * timer is running.
871 				 */
872 				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
873 					sctp_transport_reset_timers(transport);
874 			}
875 			break;
876 
877 		default:
878 			/* We built a chunk with an illegal type! */
879 			BUG();
880 		}
881 	}
882 
883 	if (q->asoc->src_out_of_asoc_ok)
884 		goto sctp_flush_out;
885 
886 	/* Is it OK to send data chunks?  */
887 	switch (asoc->state) {
888 	case SCTP_STATE_COOKIE_ECHOED:
889 		/* Only allow bundling when this packet has a COOKIE-ECHO
890 		 * chunk.
891 		 */
892 		if (!packet || !packet->has_cookie_echo)
893 			break;
894 
895 		/* fallthru */
896 	case SCTP_STATE_ESTABLISHED:
897 	case SCTP_STATE_SHUTDOWN_PENDING:
898 	case SCTP_STATE_SHUTDOWN_RECEIVED:
899 		/*
900 		 * RFC 2960 6.1  Transmission of DATA Chunks
901 		 *
902 		 * C) When the time comes for the sender to transmit,
903 		 * before sending new DATA chunks, the sender MUST
904 		 * first transmit any outstanding DATA chunks which
905 		 * are marked for retransmission (limited by the
906 		 * current cwnd).
907 		 */
908 		if (!list_empty(&q->retransmit)) {
909 			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
910 				goto sctp_flush_out;
911 			if (transport == asoc->peer.retran_path)
912 				goto retran;
913 
914 			/* Switch transports & prepare the packet.  */
915 
916 			transport = asoc->peer.retran_path;
917 
918 			if (list_empty(&transport->send_ready)) {
919 				list_add_tail(&transport->send_ready,
920 					      &transport_list);
921 			}
922 
923 			packet = &transport->packet;
924 			sctp_packet_config(packet, vtag,
925 					   asoc->peer.ecn_capable);
926 		retran:
927 			error = sctp_outq_flush_rtx(q, packet,
928 						    rtx_timeout, &start_timer);
929 
930 			if (start_timer)
931 				sctp_transport_reset_timers(transport);
932 
933 			/* This can happen on COOKIE-ECHO resend.  Only
934 			 * one chunk can get bundled with a COOKIE-ECHO.
935 			 */
936 			if (packet->has_cookie_echo)
937 				goto sctp_flush_out;
938 
939 			/* Don't send new data if there is still data
940 			 * waiting to retransmit.
941 			 */
942 			if (!list_empty(&q->retransmit))
943 				goto sctp_flush_out;
944 		}
945 
946 		/* Apply Max.Burst limitation to the current transport in
947 		 * case it will be used for new data.  We are going to
948 		 * reset it before we return, but we want to apply the limit
949 		 * to the currently queued data.
950 		 */
951 		if (transport)
952 			sctp_transport_burst_limited(transport);
953 
954 		/* Finally, transmit new packets.  */
955 		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
956 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
957 			 * stream identifier.
958 			 */
959 			if (chunk->sinfo.sinfo_stream >=
960 			    asoc->c.sinit_num_ostreams) {
961 
962 				/* Mark as failed send. */
963 				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
964 				sctp_chunk_free(chunk);
965 				continue;
966 			}
967 
968 			/* Has this chunk expired? */
969 			if (sctp_chunk_abandoned(chunk)) {
970 				sctp_chunk_fail(chunk, 0);
971 				sctp_chunk_free(chunk);
972 				continue;
973 			}
974 
975 			/* If there is a specified transport, use it.
976 			 * Otherwise, we want to use the active path.
977 			 */
978 			new_transport = chunk->transport;
979 			if (!new_transport ||
980 			    ((new_transport->state == SCTP_INACTIVE) ||
981 			     (new_transport->state == SCTP_UNCONFIRMED) ||
982 			     (new_transport->state == SCTP_PF)))
983 				new_transport = asoc->peer.active_path;
984 			if (new_transport->state == SCTP_UNCONFIRMED)
985 				continue;
986 
987 			/* Change packets if necessary.  */
988 			if (new_transport != transport) {
989 				transport = new_transport;
990 
991 				/* Schedule to have this transport's
992 				 * packet flushed.
993 				 */
994 				if (list_empty(&transport->send_ready)) {
995 					list_add_tail(&transport->send_ready,
996 						      &transport_list);
997 				}
998 
999 				packet = &transport->packet;
1000 				sctp_packet_config(packet, vtag,
1001 						   asoc->peer.ecn_capable);
1002 				/* We've switched transports, so apply the
1003 				 * Burst limit to the new transport.
1004 				 */
1005 				sctp_transport_burst_limited(transport);
1006 			}
1007 
1008 			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
1009 				 "skb->users:%d\n",
1010 				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
1011 				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
1012 				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
1013 				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
1014 				 atomic_read(&chunk->skb->users) : -1);
1015 
1016 			/* Add the chunk to the packet.  */
1017 			status = sctp_packet_transmit_chunk(packet, chunk, 0);
1018 
1019 			switch (status) {
1020 			case SCTP_XMIT_PMTU_FULL:
1021 			case SCTP_XMIT_RWND_FULL:
1022 			case SCTP_XMIT_NAGLE_DELAY:
1023 				/* We could not append this chunk, so put
1024 				 * the chunk back on the output queue.
1025 				 */
1026 				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
1027 					 __func__, ntohl(chunk->subh.data_hdr->tsn),
1028 					 status);
1029 
1030 				sctp_outq_head_data(q, chunk);
1031 				goto sctp_flush_out;
1032 				break;
1033 
1034 			case SCTP_XMIT_OK:
1035 				/* If the sender is in the SHUTDOWN-PENDING state,
1036 				 * it MAY set the I-bit in the DATA
1037 				 * chunk header.
1038 				 */
1039 				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1040 					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1041 				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1042 					asoc->stats.ouodchunks++;
1043 				else
1044 					asoc->stats.oodchunks++;
1045 
1046 				break;
1047 
1048 			default:
1049 				BUG();
1050 			}
1051 
1052 			/* BUG: We assume that the sctp_packet_transmit()
1053 			 * call below will succeed all the time and add the
1054 			 * chunk to the transmitted list and restart the
1055 			 * timers.
1056 			 * It is possible that the call can fail under OOM
1057 			 * conditions.
1058 			 *
1059 			 * Is this really a problem?  Won't this behave
1060 			 * like a lost TSN?
1061 			 */
1062 			list_add_tail(&chunk->transmitted_list,
1063 				      &transport->transmitted);
1064 
1065 			sctp_transport_reset_timers(transport);
1066 
1067 			q->empty = 0;
1068 
1069 			/* Only let one DATA chunk get bundled with a
1070 			 * COOKIE-ECHO chunk.
1071 			 */
1072 			if (packet->has_cookie_echo)
1073 				goto sctp_flush_out;
1074 		}
1075 		break;
1076 
1077 	default:
1078 		/* Do nothing.  */
1079 		break;
1080 	}
1081 
1082 sctp_flush_out:
1083 
1084 	/* Before returning, examine all the transports touched in
1085 	 * this call.  Right now, we bluntly force clear all the
1086 	 * transports.  Things might change after we implement Nagle.
1087 	 * But such an examination is still required.
1088 	 *
1089 	 * --xguo
1090 	 */
1091 	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
1092 		struct sctp_transport *t = list_entry(ltransport,
1093 						      struct sctp_transport,
1094 						      send_ready);
1095 		packet = &t->packet;
1096 		if (!sctp_packet_empty(packet))
1097 			error = sctp_packet_transmit(packet);
1098 
1099 		/* Clear the burst limited state, if any */
1100 		sctp_transport_burst_reset(t);
1101 	}
1102 
1103 	return error;
1104 }
1105 
1106 /* Update unack_data based on the incoming SACK chunk */
1107 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1108 					struct sctp_sackhdr *sack)
1109 {
1110 	sctp_sack_variable_t *frags;
1111 	__u16 unack_data;
1112 	int i;
1113 
1114 	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1115 
1116 	frags = sack->variable;
1117 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1118 		unack_data -= ((ntohs(frags[i].gab.end) -
1119 				ntohs(frags[i].gab.start) + 1));
1120 	}
1121 
1122 	assoc->unack_data = unack_data;
1123 }
1124 
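/* Worked example with hypothetical values: with next_tsn = 110 and
 * ctsn_ack_point = 100, TSNs 101..109 are unacknowledged, so unack_data
 * starts at 110 - 100 - 1 = 9.  A single Gap Ack Block with start = 3
 * and end = 5 covers three of them (TSNs 103..105), leaving
 * unack_data = 6.
 */
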
1125 /* This is where we REALLY process a SACK.
1126  *
1127  * Process the SACK against the outqueue.  Mostly, this just frees
1128  * things off the transmitted queue.
1129  */
1130 int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1131 {
1132 	struct sctp_association *asoc = q->asoc;
1133 	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
1134 	struct sctp_transport *transport;
1135 	struct sctp_chunk *tchunk = NULL;
1136 	struct list_head *lchunk, *transport_list, *temp;
1137 	sctp_sack_variable_t *frags = sack->variable;
1138 	__u32 sack_ctsn, ctsn, tsn;
1139 	__u32 highest_tsn, highest_new_tsn;
1140 	__u32 sack_a_rwnd;
1141 	unsigned int outstanding;
1142 	struct sctp_transport *primary = asoc->peer.primary_path;
1143 	int count_of_newacks = 0;
1144 	int gap_ack_blocks;
1145 	u8 accum_moved = 0;
1146 
1147 	/* Grab the association's destination address list. */
1148 	transport_list = &asoc->peer.transport_addr_list;
1149 
1150 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1151 	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1152 	asoc->stats.gapcnt += gap_ack_blocks;
1153 	/*
1154 	 * SFR-CACC algorithm:
1155 	 * On receipt of a SACK the sender SHOULD execute the
1156 	 * following statements.
1157 	 *
1158 	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
1159 	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
1160 	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
1161 	 * all destinations.
1162 	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
1163 	 * is set the receiver of the SACK MUST take the following actions:
1164 	 *
1165 	 * A) Initialize the cacc_saw_newack to 0 for all destination
1166 	 * addresses.
1167 	 *
1168 	 * Only bother if changeover_active is set. Otherwise, this is
1169 	 * totally suboptimal to do on every SACK.
1170 	 */
1171 	if (primary->cacc.changeover_active) {
1172 		u8 clear_cycling = 0;
1173 
1174 		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
1175 			primary->cacc.changeover_active = 0;
1176 			clear_cycling = 1;
1177 		}
1178 
1179 		if (clear_cycling || gap_ack_blocks) {
1180 			list_for_each_entry(transport, transport_list,
1181 					transports) {
1182 				if (clear_cycling)
1183 					transport->cacc.cycling_changeover = 0;
1184 				if (gap_ack_blocks)
1185 					transport->cacc.cacc_saw_newack = 0;
1186 			}
1187 		}
1188 	}
1189 
1190 	/* Get the highest TSN in the sack. */
1191 	highest_tsn = sack_ctsn;
1192 	if (gap_ack_blocks)
1193 		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
1194 
1195 	if (TSN_lt(asoc->highest_sacked, highest_tsn))
1196 		asoc->highest_sacked = highest_tsn;
1197 
1198 	highest_new_tsn = sack_ctsn;
1199 
1200 	/* Run through the retransmit queue.  Credit bytes received
1201 	 * and free those chunks that we can.
1202 	 */
1203 	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
1204 
1205 	/* Run through the transmitted queue.
1206 	 * Credit bytes received and free those chunks which we can.
1207 	 *
1208 	 * This is a MASSIVE candidate for optimization.
1209 	 */
1210 	list_for_each_entry(transport, transport_list, transports) {
1211 		sctp_check_transmitted(q, &transport->transmitted,
1212 				       transport, &chunk->source, sack,
1213 				       &highest_new_tsn);
1214 		/*
1215 		 * SFR-CACC algorithm:
1216 		 * C) Let count_of_newacks be the number of
1217 		 * destinations for which cacc_saw_newack is set.
1218 		 */
1219 		if (transport->cacc.cacc_saw_newack)
1220 			count_of_newacks++;
1221 	}
1222 
1223 	/* Move the Cumulative TSN Ack Point if appropriate.  */
1224 	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
1225 		asoc->ctsn_ack_point = sack_ctsn;
1226 		accum_moved = 1;
1227 	}
1228 
1229 	if (gap_ack_blocks) {
1230 
1231 		if (asoc->fast_recovery && accum_moved)
1232 			highest_new_tsn = highest_tsn;
1233 
1234 		list_for_each_entry(transport, transport_list, transports)
1235 			sctp_mark_missing(q, &transport->transmitted, transport,
1236 					  highest_new_tsn, count_of_newacks);
1237 	}
1238 
1239 	/* Update unack_data field in the assoc. */
1240 	sctp_sack_update_unack_data(asoc, sack);
1241 
1242 	ctsn = asoc->ctsn_ack_point;
1243 
1244 	/* Throw away stuff rotting on the sack queue.  */
1245 	list_for_each_safe(lchunk, temp, &q->sacked) {
1246 		tchunk = list_entry(lchunk, struct sctp_chunk,
1247 				    transmitted_list);
1248 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1249 		if (TSN_lte(tsn, ctsn)) {
1250 			list_del_init(&tchunk->transmitted_list);
1251 			sctp_chunk_free(tchunk);
1252 		}
1253 	}
1254 
1255 	/* ii) Set rwnd equal to the newly received a_rwnd minus the
1256 	 *     number of bytes still outstanding after processing the
1257 	 *     Cumulative TSN Ack and the Gap Ack Blocks.
1258 	 */
1259 
1260 	sack_a_rwnd = ntohl(sack->a_rwnd);
1261 	outstanding = q->outstanding_bytes;
1262 
1263 	if (outstanding < sack_a_rwnd)
1264 		sack_a_rwnd -= outstanding;
1265 	else
1266 		sack_a_rwnd = 0;
1267 
1268 	asoc->peer.rwnd = sack_a_rwnd;
1269 
1270 	sctp_generate_fwdtsn(q, sack_ctsn);
1271 
1272 	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
1273 	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
1274 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
1275 		 asoc->adv_peer_ack_point);
1276 
1277 	/* See if all chunks are acked.
1278 	 * Make sure the empty queue handler will get run later.
1279 	 */
1280 	q->empty = (list_empty(&q->out_chunk_list) &&
1281 		    list_empty(&q->retransmit));
1282 	if (!q->empty)
1283 		goto finish;
1284 
1285 	list_for_each_entry(transport, transport_list, transports) {
1286 		q->empty = q->empty && list_empty(&transport->transmitted);
1287 		if (!q->empty)
1288 			goto finish;
1289 	}
1290 
1291 	pr_debug("%s: sack queue is empty\n", __func__);
1292 finish:
1293 	return q->empty;
1294 }
1295 
1296 /* Is the outqueue empty?  */
1297 int sctp_outq_is_empty(const struct sctp_outq *q)
1298 {
1299 	return q->empty;
1300 }
1301 
1302 /********************************************************************
1303  * 2nd Level Abstractions
1304  ********************************************************************/
1305 
1306 /* Go through a transport's transmitted list or the association's retransmit
1307  * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1308  * The retransmit list will not have an associated transport.
1309  *
1310  * I added coherent debug information output.	--xguo
1311  *
1312  * Instead of printing 'sacked' or 'kept' for each TSN on the
1313  * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1314  * KEPT TSN6-TSN7, etc.
1315  */
1316 static void sctp_check_transmitted(struct sctp_outq *q,
1317 				   struct list_head *transmitted_queue,
1318 				   struct sctp_transport *transport,
1319 				   union sctp_addr *saddr,
1320 				   struct sctp_sackhdr *sack,
1321 				   __u32 *highest_new_tsn_in_sack)
1322 {
1323 	struct list_head *lchunk;
1324 	struct sctp_chunk *tchunk;
1325 	struct list_head tlist;
1326 	__u32 tsn;
1327 	__u32 sack_ctsn;
1328 	__u32 rtt;
1329 	__u8 restart_timer = 0;
1330 	int bytes_acked = 0;
1331 	int migrate_bytes = 0;
1332 	bool forward_progress = false;
1333 
1334 	sack_ctsn = ntohl(sack->cum_tsn_ack);
1335 
1336 	INIT_LIST_HEAD(&tlist);
1337 
1338 	/* The while loop will skip empty transmitted queues. */
1339 	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1340 		tchunk = list_entry(lchunk, struct sctp_chunk,
1341 				    transmitted_list);
1342 
1343 		if (sctp_chunk_abandoned(tchunk)) {
1344 			/* Move the chunk to the abandoned list. */
1345 			sctp_insert_list(&q->abandoned, lchunk);
1346 
1347 			/* If this chunk has not been acked, stop
1348 			 * considering it as 'outstanding'.
1349 			 */
1350 			if (!tchunk->tsn_gap_acked) {
1351 				if (tchunk->transport)
1352 					tchunk->transport->flight_size -=
1353 							sctp_data_size(tchunk);
1354 				q->outstanding_bytes -= sctp_data_size(tchunk);
1355 			}
1356 			continue;
1357 		}
1358 
1359 		tsn = ntohl(tchunk->subh.data_hdr->tsn);
1360 		if (sctp_acked(sack, tsn)) {
1361 			/* If this queue is the retransmit queue, the
1362 			 * retransmit timer has already reclaimed
1363 			 * the outstanding bytes for this chunk, so only
1364 			 * count bytes associated with a transport.
1365 			 */
1366 			if (transport) {
1367 				/* If this chunk is being used for RTT
1368 				 * measurement, calculate the RTT and update
1369 				 * the RTO using this value.
1370 				 *
1371 				 * 6.3.1 C5) Karn's algorithm: RTT measurements
1372 				 * MUST NOT be made using packets that were
1373 				 * retransmitted (and thus for which it is
1374 				 * ambiguous whether the reply was for the
1375 				 * first instance of the packet or a later
1376 				 * instance).
1377 				 */
1378 				if (!tchunk->tsn_gap_acked &&
1379 				    !tchunk->resent &&
1380 				    tchunk->rtt_in_progress) {
1381 					tchunk->rtt_in_progress = 0;
1382 					rtt = jiffies - tchunk->sent_at;
1383 					sctp_transport_update_rto(transport,
1384 								  rtt);
1385 				}
1386 			}
1387 
1388 			/* If the chunk hasn't been marked as ACKED,
1389 			 * mark it and account bytes_acked if the
1390 			 * chunk had a valid transport (it will not
1391 			 * have a transport if ASCONF had deleted it
1392 			 * while DATA was outstanding).
1393 			 */
1394 			if (!tchunk->tsn_gap_acked) {
1395 				tchunk->tsn_gap_acked = 1;
1396 				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1397 					*highest_new_tsn_in_sack = tsn;
1398 				bytes_acked += sctp_data_size(tchunk);
1399 				if (!tchunk->transport)
1400 					migrate_bytes += sctp_data_size(tchunk);
1401 				forward_progress = true;
1402 			}
1403 
1404 			if (TSN_lte(tsn, sack_ctsn)) {
1405 				/* RFC 2960  6.3.2 Retransmission Timer Rules
1406 				 *
1407 				 * R3) Whenever a SACK is received
1408 				 * that acknowledges the DATA chunk
1409 				 * with the earliest outstanding TSN
1410 				 * for that address, restart T3-rtx
1411 				 * timer for that address with its
1412 				 * current RTO.
1413 				 */
1414 				restart_timer = 1;
1415 				forward_progress = true;
1416 
1417 				if (!tchunk->tsn_gap_acked) {
1418 					/*
1419 					 * SFR-CACC algorithm:
1420 					 * 2) If the SACK contains gap acks
1421 					 * and the flag CHANGEOVER_ACTIVE is
1422 					 * set the receiver of the SACK MUST
1423 					 * take the following action:
1424 					 *
1425 					 * B) For each TSN t being acked that
1426 					 * has not been acked in any SACK so
1427 					 * far, set cacc_saw_newack to 1 for
1428 					 * the destination that the TSN was
1429 					 * sent to.
1430 					 */
1431 					if (transport &&
1432 					    sack->num_gap_ack_blocks &&
1433 					    q->asoc->peer.primary_path->cacc.
1434 					    changeover_active)
1435 						transport->cacc.cacc_saw_newack
1436 							= 1;
1437 				}
1438 
1439 				list_add_tail(&tchunk->transmitted_list,
1440 					      &q->sacked);
1441 			} else {
1442 				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1443 				 * M2) Each time a SACK arrives reporting
1444 				 * 'Stray DATA chunk(s)' record the highest TSN
1445 				 * reported as newly acknowledged, call this
1446 				 * value 'HighestTSNinSack'. A newly
1447 				 * acknowledged DATA chunk is one not
1448 				 * previously acknowledged in a SACK.
1449 				 *
1450 				 * When the SCTP sender of data receives a SACK
1451 				 * chunk that acknowledges, for the first time,
1452 				 * the receipt of a DATA chunk, all the still
1453 				 * unacknowledged DATA chunks whose TSN is
1454 				 * older than that newly acknowledged DATA
1455 				 * chunk, are qualified as 'Stray DATA chunks'.
1456 				 */
1457 				list_add_tail(lchunk, &tlist);
1458 			}
1459 		} else {
1460 			if (tchunk->tsn_gap_acked) {
1461 				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1462 					 __func__, tsn);
1463 
1464 				tchunk->tsn_gap_acked = 0;
1465 
1466 				if (tchunk->transport)
1467 					bytes_acked -= sctp_data_size(tchunk);
1468 
1469 				/* RFC 2960 6.3.2 Retransmission Timer Rules
1470 				 *
1471 				 * R4) Whenever a SACK is received missing a
1472 				 * TSN that was previously acknowledged via a
1473 				 * Gap Ack Block, start T3-rtx for the
1474 				 * destination address to which the DATA
1475 				 * chunk was originally
1476 				 * transmitted if it is not already running.
1477 				 */
1478 				restart_timer = 1;
1479 			}
1480 
1481 			list_add_tail(lchunk, &tlist);
1482 		}
1483 	}
1484 
1485 	if (transport) {
1486 		if (bytes_acked) {
1487 			struct sctp_association *asoc = transport->asoc;
1488 
1489 			/* We may have counted DATA that was migrated
1490 			 * to this transport due to DEL-IP operation.
1491 			 * Subtract those bytes, since they were never
1492 			 * sent on this transport and shouldn't be
1493 			 * credited to this transport.
1494 			 */
1495 			bytes_acked -= migrate_bytes;
1496 
1497 			/* 8.2. When an outstanding TSN is acknowledged,
1498 			 * the endpoint shall clear the error counter of
1499 			 * the destination transport address to which the
1500 			 * DATA chunk was last sent.
1501 			 * The association's overall error counter is
1502 			 * also cleared.
1503 			 */
1504 			transport->error_count = 0;
1505 			transport->asoc->overall_error_count = 0;
1506 			forward_progress = true;
1507 
1508 			/*
1509 			 * While in SHUTDOWN PENDING, we may have started
1510 			 * the T5 shutdown guard timer after reaching the
1511 			 * retransmission limit. Stop that timer as soon
1512 			 * as the receiver acknowledged any data.
1513 			 */
1514 			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1515 			    del_timer(&asoc->timers
1516 				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1517 					sctp_association_put(asoc);
1518 
1519 			/* Mark the destination transport address as
1520 			 * active if it is not so marked.
1521 			 */
1522 			if ((transport->state == SCTP_INACTIVE ||
1523 			     transport->state == SCTP_UNCONFIRMED) &&
1524 			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1525 				sctp_assoc_control_transport(
1526 					transport->asoc,
1527 					transport,
1528 					SCTP_TRANSPORT_UP,
1529 					SCTP_RECEIVED_SACK);
1530 			}
1531 
1532 			sctp_transport_raise_cwnd(transport, sack_ctsn,
1533 						  bytes_acked);
1534 
1535 			transport->flight_size -= bytes_acked;
1536 			if (transport->flight_size == 0)
1537 				transport->partial_bytes_acked = 0;
1538 			q->outstanding_bytes -= bytes_acked + migrate_bytes;
1539 		} else {
1540 			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
1541 			 * When a sender is doing zero window probing, it
1542 			 * should not timeout the association if it continues
1543 			 * to receive new packets from the receiver. The
1544 			 * reason is that the receiver MAY keep its window
1545 			 * closed for an indefinite time.
1546 			 * A sender is doing zero window probing when the
1547 			 * receiver's advertised window is zero, and there is
1548 			 * only one data chunk in flight to the receiver.
1549 			 *
1550 			 * Allow the association to timeout while in SHUTDOWN
1551 			 * PENDING or SHUTDOWN RECEIVED in case the receiver
1552 			 * stays in zero window mode forever.
1553 			 */
1554 			if (!q->asoc->peer.rwnd &&
1555 			    !list_empty(&tlist) &&
1556 			    (sack_ctsn+2 == q->asoc->next_tsn) &&
1557 			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1558 				pr_debug("%s: sack received for zero window "
1559 					 "probe:%u\n", __func__, sack_ctsn);
1560 
1561 				q->asoc->overall_error_count = 0;
1562 				transport->error_count = 0;
1563 			}
1564 		}
1565 
1566 		/* RFC 2960 6.3.2 Retransmission Timer Rules
1567 		 *
1568 		 * R2) Whenever all outstanding data sent to an address have
1569 		 * been acknowledged, turn off the T3-rtx timer of that
1570 		 * address.
1571 		 */
1572 		if (!transport->flight_size) {
1573 			if (del_timer(&transport->T3_rtx_timer))
1574 				sctp_transport_put(transport);
1575 		} else if (restart_timer) {
1576 			if (!mod_timer(&transport->T3_rtx_timer,
1577 				       jiffies + transport->rto))
1578 				sctp_transport_hold(transport);
1579 		}
1580 
1581 		if (forward_progress) {
1582 			if (transport->dst)
1583 				dst_confirm(transport->dst);
1584 		}
1585 	}
1586 
1587 	list_splice(&tlist, transmitted_queue);
1588 }
1589 
1590 /* Mark chunks as missing; as a consequence they may get retransmitted. */
1591 static void sctp_mark_missing(struct sctp_outq *q,
1592 			      struct list_head *transmitted_queue,
1593 			      struct sctp_transport *transport,
1594 			      __u32 highest_new_tsn_in_sack,
1595 			      int count_of_newacks)
1596 {
1597 	struct sctp_chunk *chunk;
1598 	__u32 tsn;
1599 	char do_fast_retransmit = 0;
1600 	struct sctp_association *asoc = q->asoc;
1601 	struct sctp_transport *primary = asoc->peer.primary_path;
1602 
1603 	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1604 
1605 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1606 
1607 		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1608 		 * 'Unacknowledged TSN's', if the TSN number of an
1609 		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1610 		 * value, increment the 'TSN.Missing.Report' count on that
1611 		 * chunk if it has NOT been fast retransmitted or marked for
1612 		 * fast retransmit already.
1613 		 */
1614 		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1615 		    !chunk->tsn_gap_acked &&
1616 		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1617 
1618 			/* SFR-CACC may require us to skip marking
1619 			 * this chunk as missing.
1620 			 */
1621 			if (!transport || !sctp_cacc_skip(primary,
1622 						chunk->transport,
1623 						count_of_newacks, tsn)) {
1624 				chunk->tsn_missing_report++;
1625 
1626 				pr_debug("%s: tsn:0x%x missing counter:%d\n",
1627 					 __func__, tsn, chunk->tsn_missing_report);
1628 			}
1629 		}
1630 		/*
1631 		 * M4) If any DATA chunk is found to have a
1632 		 * 'TSN.Missing.Report'
1633 		 * value larger than or equal to 3, mark that chunk for
1634 		 * retransmission and start the fast retransmit procedure.
1635 		 */
1636 
1637 		if (chunk->tsn_missing_report >= 3) {
1638 			chunk->fast_retransmit = SCTP_NEED_FRTX;
1639 			do_fast_retransmit = 1;
1640 		}
1641 	}
1642 
1643 	if (transport) {
1644 		if (do_fast_retransmit)
1645 			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1646 
1647 		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1648 			 "flight_size:%d, pba:%d\n",  __func__, transport,
1649 			 transport->cwnd, transport->ssthresh,
1650 			 transport->flight_size, transport->partial_bytes_acked);
1651 	}
1652 }
1653 
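/* Worked example with hypothetical values: if TSN 200 is unacknowledged
 * and still SCTP_CAN_FRTX while HighestTSNinSack has advanced to 205,
 * each SACK processed here increments its tsn_missing_report (unless
 * SFR-CACC says to skip it).  Once the counter reaches 3, the chunk is
 * marked SCTP_NEED_FRTX and sctp_retransmit() is called with
 * SCTP_RTXR_FAST_RTX for the transport.
 */
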
1654 /* Is the given TSN acked by this packet?  */
1655 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1656 {
1657 	int i;
1658 	sctp_sack_variable_t *frags;
1659 	__u16 gap;
1660 	__u32 ctsn = ntohl(sack->cum_tsn_ack);
1661 
1662 	if (TSN_lte(tsn, ctsn))
1663 		goto pass;
1664 
1665 	/* 3.3.4 Selective Acknowledgement (SACK) (3):
1666 	 *
1667 	 * Gap Ack Blocks:
1668 	 *  These fields contain the Gap Ack Blocks. They are repeated
1669 	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
1670 	 *  defined in the Number of Gap Ack Blocks field. All DATA
1671 	 *  chunks with TSNs greater than or equal to (Cumulative TSN
1672 	 *  Ack + Gap Ack Block Start) and less than or equal to
1673 	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1674 	 *  Block are assumed to have been received correctly.
1675 	 */
1676 
1677 	frags = sack->variable;
1678 	gap = tsn - ctsn;
1679 	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
1680 		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
1681 		    TSN_lte(gap, ntohs(frags[i].gab.end)))
1682 			goto pass;
1683 	}
1684 
1685 	return 0;
1686 pass:
1687 	return 1;
1688 }
1689 
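/* Worked example with hypothetical values: with cum_tsn_ack = 100 and a
 * single Gap Ack Block of start = 2, end = 5, the block covers TSNs
 * 102..105.  For tsn = 103 the offset is gap = 103 - 100 = 3, which lies
 * within [2, 5], so sctp_acked() returns 1; tsn = 101 gives gap = 1 and
 * is still considered missing.
 */
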
1690 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1691 				    int nskips, __be16 stream)
1692 {
1693 	int i;
1694 
1695 	for (i = 0; i < nskips; i++) {
1696 		if (skiplist[i].stream == stream)
1697 			return i;
1698 	}
1699 	return i;
1700 }
1701 
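/* Worked example with hypothetical values: with a skiplist holding
 * entries for streams 1 and 3 (nskips = 2), looking up stream 3 returns
 * index 1 and the caller overwrites that entry with the newer SSN,
 * while looking up stream 5 returns 2, so the caller appends a new
 * entry and increments nskips.
 */
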
1702 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1703 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1704 {
1705 	struct sctp_association *asoc = q->asoc;
1706 	struct sctp_chunk *ftsn_chunk = NULL;
1707 	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1708 	int nskips = 0;
1709 	int skip_pos = 0;
1710 	__u32 tsn;
1711 	struct sctp_chunk *chunk;
1712 	struct list_head *lchunk, *temp;
1713 
1714 	if (!asoc->peer.prsctp_capable)
1715 		return;
1716 
1717 	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1718 	 * received SACK.
1719 	 *
1720 	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1721 	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1722 	 */
1723 	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1724 		asoc->adv_peer_ack_point = ctsn;
1725 
1726 	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1727 	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1728 	 * the chunk next in the out-queue space is marked as "abandoned" as
1729 	 * shown in the following example:
1730 	 *
1731 	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1732 	 * and the Advanced.Peer.Ack.Point is updated to this value:
1733 	 *
1734 	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1735 	 *   normal SACK processing           local advancement
1736 	 *                ...                           ...
1737 	 *   Adv.Ack.Pt-> 102 acked                     102 acked
1738 	 *                103 abandoned                 103 abandoned
1739 	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
1740 	 *                105                           105
1741 	 *                106 acked                     106 acked
1742 	 *                ...                           ...
1743 	 *
1744 	 * In this example, the data sender successfully advanced the
1745 	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1746 	 */
1747 	list_for_each_safe(lchunk, temp, &q->abandoned) {
1748 		chunk = list_entry(lchunk, struct sctp_chunk,
1749 					transmitted_list);
1750 		tsn = ntohl(chunk->subh.data_hdr->tsn);
1751 
1752 		/* Remove any chunks in the abandoned queue that are acked by
1753 		 * the ctsn.
1754 		 */
1755 		if (TSN_lte(tsn, ctsn)) {
1756 			list_del_init(lchunk);
1757 			sctp_chunk_free(chunk);
1758 		} else {
1759 			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
1760 				asoc->adv_peer_ack_point = tsn;
1761 				if (chunk->chunk_hdr->flags &
1762 					 SCTP_DATA_UNORDERED)
1763 					continue;
1764 				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1765 						nskips,
1766 						chunk->subh.data_hdr->stream);
1767 				ftsn_skip_arr[skip_pos].stream =
1768 					chunk->subh.data_hdr->stream;
1769 				ftsn_skip_arr[skip_pos].ssn =
1770 					 chunk->subh.data_hdr->ssn;
1771 				if (skip_pos == nskips)
1772 					nskips++;
1773 				if (nskips == 10)
1774 					break;
1775 			} else
1776 				break;
1777 		}
1778 	}
1779 
1780 	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1781 	 * is greater than the Cumulative TSN ACK carried in the received
1782 	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
1783 	 * chunk containing the latest value of the
1784 	 * "Advanced.Peer.Ack.Point".
1785 	 *
1786 	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1787 	 * list each stream and sequence number in the forwarded TSN. This
1788 	 * information will enable the receiver to easily find any
1789 	 * stranded TSN's waiting on stream reorder queues. Each stream
1790 	 * SHOULD only be reported once; this means that if multiple
1791 	 * abandoned messages occur in the same stream then only the
1792 	 * highest abandoned stream sequence number is reported. If the
1793 	 * total size of the FORWARD TSN does NOT fit in a single MTU then
1794 	 * the sender of the FORWARD TSN SHOULD lower the
1795 	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1796 	 * single MTU.
1797 	 */
1798 	if (asoc->adv_peer_ack_point > ctsn)
1799 		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1800 					      nskips, &ftsn_skip_arr[0]);
1801 
1802 	if (ftsn_chunk) {
1803 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1804 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1805 	}
1806 }
1807