/* xref: /freebsd/sys/netinet/sctp_indata.c (revision cca48a59de682fe40c6ac3b2bb4356d0e42f21dd) */
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT yet been put on the socket queue and which
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS (silly window syndrome) avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
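/*
 * Illustrative example (hypothetical numbers, not taken from a real
 * trace): with 60000 bytes of space reported by sctp_sbspace(), 3000
 * bytes in 2 chunks on the reassembly queue and 1000 bytes in 1 chunk on
 * the stream queues, the function returns roughly
 *
 *	60000 - (3000 + 2 * MSIZE) - (1000 + 1 * MSIZE) - my_rwnd_control_len
 *
 * clamped to 1 when the control overhead would otherwise swallow the
 * window entirely.
 */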



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint32_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
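/*
 * For reference, a userland receiver that enabled SCTP_RECVRCVINFO would
 * walk the ancillary data assembled above roughly like this (sketch only,
 * plain socket API, with msg being the struct msghdr filled by recvmsg()):
 *
 *	struct cmsghdr *scmh;
 *	struct sctp_rcvinfo rcv;
 *
 *	for (scmh = CMSG_FIRSTHDR(&msg); scmh != NULL;
 *	    scmh = CMSG_NXTHDR(&msg, scmh)) {
 *		if (scmh->cmsg_level == IPPROTO_SCTP &&
 *		    scmh->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&rcv, CMSG_DATA(scmh), sizeof(rcv));
 *			break;
 *		}
 *	}
 *
 * after which rcv.rcv_sid, rcv.rcv_ssn and rcv.rcv_tsn identify the
 * message that was just read.
 */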


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
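/*
 * The gap used above is the TSN's bit offset into the mapping arrays. As
 * a hypothetical example: with mapping_array_base_tsn = 1000, TSN 1005
 * maps to gap 5, i.e. bit 5 of byte 0 of the array. The same
 * SCTP_CALC_TSN_TO_GAP() translation is used by
 * sctp_process_a_data_chunk() below before any bit is set or tested.
 */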

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t bits, unordered;

	bits = (control->sinfo_flags >> 8);
	unordered = bits & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one message can be here in old
				 * style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (at->msg_id == control->msg_id) {
				/*
				 * Gak, the peer sent a duplicate msg id
				 * number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q,
					    at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
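/*
 * Note on the insertion above: both the ordered and un-ordered queues are
 * kept sorted ascending by msg_id (the SSN for old DATA, the MID for
 * I-DATA), so the delivery paths below can always take the next
 * deliverable message from the head; a duplicate msg_id is reported back
 * as -1 so the caller can abort the association.
 */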

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num,
		    (uint16_t) chk->rec.data.stream_seq);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
		goto protocol_error;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_sequence_delivered + 1;
	if (nxt_todel == control->sinfo_ssn) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if ((nxt_todel == control->sinfo_ssn) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			} else if (nxt_todel == control->sinfo_ssn) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str msg_id: %u duplicate",
			    control->msg_id);
			clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}


static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
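/*
 * Invariant maintained here and in sctp_add_to_tail_pointer() below:
 * zero-length mbufs are freed on the way, control->tail_mbuf always points
 * at the last mbuf of the chain, and control->length is the sum of
 * SCTP_BUF_LEN() over the remaining mbufs, so appending never requires
 * re-walking the whole chain.
 */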

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->sinfo_ssn = control->sinfo_ssn;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->msg_id = control->msg_id;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static int
sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, uint32_t pd_point)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to msg_id 0. So we have to do the old style
	 * watching to see if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will be looked at. In
	 * theory there should be no other entries in reality, unless the
	 * guy is sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc = NULL;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn_num == fsn) {
			/* Ok lets add it */
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_alloc_a_readq(stcb, nc);
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn_num;
						nc->data = tchk->data;
						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc) && (nc->first_frag_seen)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					nc = NULL;
					goto restart;
				}
				return (1);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
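/*
 * Partial-delivery note for the pd_point test above: once a still
 * incomplete message holds more than pd_point bytes, it is pushed to the
 * read queue early so a reader can start draining it before the last
 * fragment arrives; at most one message per stream may be in that state,
 * tracked by strm->pd_api_started / control->pdapi_started.
 */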

static void
sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted = 0;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
				/*
				 * Easy way, the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn_num == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really will
				 * only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Swap the lengths */
			tmp = control->length;
			control->length = chk->send_size;
			chk->send_size = tmp;
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn_num;
			chk->rec.data.fsn_num = tmp;
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	if (TAILQ_EMPTY(&control->reasm)) {
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		return;
	}
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier. Note that
			 * sctp_abort_in_reasm() frees chk and its data, so
			 * we must not free them here as well.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn_num;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);
	if ((control) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (strm->last_sequence_delivered == control->sinfo_ssn) {
		/*
		 * Ok, the guy at the top that was being partially
		 * delivered has completed, so we remove it. Note the
		 * pd_api flag was taken off when the chunk was merged on
		 * in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn,
		    control->top_fsn, control->fsn_included,
		    strm->last_sequence_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_sequence_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if ((control->sinfo_ssn == next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it is
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			strm->last_sequence_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}

void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	int i_locked = 0;

	if (control->on_read_q) {
		/*
		 * It is being pd-api'd, so we must take the read lock.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data);
	}
	control->fsn_included = chk->rec.data.fsn_num;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
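/*
 * Invariant kept by the merge above: control->fsn_included always names
 * the highest in-sequence fragment merged so far, and the merged bytes
 * move from the reassembly accounting (size_on_reasm_queue) into the
 * control's mbuf chain, with end_added set once the LAST fragment lands.
 */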

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	int do_wakeup, unordered;

	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok we created this control and now let's
			 * validate that it is legal, i.e. there is a B bit
			 * set; if not, and we have up to the cum-ack, then
			 * it is invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if it
	 * is the first it goes to the control mbuf. o if it is not first
	 * but the next in sequence it goes to the control, and each
	 * succeeding one in order also goes. o if it is not in order we
	 * place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->top_fsn = chk->rec.data.fsn_num;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this so it is a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn_num, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this so it is a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn_num, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen last
			 * one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn_num,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn_num);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn_num);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
				/*
				 * Gak, the peer sent a duplicate fragment
				 * sequence number.
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn_num);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn_num);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok let's see if we can suck any up into the control structure
	 * that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn_num == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn_num,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				sctp_add_chk_to_control(control, strm, stcb, asoc, at);
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
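/*
 * Reassembly walk-through (hypothetical FSNs, I-DATA style): if fragments
 * arrive as 0 (B-bit), 2, 1, 3 (E-bit), then 0 seeds control->data, 2
 * parks on the sorted reasm list, the arrival of 1 lets the in-sequence
 * scan above merge both 1 and 2, and the arrival of 3 completes the
 * message (end_added gets set in sctp_add_chk_to_control()) so it can go
 * to the read queue.
 */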

static struct sctp_queued_to_read *
find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	} else {
		if (old) {
			control = TAILQ_FIRST(&strm->uno_inqueue);
			return (control);
		}
		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	}
	return (control);
}
1533 
1534 static int
1535 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1536     struct mbuf **m, int offset, int chk_length,
1537     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1538     int *break_flag, int last_chunk, uint8_t chtype)
1539 {
1540 	/* Process a data chunk */
1541 	/* struct sctp_tmit_chunk *chk; */
1542 	struct sctp_data_chunk *ch;
1543 	struct sctp_idata_chunk *nch, chunk_buf;
1544 	struct sctp_tmit_chunk *chk;
1545 	uint32_t tsn, fsn, gap, msg_id;
1546 	struct mbuf *dmbuf;
1547 	int the_len;
1548 	int need_reasm_check = 0;
1549 	uint16_t strmno;
1550 	struct mbuf *op_err;
1551 	char msg[SCTP_DIAG_INFO_LEN];
1552 	struct sctp_queued_to_read *control = NULL;
1553 	uint32_t protocol_id;
1554 	uint8_t chunk_flags;
1555 	struct sctp_stream_reset_list *liste;
1556 	struct sctp_stream_in *strm;
1557 	int ordered;
1558 	size_t clen;
1559 	int created_control = 0;
1560 	uint8_t old_data;
1561 
1562 	chk = NULL;
1563 	if (chtype == SCTP_IDATA) {
1564 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1565 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1566 		ch = (struct sctp_data_chunk *)nch;
1567 		clen = sizeof(struct sctp_idata_chunk);
1568 		tsn = ntohl(ch->dp.tsn);
1569 		msg_id = ntohl(nch->dp.msg_id);
1570 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1571 			fsn = 0;
1572 		else
1573 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1574 		old_data = 0;
1575 	} else {
1576 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1577 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1578 		tsn = ntohl(ch->dp.tsn);
1579 		clen = sizeof(struct sctp_data_chunk);
1580 		fsn = tsn;
1581 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1582 		nch = NULL;
1583 		old_data = 1;
1584 	}
1585 	chunk_flags = ch->ch.chunk_flags;
1586 	if ((size_t)chk_length == clen) {
1587 		/*
1588 		 * Need to send an abort since we had a empty data chunk.
1589 		 */
1590 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1591 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1592 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1593 		*abort_flag = 1;
1594 		return (0);
1595 	}
1596 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1597 		asoc->send_sack = 1;
1598 	}
1599 	protocol_id = ch->dp.protocol_id;
1600 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1601 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1602 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1603 	}
1604 	if (stcb == NULL) {
1605 		return (0);
1606 	}
1607 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1608 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1609 		/* It is a duplicate */
1610 		SCTP_STAT_INCR(sctps_recvdupdata);
1611 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1612 			/* Record a dup for the next outbound sack */
1613 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1614 			asoc->numduptsns++;
1615 		}
1616 		asoc->send_sack = 1;
1617 		return (0);
1618 	}
1619 	/* Calculate the number of TSN's between the base and this TSN */
1620 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
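	/*
	 * A sketch of the gap computation, assuming the macro's usual
	 * serial-arithmetic definition from sctp_constants.h:
	 *
	 *	if (tsn >= base)
	 *		gap = tsn - base;
	 *	else
	 *		gap = (MAX_TSN - base) + tsn + 1;
	 *
	 * e.g. base 0xfffffffe and tsn 0x00000001 give gap 3, surviving the
	 * 32-bit TSN wrap. The gap is then a bit offset into the mapping
	 * arrays.
	 */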
1621 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1622 		/* Can't hold the bit in the mapping at max array, toss it */
1623 		return (0);
1624 	}
1625 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1626 		SCTP_TCB_LOCK_ASSERT(stcb);
1627 		if (sctp_expand_mapping_array(asoc, gap)) {
1628 			/* Can't expand, drop it */
1629 			return (0);
1630 		}
1631 	}
1632 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1633 		*high_tsn = tsn;
1634 	}
1635 	/* See if we have received this one already */
1636 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1637 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1638 		SCTP_STAT_INCR(sctps_recvdupdata);
1639 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1640 			/* Record a dup for the next outbound sack */
1641 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1642 			asoc->numduptsns++;
1643 		}
1644 		asoc->send_sack = 1;
1645 		return (0);
1646 	}
1647 	/*
1648 	 * Check to see about the GONE flag; duplicates would have caused
1649 	 * a SACK to be sent up above.
1650 	 */
1651 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1652 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1653 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1654 		/*
1655 		 * wait a minute, this guy is gone, there is no longer a
1656 		 * receiver. Send peer an ABORT!
1657 		 */
1658 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1659 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1660 		*abort_flag = 1;
1661 		return (0);
1662 	}
1663 	/*
1664 	 * Now before going further we see if there is room. If NOT then we
1665 	 * MAY let one through only IF this TSN is the one we are waiting
1666 	 * for on a partial delivery API.
1667 	 */
1668 
1669 	/* Is the stream valid? */
1670 	strmno = ntohs(ch->dp.stream_id);
1671 
1672 	if (strmno >= asoc->streamincnt) {
1673 		struct sctp_error_invalid_stream *cause;
1674 
1675 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1676 		    0, M_NOWAIT, 1, MT_DATA);
1677 		if (op_err != NULL) {
1678 			/* add some space up front so prepend will work well */
1679 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1680 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1681 			/*
1682 			 * Error causes are just parameters. This one holds two
1683 			 * back-to-back parameter headers: the error type and
1684 			 * size, then the stream id and a reserved field.
1685 			 */
1686 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1687 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1688 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1689 			cause->stream_id = ch->dp.stream_id;
1690 			cause->reserved = htons(0);
1691 			sctp_queue_op_err(stcb, op_err);
1692 		}
1693 		SCTP_STAT_INCR(sctps_badsid);
1694 		SCTP_TCB_LOCK_ASSERT(stcb);
1695 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1696 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1697 			asoc->highest_tsn_inside_nr_map = tsn;
1698 		}
1699 		if (tsn == (asoc->cumulative_tsn + 1)) {
1700 			/* Update cum-ack */
1701 			asoc->cumulative_tsn = tsn;
1702 		}
1703 		return (0);
1704 	}
1705 	strm = &asoc->strmin[strmno];
1706 	/*
1707 	 * If it's a fragmented message, let's see if we can find the control
1708 	 * on the reassembly queues.
1709 	 */
1710 	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1711 		/*
1712 		 * The first fragment *must* have fsn 0; the other (middle/end)
1713 		 * pieces can *not* have fsn 0.
1714 		 */
1715 		goto err_out;
1716 	}
1717 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1718 		/* See if we can find the re-assembly entity */
1719 		control = find_reasm_entry(strm, msg_id, ordered, old_data);
1720 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1721 		    chunk_flags, control);
1722 		if (control) {
1723 			/* We found something, does it belong? */
1724 			if (ordered && (msg_id != control->sinfo_ssn)) {
1725 		err_out:
				/*
				 * Fill in msg before generating the cause; the
				 * buffer was previously used uninitialized on
				 * every path reaching this label.
				 */
				snprintf(msg, sizeof(msg), "Reassembly problem (MSG_ID=%8.8x)", msg_id);
1726 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1727 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1728 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1729 				*abort_flag = 1;
1730 				return (0);
1731 			}
1732 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1733 				/*
1734 				 * We can't have a switched order with an
1735 				 * unordered chunk
1736 				 */
1737 				goto err_out;
1738 			}
1739 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1740 				/*
1741 				 * We can't have a switched unordered with a
1742 				 * We can't have a switched unordered with an
1743 				 * ordered chunk
1744 				goto err_out;
1745 			}
1746 		}
1747 	} else {
1748 		/*
1749 		 * It's a complete segment. Let's validate we don't have a
1750 		 * re-assembly going on with the same Stream/Seq (for
1751 		 * ordered) or in the same Stream for unordered.
1752 		 */
1753 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1754 		    chunk_flags);
1755 		if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1756 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1757 			    chunk_flags,
1758 			    msg_id);
1759 
1760 			goto err_out;
1761 		}
1762 	}
1763 	/* now do the tests */
1764 	if (((asoc->cnt_on_all_streams +
1765 	    asoc->cnt_on_reasm_queue +
1766 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1767 	    (((int)asoc->my_rwnd) <= 0)) {
1768 		/*
1769 		 * When we have NO room in the rwnd we check to make sure
1770 		 * the reader is doing its job...
1771 		 */
1772 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1773 			/* some to read, wake-up */
1774 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1775 			struct socket *so;
1776 
1777 			so = SCTP_INP_SO(stcb->sctp_ep);
1778 			atomic_add_int(&stcb->asoc.refcnt, 1);
1779 			SCTP_TCB_UNLOCK(stcb);
1780 			SCTP_SOCKET_LOCK(so, 1);
1781 			SCTP_TCB_LOCK(stcb);
1782 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1783 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1784 				/* assoc was freed while we were unlocked */
1785 				SCTP_SOCKET_UNLOCK(so, 1);
1786 				return (0);
1787 			}
1788 #endif
1789 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1791 			SCTP_SOCKET_UNLOCK(so, 1);
1792 #endif
1793 		}
1794 		/* now is it in the mapping array of what we have accepted? */
1795 		if (nch == NULL) {
1796 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1797 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1798 				/* Nope, not in the valid range; dump it */
1799 		dump_packet:
1800 				sctp_set_rwnd(stcb, asoc);
1801 				if ((asoc->cnt_on_all_streams +
1802 				    asoc->cnt_on_reasm_queue +
1803 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1804 					SCTP_STAT_INCR(sctps_datadropchklmt);
1805 				} else {
1806 					SCTP_STAT_INCR(sctps_datadroprwnd);
1807 				}
1808 				*break_flag = 1;
1809 				return (0);
1810 			}
1811 		} else {
1812 			if (control == NULL) {
1813 				goto dump_packet;
1814 			}
1815 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1816 				goto dump_packet;
1817 			}
1818 		}
1819 	}
1820 #ifdef SCTP_ASOCLOG_OF_TSNS
1821 	SCTP_TCB_LOCK_ASSERT(stcb);
1822 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1823 		asoc->tsn_in_at = 0;
1824 		asoc->tsn_in_wrapped = 1;
1825 	}
1826 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1827 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1828 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1829 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1830 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1831 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1832 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1833 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1834 	asoc->tsn_in_at++;
1835 #endif
1836 	/*
1837 	 * Before we continue, let's validate that we are not being fooled
1838 	 * by an evil attacker. We can only have N * 8 chunks outstanding,
1839 	 * given the TSN spread allowed by the N-byte mapping array, so
1840 	 * there is no way our stream sequence numbers could have wrapped.
1841 	 * We of course only validate the FIRST fragment, so the bit must be set.
1842 	 */
1843 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1844 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1845 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1846 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1847 		/* The incoming sseq is behind where we last delivered? */
1848 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1849 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1850 
1851 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1852 		    asoc->strmin[strmno].last_sequence_delivered,
1853 		    tsn, strmno, msg_id);
1854 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1855 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1856 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1857 		*abort_flag = 1;
1858 		return (0);
1859 	}
1860 	/************************************
1861 	 * From here down we may find ch-> invalid
1862 	 * so it's a good idea NOT to use it.
1863 	 *************************************/
1864 	if (nch) {
1865 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1866 	} else {
1867 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1868 	}
1869 	if (last_chunk == 0) {
1870 		if (nch) {
1871 			dmbuf = SCTP_M_COPYM(*m,
1872 			    (offset + sizeof(struct sctp_idata_chunk)),
1873 			    the_len, M_NOWAIT);
1874 		} else {
1875 			dmbuf = SCTP_M_COPYM(*m,
1876 			    (offset + sizeof(struct sctp_data_chunk)),
1877 			    the_len, M_NOWAIT);
1878 		}
1879 #ifdef SCTP_MBUF_LOGGING
1880 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1881 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1882 		}
1883 #endif
1884 	} else {
1885 		/* We can steal the last chunk */
1886 		int l_len;
1887 
1888 		dmbuf = *m;
1889 		/* lop off the top part */
1890 		if (nch) {
1891 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1892 		} else {
1893 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1894 		}
1895 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1896 			l_len = SCTP_BUF_LEN(dmbuf);
1897 		} else {
1898 			/*
1899 			 * need to count up the size; hopefully we do not hit
1900 			 * this too often :-0
1901 			 */
1902 			struct mbuf *lat;
1903 
1904 			l_len = 0;
1905 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1906 				l_len += SCTP_BUF_LEN(lat);
1907 			}
1908 		}
1909 		if (l_len > the_len) {
1910 			/* Trim the excess bytes off the end too */
1911 			m_adj(dmbuf, -(l_len - the_len));
1912 		}
1913 	}
1914 	if (dmbuf == NULL) {
1915 		SCTP_STAT_INCR(sctps_nomem);
1916 		return (0);
1917 	}
1918 	/*
1919 	 * Now no matter what we need a control, get one if we don't have
1920 	 * one (we may have gotten it above when we found the message was
1921 	 * fragmented).
1922 	 */
1923 	if (control == NULL) {
1924 		sctp_alloc_a_readq(stcb, control);
1925 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1926 		    protocol_id,
1927 		    strmno, msg_id,
1928 		    chunk_flags,
1929 		    NULL, fsn, msg_id);
1930 		if (control == NULL) {
1931 			SCTP_STAT_INCR(sctps_nomem);
1932 			return (0);
1933 		}
1934 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1935 			control->data = dmbuf;
1936 			control->tail_mbuf = NULL;
1937 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1938 			control->top_fsn = control->fsn_included = fsn;
1939 		}
1940 		created_control = 1;
1941 	}
1942 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1943 	    chunk_flags, ordered, msg_id, control);
1944 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1945 	    TAILQ_EMPTY(&asoc->resetHead) &&
1946 	    ((ordered == 0) ||
1947 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1948 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1949 		/* Candidate for express delivery */
1950 		/*
1951 		 * It's not fragmented, no PD-API is up, nothing is in the
1952 		 * delivery queue, it's un-ordered OR ordered and the next to
1953 		 * deliver AND nothing else is stuck on the stream queue,
1954 		 * and there is room for it in the socket buffer. Let's just
1955 		 * stuff it up the buffer....
1956 		 */
1957 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1958 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1959 			asoc->highest_tsn_inside_nr_map = tsn;
1960 		}
1961 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1962 		    control, msg_id);
1963 
1964 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1965 		    control, &stcb->sctp_socket->so_rcv,
1966 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1967 
1968 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1969 			/* for ordered, bump what we delivered */
1970 			strm->last_sequence_delivered++;
1971 		}
1972 		SCTP_STAT_INCR(sctps_recvexpress);
1973 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1974 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1975 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1976 		}
1977 		control = NULL;
1978 		goto finish_express_del;
1979 	}
1980 	/* Now will we need a chunk too? */
1981 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1982 		sctp_alloc_a_chunk(stcb, chk);
1983 		if (chk == NULL) {
1984 			/* No memory so we drop the chunk */
1985 			SCTP_STAT_INCR(sctps_nomem);
1986 			if (last_chunk == 0) {
1987 				/* we copied it, free the copy */
1988 				sctp_m_freem(dmbuf);
1989 			}
1990 			return (0);
1991 		}
1992 		chk->rec.data.TSN_seq = tsn;
1993 		chk->no_fr_allowed = 0;
1994 		chk->rec.data.fsn_num = fsn;
1995 		chk->rec.data.stream_seq = msg_id;
1996 		chk->rec.data.stream_number = strmno;
1997 		chk->rec.data.payloadtype = protocol_id;
1998 		chk->rec.data.context = stcb->asoc.context;
1999 		chk->rec.data.doing_fast_retransmit = 0;
2000 		chk->rec.data.rcv_flags = chunk_flags;
2001 		chk->asoc = asoc;
2002 		chk->send_size = the_len;
2003 		chk->whoTo = net;
2004 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2005 		    chk,
2006 		    control, msg_id);
2007 		atomic_add_int(&net->ref_count, 1);
2008 		chk->data = dmbuf;
2009 	}
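	/*
	 * Two maps track arrived TSNs: nr_mapping_array holds TSNs this end
	 * commits to never renege on, while mapping_array holds TSNs that
	 * may still be revoked if the socket buffer must be drained under
	 * memory pressure. With sctp_do_drain disabled, everything is
	 * non-renegable.
	 */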
2010 	/* Set the appropriate TSN mark */
2011 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2012 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2013 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2014 			asoc->highest_tsn_inside_nr_map = tsn;
2015 		}
2016 	} else {
2017 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2018 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2019 			asoc->highest_tsn_inside_map = tsn;
2020 		}
2021 	}
2022 	/* Now is it complete (i.e. not fragmented)? */
2023 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2024 		/*
2025 		 * Special check for when streams are resetting. We could be
2026 		 * smarter about this and check the actual stream to see if
2027 		 * it is being reset... that way we would not create
2028 		 * head-of-line blocking (HOLB) between streams being reset
2029 		 * and those not being reset.
2030 		 *
2031 		 */
2032 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2033 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2034 			/*
2035 			 * yep, it's past where we need to reset... go ahead
2036 			 * and queue it.
2037 			 */
2038 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2039 				/* first one on */
2040 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2041 			} else {
2042 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2043 				unsigned char inserted = 0;
2044 
2045 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2046 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2047 
2048 						continue;
2049 					} else {
2050 						/* found it */
2051 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2052 						inserted = 1;
2053 						break;
2054 					}
2055 				}
2056 				if (inserted == 0) {
2057 					/*
2058 					 * Nothing on the queue has a larger
2059 					 * TSN, so this one must be put at
2060 					 * the end.
2061 					 */
2062 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2063 				}
2064 			}
2065 			goto finish_express_del;
2066 		}
2067 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2068 			/* queue directly into socket buffer */
2069 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2070 			    control, msg_id);
2071 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2072 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2073 			    control,
2074 			    &stcb->sctp_socket->so_rcv, 1,
2075 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2076 
2077 		} else {
2078 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2079 			    msg_id);
2080 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2081 			if (*abort_flag) {
2082 				if (last_chunk) {
2083 					*m = NULL;
2084 				}
2085 				return (0);
2086 			}
2087 		}
2088 		goto finish_express_del;
2089 	}
2090 	/* If we reach here it's a reassembly */
2091 	need_reasm_check = 1;
2092 	SCTPDBG(SCTP_DEBUG_XXX,
2093 	    "Queue data to stream for reasm control: %p msg_id: %u\n",
2094 	    control, msg_id);
2095 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2096 	if (*abort_flag) {
2097 		/*
2098 		 * the assoc is now gone and chk was put onto the reasm
2099 		 * queue, which has all been freed.
2100 		 */
2101 		if (last_chunk) {
2102 			*m = NULL;
2103 		}
2104 		return (0);
2105 	}
2106 finish_express_del:
2107 	/* Here we tidy up things */
2108 	if (tsn == (asoc->cumulative_tsn + 1)) {
2109 		/* Update cum-ack */
2110 		asoc->cumulative_tsn = tsn;
2111 	}
2112 	if (last_chunk) {
2113 		*m = NULL;
2114 	}
2115 	if (ordered) {
2116 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2117 	} else {
2118 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2119 	}
2120 	SCTP_STAT_INCR(sctps_recvdata);
2121 	/* Set it present please */
2122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2123 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2124 	}
2125 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2126 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2127 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2128 	}
2129 	/* check the special flag for stream resets */
2130 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2131 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2132 		/*
2133 		 * we have finished working through the backlogged TSNs; now
2134 		 * it is time to reset streams. 1: call the reset function. 2:
2135 		 * free the pending_reply space. 3: distribute any chunks in
2136 		 * the pending_reply_queue.
2137 		 */
2138 		struct sctp_queued_to_read *ctl, *nctl;
2139 
2140 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2141 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2142 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2143 		SCTP_FREE(liste, SCTP_M_STRESET);
2144 		/* sa_ignore FREED_MEMORY */
2145 		liste = TAILQ_FIRST(&asoc->resetHead);
2146 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2147 			/* All can be removed */
2148 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2149 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2150 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2151 				if (*abort_flag) {
2152 					return (0);
2153 				}
2154 			}
2155 		} else {
2156 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2157 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2158 					break;
2159 				}
2160 				/*
2161 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2162 				 * process it which is the NOT of
2163 				 * ctl->sinfo_tsn > liste->tsn
2164 				 */
2165 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2166 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2167 				if (*abort_flag) {
2168 					return (0);
2169 				}
2170 			}
2171 		}
2172 		/*
2173 		 * Now service re-assembly to pick up anything that has been
2174 		 * held on the reassembly queue.
2175 		 */
2176 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2177 		need_reasm_check = 0;
2178 	}
2179 	if (need_reasm_check) {
2180 		/* Another one waits? */
2181 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2182 	}
2183 	return (1);
2184 }
2185 
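/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits starting at
 * the least significant bit of val, i.e. the index of the lowest clear bit
 * (8 when all bits are set). For example tab[0x0f] == 4 and tab[0xef] == 4,
 * since bit 4 is the first gap in both bytes. This lets the slide routine
 * below find the first missing TSN within a map byte without looping over
 * individual bits.
 */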
2186 static const int8_t sctp_map_lookup_tab[256] = {
2187 	0, 1, 0, 2, 0, 1, 0, 3,
2188 	0, 1, 0, 2, 0, 1, 0, 4,
2189 	0, 1, 0, 2, 0, 1, 0, 3,
2190 	0, 1, 0, 2, 0, 1, 0, 5,
2191 	0, 1, 0, 2, 0, 1, 0, 3,
2192 	0, 1, 0, 2, 0, 1, 0, 4,
2193 	0, 1, 0, 2, 0, 1, 0, 3,
2194 	0, 1, 0, 2, 0, 1, 0, 6,
2195 	0, 1, 0, 2, 0, 1, 0, 3,
2196 	0, 1, 0, 2, 0, 1, 0, 4,
2197 	0, 1, 0, 2, 0, 1, 0, 3,
2198 	0, 1, 0, 2, 0, 1, 0, 5,
2199 	0, 1, 0, 2, 0, 1, 0, 3,
2200 	0, 1, 0, 2, 0, 1, 0, 4,
2201 	0, 1, 0, 2, 0, 1, 0, 3,
2202 	0, 1, 0, 2, 0, 1, 0, 7,
2203 	0, 1, 0, 2, 0, 1, 0, 3,
2204 	0, 1, 0, 2, 0, 1, 0, 4,
2205 	0, 1, 0, 2, 0, 1, 0, 3,
2206 	0, 1, 0, 2, 0, 1, 0, 5,
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 4,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 6,
2211 	0, 1, 0, 2, 0, 1, 0, 3,
2212 	0, 1, 0, 2, 0, 1, 0, 4,
2213 	0, 1, 0, 2, 0, 1, 0, 3,
2214 	0, 1, 0, 2, 0, 1, 0, 5,
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 4,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 8
2219 };
2220 
2221 
2222 void
2223 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2224 {
2225 	/*
2226 	 * Now we also need to check the mapping array in a couple of ways.
2227 	 * 1) Did we move the cum-ack point?
2228 	 *
2229 	 * When you first glance at this you might think that all entries that
2230 	 * make up the position of the cum-ack would be in the nr-mapping
2231 	 * array only... i.e. things up to the cum-ack are always
2232 	 * deliverable. That's true with one exception: when it's a fragmented
2233 	 * message we may not deliver the data until some threshold (or all
2234 	 * of it) is in place. So we must OR the nr_mapping_array and
2235 	 * mapping_array to get a true picture of the cum-ack.
2236 	 */
2237 	struct sctp_association *asoc;
2238 	int at;
2239 	uint8_t val;
2240 	int slide_from, slide_end, lgap, distance;
2241 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2242 
2243 	asoc = &stcb->asoc;
2244 
2245 	old_cumack = asoc->cumulative_tsn;
2246 	old_base = asoc->mapping_array_base_tsn;
2247 	old_highest = asoc->highest_tsn_inside_map;
2248 	/*
2249 	 * We could probably improve this a small bit by calculating the
2250 	 * offset of the current cum-ack as the starting point.
2251 	 */
2252 	at = 0;
2253 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2254 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2255 		if (val == 0xff) {
2256 			at += 8;
2257 		} else {
2258 			/* there is a 0 bit */
2259 			at += sctp_map_lookup_tab[val];
2260 			break;
2261 		}
2262 	}
2263 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
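	/*
	 * "at" now counts the contiguous TSNs present starting at the base,
	 * so the cum-ack is the last TSN of that run: base + at - 1.
	 */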
2264 
2265 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2266 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2267 #ifdef INVARIANTS
2268 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2269 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2270 #else
2271 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2272 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2273 		sctp_print_mapping_array(asoc);
2274 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2275 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2276 		}
2277 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2278 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2279 #endif
2280 	}
2281 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2282 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2283 	} else {
2284 		highest_tsn = asoc->highest_tsn_inside_map;
2285 	}
2286 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2287 		/* The complete array was completed by a single FR */
2288 		/* highest becomes the cum-ack */
2289 		int clr;
2290 
2291 #ifdef INVARIANTS
2292 		unsigned int i;
2293 
2294 #endif
2295 
2296 		/* clear the array */
2297 		clr = ((at + 7) >> 3);
2298 		if (clr > asoc->mapping_array_size) {
2299 			clr = asoc->mapping_array_size;
2300 		}
2301 		memset(asoc->mapping_array, 0, clr);
2302 		memset(asoc->nr_mapping_array, 0, clr);
2303 #ifdef INVARIANTS
2304 		for (i = 0; i < asoc->mapping_array_size; i++) {
2305 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2306 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2307 				sctp_print_mapping_array(asoc);
2308 			}
2309 		}
2310 #endif
2311 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2312 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2313 	} else if (at >= 8) {
2314 		/* we can slide the mapping array down */
2315 		/* slide_from holds where we hit the first NON 0xff byte */
2316 
2317 		/*
2318 		 * now calculate the ceiling of the move using our highest
2319 		 * TSN value
2320 		 */
2321 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2322 		slide_end = (lgap >> 3);
2323 		if (slide_end < slide_from) {
2324 			sctp_print_mapping_array(asoc);
2325 #ifdef INVARIANTS
2326 			panic("impossible slide");
2327 #else
2328 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2329 			    lgap, slide_end, slide_from, at);
2330 			return;
2331 #endif
2332 		}
2333 		if (slide_end > asoc->mapping_array_size) {
2334 #ifdef INVARIANTS
2335 			panic("would overrun buffer");
2336 #else
2337 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2338 			    asoc->mapping_array_size, slide_end);
2339 			slide_end = asoc->mapping_array_size;
2340 #endif
2341 		}
2342 		distance = (slide_end - slide_from) + 1;
2343 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2344 			sctp_log_map(old_base, old_cumack, old_highest,
2345 			    SCTP_MAP_PREPARE_SLIDE);
2346 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2347 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2348 		}
2349 		if (distance + slide_from > asoc->mapping_array_size ||
2350 		    distance < 0) {
2351 			/*
2352 			 * Here we do NOT slide forward the array so that
2353 			 * hopefully when more data comes in to fill it up
2354 			 * we will be able to slide it forward. Really I
2355 			 * don't think this should happen :-0
2356 			 */
2357 
2358 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2359 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2360 				    (uint32_t) asoc->mapping_array_size,
2361 				    SCTP_MAP_SLIDE_NONE);
2362 			}
2363 		} else {
2364 			int ii;
2365 
2366 			for (ii = 0; ii < distance; ii++) {
2367 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2368 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2369 
2370 			}
2371 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2372 				asoc->mapping_array[ii] = 0;
2373 				asoc->nr_mapping_array[ii] = 0;
2374 			}
2375 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2376 				asoc->highest_tsn_inside_map += (slide_from << 3);
2377 			}
2378 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2379 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2380 			}
2381 			asoc->mapping_array_base_tsn += (slide_from << 3);
2382 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2383 				sctp_log_map(asoc->mapping_array_base_tsn,
2384 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2385 				    SCTP_MAP_SLIDE_RESULT);
2386 			}
2387 		}
2388 	}
2389 }
2390 
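/*
 * Decide whether to emit a SACK immediately or (re)arm the delayed-ack
 * timer, based on the association state, pending gaps or duplicates, and
 * the configured SACK frequency.
 */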
2391 void
2392 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2393 {
2394 	struct sctp_association *asoc;
2395 	uint32_t highest_tsn;
2396 
2397 	asoc = &stcb->asoc;
2398 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2399 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2400 	} else {
2401 		highest_tsn = asoc->highest_tsn_inside_map;
2402 	}
2403 
2404 	/*
2405 	 * Now we need to see if we need to queue a sack or just start the
2406 	 * timer (if allowed).
2407 	 */
2408 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2409 		/*
2410 		 * Ok, special case: in the SHUTDOWN-SENT state, here we make
2411 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2412 		 * a SACK.
2413 		 */
2414 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2415 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2416 			    stcb->sctp_ep, stcb, NULL,
2417 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2418 		}
2419 		sctp_send_shutdown(stcb,
2420 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2421 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2422 	} else {
2423 		int is_a_gap;
2424 
2425 		/* is there a gap now ? */
2426 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2427 
2428 		/*
2429 		 * CMT DAC algorithm: increase number of packets received
2430 		 * since last ack
2431 		 */
2432 		stcb->asoc.cmt_dac_pkts_rcvd++;
2433 
2434 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2435 							 * SACK */
2436 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2437 							 * longer is one */
2438 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2439 		    (is_a_gap) ||	/* is still a gap */
2440 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2441 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2442 		    ) {
2443 
2444 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2445 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2446 			    (stcb->asoc.send_sack == 0) &&
2447 			    (stcb->asoc.numduptsns == 0) &&
2448 			    (stcb->asoc.delayed_ack) &&
2449 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2450 
2451 				/*
2452 				 * CMT DAC algorithm: With CMT, delay acks
2453 				 * even in the face of reordering. Therefore,
2454 				 * acks that do not have to be sent because of
2455 				 * the above reasons will be delayed. That is,
2456 				 * acks that would have been sent due to gap
2457 				 * reports will be delayed with DAC. Start the
2458 				 * delayed ack timer.
2461 				 */
2462 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2463 				    stcb->sctp_ep, stcb, NULL);
2464 			} else {
2465 				/*
2466 				 * Ok we must build a SACK since the timer
2467 				 * is pending, we got our first packet OR
2468 				 * there are gaps or duplicates.
2469 				 */
2470 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2471 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2472 			}
2473 		} else {
2474 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2475 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2476 				    stcb->sctp_ep, stcb, NULL);
2477 			}
2478 		}
2479 	}
2480 }
2481 
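/*
 * Walk all chunks in the DATA portion of a packet, feeding DATA/I-DATA
 * chunks to sctp_process_a_data_chunk() and applying the chunk-type bit
 * rules (0x40: report an error, 0x80: skip and continue) to anything else
 * found there. Returns 0 on success, nonzero if the packet could not be
 * processed or the association was aborted.
 */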
2482 int
2483 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2484     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2485     struct sctp_nets *net, uint32_t * high_tsn)
2486 {
2487 	struct sctp_chunkhdr *ch, chunk_buf;
2488 	struct sctp_association *asoc;
2489 	int num_chunks = 0;	/* number of control chunks processed */
2490 	int stop_proc = 0;
2491 	int chk_length, break_flag, last_chunk;
2492 	int abort_flag = 0, was_a_gap;
2493 	struct mbuf *m;
2494 	uint32_t highest_tsn;
2495 
2496 	/* set the rwnd */
2497 	sctp_set_rwnd(stcb, &stcb->asoc);
2498 
2499 	m = *mm;
2500 	SCTP_TCB_LOCK_ASSERT(stcb);
2501 	asoc = &stcb->asoc;
2502 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2503 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2504 	} else {
2505 		highest_tsn = asoc->highest_tsn_inside_map;
2506 	}
2507 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2508 	/*
2509 	 * setup where we got the last DATA packet from for any SACK that
2510 	 * may need to go out. Don't bump the net. This is done ONLY when a
2511 	 * chunk is assigned.
2512 	 */
2513 	asoc->last_data_chunk_from = net;
2514 
2515 	/*-
2516 	 * Now before we proceed we must figure out if this is a wasted
2517 	 * cluster... i.e. it is a small packet sent in and yet the driver
2518 	 * underneath allocated a full cluster for it. If so we must copy it
2519 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2520 	 * with cluster starvation. Note for __Panda__ we don't do this
2521 	 * since it has clusters all the way down to 64 bytes.
2522 	 */
2523 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2524 		/* we only handle mbufs that are singletons.. not chains */
2525 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2526 		if (m) {
2527 			/* ok, let's see if we can copy the data up */
2528 			caddr_t *from, *to;
2529 
2530 			/* get the pointers and copy */
2531 			to = mtod(m, caddr_t *);
2532 			from = mtod((*mm), caddr_t *);
2533 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2534 			/* copy the length and free up the old */
2535 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2536 			sctp_m_freem(*mm);
2537 			/* success, back copy */
2538 			*mm = m;
2539 		} else {
2540 			/* We are in trouble in the mbuf world .. yikes */
2541 			m = *mm;
2542 		}
2543 	}
2544 	/* get pointer to the first chunk header */
2545 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2546 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2547 	if (ch == NULL) {
2548 		return (1);
2549 	}
2550 	/*
2551 	 * process all DATA chunks...
2552 	 */
2553 	*high_tsn = asoc->cumulative_tsn;
2554 	break_flag = 0;
2555 	asoc->data_pkts_seen++;
2556 	while (stop_proc == 0) {
2557 		/* validate chunk length */
2558 		chk_length = ntohs(ch->chunk_length);
2559 		if (length - *offset < chk_length) {
2560 			/* all done, mutilated chunk */
2561 			stop_proc = 1;
2562 			continue;
2563 		}
2564 		if ((asoc->idata_supported == 1) &&
2565 		    (ch->chunk_type == SCTP_DATA)) {
2566 			struct mbuf *op_err;
2567 			char msg[SCTP_DIAG_INFO_LEN];
2568 
2569 			snprintf(msg, sizeof(msg), "DATA chunk received when I-DATA was negotiated");
2570 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2571 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2572 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2573 			return (2);
2574 		}
2575 		if ((asoc->idata_supported == 0) &&
2576 		    (ch->chunk_type == SCTP_IDATA)) {
2577 			struct mbuf *op_err;
2578 			char msg[SCTP_DIAG_INFO_LEN];
2579 
2580 			snprintf(msg, sizeof(msg), "I-DATA chunk received when DATA was negotiated");
2581 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2582 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2583 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2584 			return (2);
2585 		}
2586 		if ((ch->chunk_type == SCTP_DATA) ||
2587 		    (ch->chunk_type == SCTP_IDATA)) {
2588 			int clen;
2589 
2590 			if (ch->chunk_type == SCTP_DATA) {
2591 				clen = sizeof(struct sctp_data_chunk);
2592 			} else {
2593 				clen = sizeof(struct sctp_idata_chunk);
2594 			}
2595 			if (chk_length < clen) {
2596 				/*
2597 				 * Need to send an abort since we had an
2598 				 * invalid data chunk.
2599 				 */
2600 				struct mbuf *op_err;
2601 				char msg[SCTP_DIAG_INFO_LEN];
2602 
2603 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2604 				    chk_length);
2605 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2606 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2607 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2608 				return (2);
2609 			}
2610 #ifdef SCTP_AUDITING_ENABLED
2611 			sctp_audit_log(0xB1, 0);
2612 #endif
2613 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2614 				last_chunk = 1;
2615 			} else {
2616 				last_chunk = 0;
2617 			}
2618 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2619 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2620 			    last_chunk, ch->chunk_type)) {
2621 				num_chunks++;
2622 			}
2623 			if (abort_flag)
2624 				return (2);
2625 
2626 			if (break_flag) {
2627 				/*
2628 				 * Set because we are out of rwnd space and
2629 				 * have no drop report space left.
2630 				 */
2631 				stop_proc = 1;
2632 				continue;
2633 			}
2634 		} else {
2635 			/* not a data chunk in the data region */
2636 			switch (ch->chunk_type) {
2637 			case SCTP_INITIATION:
2638 			case SCTP_INITIATION_ACK:
2639 			case SCTP_SELECTIVE_ACK:
2640 			case SCTP_NR_SELECTIVE_ACK:
2641 			case SCTP_HEARTBEAT_REQUEST:
2642 			case SCTP_HEARTBEAT_ACK:
2643 			case SCTP_ABORT_ASSOCIATION:
2644 			case SCTP_SHUTDOWN:
2645 			case SCTP_SHUTDOWN_ACK:
2646 			case SCTP_OPERATION_ERROR:
2647 			case SCTP_COOKIE_ECHO:
2648 			case SCTP_COOKIE_ACK:
2649 			case SCTP_ECN_ECHO:
2650 			case SCTP_ECN_CWR:
2651 			case SCTP_SHUTDOWN_COMPLETE:
2652 			case SCTP_AUTHENTICATION:
2653 			case SCTP_ASCONF_ACK:
2654 			case SCTP_PACKET_DROPPED:
2655 			case SCTP_STREAM_RESET:
2656 			case SCTP_FORWARD_CUM_TSN:
2657 			case SCTP_ASCONF:
2658 				/*
2659 				 * Now, what do we do with KNOWN chunks that
2660 				 * are NOT in the right place?
2661 				 *
2662 				 * For now, I do nothing but ignore them. We
2663 				 * may later want to add sysctl stuff to
2664 				 * switch out and do either an ABORT() or
2665 				 * possibly process them.
2666 				 */
2667 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2668 					struct mbuf *op_err;
2669 					char msg[SCTP_DIAG_INFO_LEN];
2670 
2671 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2672 					    ch->chunk_type);
2673 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2674 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2675 					return (2);
2676 				}
2677 				break;
2678 			default:
2679 				/* unknown chunk type, use bit rules */
2680 				if (ch->chunk_type & 0x40) {
2681 					/* Add a error report to the queue */
2682 					struct mbuf *op_err;
2683 					struct sctp_gen_error_cause *cause;
2684 
2685 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2686 					    0, M_NOWAIT, 1, MT_DATA);
2687 					if (op_err != NULL) {
2688 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2689 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2690 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2691 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2692 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2693 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2694 							sctp_queue_op_err(stcb, op_err);
2695 						} else {
2696 							sctp_m_freem(op_err);
2697 						}
2698 					}
2699 				}
2700 				if ((ch->chunk_type & 0x80) == 0) {
2701 					/* discard the rest of this packet */
2702 					stop_proc = 1;
2703 				}	/* else skip this bad chunk and
2704 					 * continue... */
2705 				break;
2706 			}	/* switch of chunk type */
2707 		}
2708 		*offset += SCTP_SIZE32(chk_length);
2709 		if ((*offset >= length) || stop_proc) {
2710 			/* no more data left in the mbuf chain */
2711 			stop_proc = 1;
2712 			continue;
2713 		}
2714 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2715 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2716 		if (ch == NULL) {
2717 			*offset = length;
2718 			stop_proc = 1;
2719 			continue;
2720 		}
2721 	}
2722 	if (break_flag) {
2723 		/*
2724 		 * we need to report rwnd overrun drops.
2725 		 */
2726 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2727 	}
2728 	if (num_chunks) {
2729 		/*
2730 		 * Did we get data, if so update the time for auto-close and
2731 		 * give peer credit for being alive.
2732 		 */
2733 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2734 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2735 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2736 			    stcb->asoc.overall_error_count,
2737 			    0,
2738 			    SCTP_FROM_SCTP_INDATA,
2739 			    __LINE__);
2740 		}
2741 		stcb->asoc.overall_error_count = 0;
2742 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2743 	}
2744 	/* now service all of the reassembly queue if needed */
2745 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2746 		/* Assure that we ack right away */
2747 		stcb->asoc.send_sack = 1;
2748 	}
2749 	/* Start a sack timer or QUEUE a SACK for sending */
2750 	sctp_sack_check(stcb, was_a_gap);
2751 	return (0);
2752 }
2753 
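/*
 * Mark every TSN in one gap-ack block [last_tsn + frag_strt, last_tsn +
 * frag_end] on the sent queue. Depending on nr_sacking, chunks end up
 * MARKED (still revocable) or NR_ACKED (with their data freed). *p_tp1
 * carries the queue position between calls; the return value indicates
 * whether any chunk's data was freed so the sender can be woken.
 */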
2754 static int
2755 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2756     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2757     int *num_frs,
2758     uint32_t * biggest_newly_acked_tsn,
2759     uint32_t * this_sack_lowest_newack,
2760     int *rto_ok)
2761 {
2762 	struct sctp_tmit_chunk *tp1;
2763 	unsigned int theTSN;
2764 	int j, wake_him = 0, circled = 0;
2765 
2766 	/* Recover the tp1 we last saw */
2767 	tp1 = *p_tp1;
2768 	if (tp1 == NULL) {
2769 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2770 	}
2771 	for (j = frag_strt; j <= frag_end; j++) {
2772 		theTSN = j + last_tsn;
2773 		while (tp1) {
2774 			if (tp1->rec.data.doing_fast_retransmit)
2775 				(*num_frs) += 1;
2776 
2777 			/*-
2778 			 * CMT: CUCv2 algorithm. For each TSN being
2779 			 * processed from the sent queue, track the
2780 			 * next expected pseudo-cumack, or
2781 			 * rtx_pseudo_cumack, if required. Separate
2782 			 * cumack trackers for first transmissions,
2783 			 * and retransmissions.
2784 			 */
2785 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2786 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2787 			    (tp1->snd_count == 1)) {
2788 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2789 				tp1->whoTo->find_pseudo_cumack = 0;
2790 			}
2791 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2792 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2793 			    (tp1->snd_count > 1)) {
2794 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2795 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2796 			}
2797 			if (tp1->rec.data.TSN_seq == theTSN) {
2798 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2799 					/*-
2800 					 * must be held until
2801 					 * cum-ack passes
2802 					 */
2803 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2804 						/*-
2805 						 * If it is less than RESEND, it is
2806 						 * now no-longer in flight.
2807 						 * Higher values may already be set
2808 						 * via previous Gap Ack Blocks...
2809 						 * i.e. ACKED or RESEND.
2810 						 */
2811 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2812 						    *biggest_newly_acked_tsn)) {
2813 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2814 						}
2815 						/*-
2816 						 * CMT: SFR algo (and HTNA) - set
2817 						 * saw_newack to 1 for dest being
2818 						 * newly acked. update
2819 						 * this_sack_highest_newack if
2820 						 * appropriate.
2821 						 */
2822 						if (tp1->rec.data.chunk_was_revoked == 0)
2823 							tp1->whoTo->saw_newack = 1;
2824 
2825 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2826 						    tp1->whoTo->this_sack_highest_newack)) {
2827 							tp1->whoTo->this_sack_highest_newack =
2828 							    tp1->rec.data.TSN_seq;
2829 						}
2830 						/*-
2831 						 * CMT DAC algo: also update
2832 						 * this_sack_lowest_newack
2833 						 */
2834 						if (*this_sack_lowest_newack == 0) {
2835 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2836 								sctp_log_sack(*this_sack_lowest_newack,
2837 								    last_tsn,
2838 								    tp1->rec.data.TSN_seq,
2839 								    0,
2840 								    0,
2841 								    SCTP_LOG_TSN_ACKED);
2842 							}
2843 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2844 						}
2845 						/*-
2846 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2847 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2848 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2849 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2850 						 * Separate pseudo_cumack trackers for first transmissions and
2851 						 * retransmissions.
2852 						 */
2853 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2854 							if (tp1->rec.data.chunk_was_revoked == 0) {
2855 								tp1->whoTo->new_pseudo_cumack = 1;
2856 							}
2857 							tp1->whoTo->find_pseudo_cumack = 1;
2858 						}
2859 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2860 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2861 						}
2862 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2863 							if (tp1->rec.data.chunk_was_revoked == 0) {
2864 								tp1->whoTo->new_pseudo_cumack = 1;
2865 							}
2866 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2867 						}
2868 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2869 							sctp_log_sack(*biggest_newly_acked_tsn,
2870 							    last_tsn,
2871 							    tp1->rec.data.TSN_seq,
2872 							    frag_strt,
2873 							    frag_end,
2874 							    SCTP_LOG_TSN_ACKED);
2875 						}
2876 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2877 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2878 							    tp1->whoTo->flight_size,
2879 							    tp1->book_size,
2880 							    (uint32_t) (uintptr_t) tp1->whoTo,
2881 							    tp1->rec.data.TSN_seq);
2882 						}
2883 						sctp_flight_size_decrease(tp1);
2884 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2885 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2886 							    tp1);
2887 						}
2888 						sctp_total_flight_decrease(stcb, tp1);
2889 
2890 						tp1->whoTo->net_ack += tp1->send_size;
2891 						if (tp1->snd_count < 2) {
2892 							/*-
2893 							 * True non-retransmitted chunk
2894 							 */
2895 							tp1->whoTo->net_ack2 += tp1->send_size;
2896 
2897 							/*-
2898 							 * update RTO too?
2899 							 */
2900 							if (tp1->do_rtt) {
2901 								if (*rto_ok) {
2902 									tp1->whoTo->RTO =
2903 									    sctp_calculate_rto(stcb,
2904 									    &stcb->asoc,
2905 									    tp1->whoTo,
2906 									    &tp1->sent_rcv_time,
2907 									    sctp_align_safe_nocopy,
2908 									    SCTP_RTT_FROM_DATA);
2909 									*rto_ok = 0;
2910 								}
2911 								if (tp1->whoTo->rto_needed == 0) {
2912 									tp1->whoTo->rto_needed = 1;
2913 								}
2914 								tp1->do_rtt = 0;
2915 							}
2916 						}
2917 					}
2918 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2919 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2920 						    stcb->asoc.this_sack_highest_gap)) {
2921 							stcb->asoc.this_sack_highest_gap =
2922 							    tp1->rec.data.TSN_seq;
2923 						}
2924 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2925 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2926 #ifdef SCTP_AUDITING_ENABLED
2927 							sctp_audit_log(0xB2,
2928 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2929 #endif
2930 						}
2931 					}
2932 					/*-
2933 					 * All chunks NOT UNSENT fall through here and are marked
2934 					 * (leave PR-SCTP ones that are to skip alone though)
2935 					 */
2936 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2937 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2938 						tp1->sent = SCTP_DATAGRAM_MARKED;
2939 					}
2940 					if (tp1->rec.data.chunk_was_revoked) {
2941 						/* deflate the cwnd */
2942 						tp1->whoTo->cwnd -= tp1->book_size;
2943 						tp1->rec.data.chunk_was_revoked = 0;
2944 					}
2945 					/* NR Sack code here */
2946 					if (nr_sacking &&
2947 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2948 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2949 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2950 #ifdef INVARIANTS
2951 						} else {
2952 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2953 #endif
2954 						}
2955 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2956 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2957 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2958 							stcb->asoc.trigger_reset = 1;
2959 						}
2960 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2961 						if (tp1->data) {
2962 							/*
2963 							 * sa_ignore
2964 							 * NO_NULL_CHK
2965 							 */
2966 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2967 							sctp_m_freem(tp1->data);
2968 							tp1->data = NULL;
2969 						}
2970 						wake_him++;
2971 					}
2972 				}
2973 				break;
2974 			}	/* if (tp1->TSN_seq == theTSN) */
2975 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2976 				break;
2977 			}
2978 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2979 			if ((tp1 == NULL) && (circled == 0)) {
2980 				circled++;
2981 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2982 			}
2983 		}		/* end while (tp1) */
2984 		if (tp1 == NULL) {
2985 			circled = 0;
2986 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2987 		}
2988 		/* In case the fragments were not in order we must reset */
2989 	}			/* end for (j = fragStart */
2990 	*p_tp1 = tp1;
2991 	return (wake_him);	/* Return value only used for nr-sack */
2992 }
2993 
2994 
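/*
 * Iterate over the num_seg revocable and num_nr_seg non-revocable gap-ack
 * blocks of a SACK/NR-SACK, tolerating malformed or out-of-order blocks,
 * and hand each one to sctp_process_segment_range().
 */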
2995 static int
2996 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2997     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2998     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2999     int num_seg, int num_nr_seg, int *rto_ok)
3000 {
3001 	struct sctp_gap_ack_block *frag, block;
3002 	struct sctp_tmit_chunk *tp1;
3003 	int i;
3004 	int num_frs = 0;
3005 	int chunk_freed;
3006 	int non_revocable;
3007 	uint16_t frag_strt, frag_end, prev_frag_end;
3008 
3009 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3010 	prev_frag_end = 0;
3011 	chunk_freed = 0;
3012 
3013 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3014 		if (i == num_seg) {
3015 			prev_frag_end = 0;
3016 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3017 		}
3018 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3019 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3020 		*offset += sizeof(block);
3021 		if (frag == NULL) {
3022 			return (chunk_freed);
3023 		}
3024 		frag_strt = ntohs(frag->start);
3025 		frag_end = ntohs(frag->end);
3026 
3027 		if (frag_strt > frag_end) {
3028 			/* This gap report is malformed, skip it. */
3029 			continue;
3030 		}
3031 		if (frag_strt <= prev_frag_end) {
3032 			/* This gap report is not in order, so restart. */
3033 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3034 		}
3035 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3036 			*biggest_tsn_acked = last_tsn + frag_end;
3037 		}
3038 		if (i < num_seg) {
3039 			non_revocable = 0;
3040 		} else {
3041 			non_revocable = 1;
3042 		}
3043 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3044 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3045 		    this_sack_lowest_newack, rto_ok)) {
3046 			chunk_freed = 1;
3047 		}
3048 		prev_frag_end = frag_end;
3049 	}
3050 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3051 		if (num_frs)
3052 			sctp_log_fr(*biggest_tsn_acked,
3053 			    *biggest_newly_acked_tsn,
3054 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3055 	}
3056 	return (chunk_freed);
3057 }
3058 
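/*
 * After processing a SACK, any chunk above the cum-ack that an earlier
 * SACK reported as ACKED but this one did not has been revoked by the
 * peer and must be put back in flight; chunks covered again are simply
 * re-marked ACKED.
 */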
3059 static void
3060 sctp_check_for_revoked(struct sctp_tcb *stcb,
3061     struct sctp_association *asoc, uint32_t cumack,
3062     uint32_t biggest_tsn_acked)
3063 {
3064 	struct sctp_tmit_chunk *tp1;
3065 
3066 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3067 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3068 			/*
3069 			 * ok this guy is either ACKED or MARKED. If it is
3070 			 * ACKED it has been previously acked but not this
3071 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3072 			 * again.
3073 			 */
3074 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3075 				break;
3076 			}
3077 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3078 				/* it has been revoked */
3079 				tp1->sent = SCTP_DATAGRAM_SENT;
3080 				tp1->rec.data.chunk_was_revoked = 1;
3081 				/*
3082 				 * We must add this stuff back in to assure
3083 				 * timers and such get started.
3084 				 */
3085 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3086 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3087 					    tp1->whoTo->flight_size,
3088 					    tp1->book_size,
3089 					    (uint32_t) (uintptr_t) tp1->whoTo,
3090 					    tp1->rec.data.TSN_seq);
3091 				}
3092 				sctp_flight_size_increase(tp1);
3093 				sctp_total_flight_increase(stcb, tp1);
3094 				/*
3095 				 * We inflate the cwnd to compensate for our
3096 				 * artificial inflation of the flight_size.
3097 				 */
3098 				tp1->whoTo->cwnd += tp1->book_size;
3099 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3100 					sctp_log_sack(asoc->last_acked_seq,
3101 					    cumack,
3102 					    tp1->rec.data.TSN_seq,
3103 					    0,
3104 					    0,
3105 					    SCTP_LOG_TSN_REVOKED);
3106 				}
3107 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3108 				/* it has been re-acked in this SACK */
3109 				tp1->sent = SCTP_DATAGRAM_ACKED;
3110 			}
3111 		}
3112 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3113 			break;
3114 	}
3115 }
3116 
3117 
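/*
 * Apply the fast-retransmit "strike" rules: each chunk missed by this
 * SACK's gap reports has tp1->sent incremented towards
 * SCTP_DATAGRAM_RESEND, subject to the CMT SFR/DAC constraints and the
 * PR-SCTP lifetime checks handled below.
 */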
3118 static void
3119 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3120     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3121 {
3122 	struct sctp_tmit_chunk *tp1;
3123 	int strike_flag = 0;
3124 	struct timeval now;
3125 	int tot_retrans = 0;
3126 	uint32_t sending_seq;
3127 	struct sctp_nets *net;
3128 	int num_dests_sacked = 0;
3129 
3130 	/*
3131 	 * select the sending_seq, this is either the next thing ready to be
3132 	 * sent but not transmitted, OR, the next seq we assign.
3133 	 */
3134 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3135 	if (tp1 == NULL) {
3136 		sending_seq = asoc->sending_seq;
3137 	} else {
3138 		sending_seq = tp1->rec.data.TSN_seq;
3139 	}
3140 
3141 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3142 	if ((asoc->sctp_cmt_on_off > 0) &&
3143 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3144 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3145 			if (net->saw_newack)
3146 				num_dests_sacked++;
3147 		}
3148 	}
3149 	if (stcb->asoc.prsctp_supported) {
3150 		(void)SCTP_GETTIME_TIMEVAL(&now);
3151 	}
3152 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3153 		strike_flag = 0;
3154 		if (tp1->no_fr_allowed) {
3155 			/* this one had a timeout or something */
3156 			continue;
3157 		}
3158 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3159 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3160 				sctp_log_fr(biggest_tsn_newly_acked,
3161 				    tp1->rec.data.TSN_seq,
3162 				    tp1->sent,
3163 				    SCTP_FR_LOG_CHECK_STRIKE);
3164 		}
3165 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3166 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3167 			/* done */
3168 			break;
3169 		}
3170 		if (stcb->asoc.prsctp_supported) {
3171 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3172 				/* Is it expired? */
3173 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3174 					/* Yes so drop it */
3175 					if (tp1->data != NULL) {
3176 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3177 						    SCTP_SO_NOT_LOCKED);
3178 					}
3179 					continue;
3180 				}
3181 			}
3182 		}
3183 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3184 			/* we are beyond the tsn in the sack  */
3185 			break;
3186 		}
3187 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3188 			/* either a RESEND, ACKED, or MARKED */
3189 			/* skip */
3190 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3191 				/* Continue striking FWD-TSN chunks */
3192 				tp1->rec.data.fwd_tsn_cnt++;
3193 			}
3194 			continue;
3195 		}
3196 		/*
3197 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3198 		 */
3199 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3200 			/*
3201 			 * No new acks were received for data sent to this
3202 			 * dest. Therefore, according to the SFR algo for
3203 			 * CMT, no data sent to this dest can be marked for
3204 			 * FR using this SACK.
3205 			 */
3206 			continue;
3207 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3208 		    tp1->whoTo->this_sack_highest_newack)) {
3209 			/*
3210 			 * CMT: New acks were received for data sent to
3211 			 * this dest. But no new acks were seen for data
3212 			 * sent after tp1. Therefore, according to the SFR
3213 			 * algo for CMT, tp1 cannot be marked for FR using
3214 			 * this SACK. This step covers part of the DAC algo
3215 			 * and the HTNA algo as well.
3216 			 */
3217 			continue;
3218 		}
3219 		/*
3220 		 * Here we check to see if we have already done a FR and,
3221 		 * if so, whether the biggest TSN we saw in the SACK is
3222 		 * smaller than the recovery point. If so we don't strike
3223 		 * the TSN; otherwise we CAN strike the TSN.
3224 		 */
3225 		/*
3226 		 * @@@ JRI: Check for CMT if (accum_moved &&
3227 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3228 		 * 0)) {
3229 		 */
3230 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3231 			/*
3232 			 * Strike the TSN if in fast-recovery and cum-ack
3233 			 * moved.
3234 			 */
3235 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3236 				sctp_log_fr(biggest_tsn_newly_acked,
3237 				    tp1->rec.data.TSN_seq,
3238 				    tp1->sent,
3239 				    SCTP_FR_LOG_STRIKE_CHUNK);
3240 			}
3241 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3242 				tp1->sent++;
3243 			}
3244 			if ((asoc->sctp_cmt_on_off > 0) &&
3245 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3246 				/*
3247 				 * CMT DAC algorithm: If the SACK DAC flag is
3248 				 * set to 0, the lowest_newack test will not
3249 				 * pass because it would have been set to the
3250 				 * cumack earlier. If the chunk is not already
3251 				 * to be rtx'd, this is not a mixed SACK, and
3252 				 * tp1 is not between two sacked TSNs, then
3253 				 * mark it by one more. NOTE that we mark it
3254 				 * one additional time since the SACK DAC
3255 				 * flag indicates that two packets have been
3256 				 * received after this missing TSN.
3257 				 */
3258 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3259 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3260 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3261 						sctp_log_fr(16 + num_dests_sacked,
3262 						    tp1->rec.data.TSN_seq,
3263 						    tp1->sent,
3264 						    SCTP_FR_LOG_STRIKE_CHUNK);
3265 					}
3266 					tp1->sent++;
3267 				}
3268 			}
3269 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3270 		    (asoc->sctp_cmt_on_off == 0)) {
3271 			/*
3272 			 * For those that have done a FR we must take
3273 			 * special consideration if we strike. I.e the
3274 			 * biggest_newly_acked must be higher than the
3275 			 * sending_seq at the time we did the FR.
3276 			 */
3277 			if (
3278 #ifdef SCTP_FR_TO_ALTERNATE
3279 			/*
3280 			 * If FR's go to new networks, then we must only do
3281 			 * this for singly homed asoc's. However if the FR's
3282 			 * go to the same network (Armando's work) then it's
3283 			 * ok to FR multiple times.
3284 			 */
3285 			    (asoc->numnets < 2)
3286 #else
3287 			    (1)
3288 #endif
3289 			    ) {
3290 
3291 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3292 				    tp1->rec.data.fast_retran_tsn)) {
3293 					/*
3294 					 * Strike the TSN, since this ack is
3295 					 * beyond where things were when we
3296 					 * did a FR.
3297 					 */
3298 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3299 						sctp_log_fr(biggest_tsn_newly_acked,
3300 						    tp1->rec.data.TSN_seq,
3301 						    tp1->sent,
3302 						    SCTP_FR_LOG_STRIKE_CHUNK);
3303 					}
3304 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3305 						tp1->sent++;
3306 					}
3307 					strike_flag = 1;
3308 					if ((asoc->sctp_cmt_on_off > 0) &&
3309 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3310 						/*
3311 						 * CMT DAC algorithm: If the
3312 						 * SACK DAC flag is set to
3313 						 * 0, the lowest_newack test
3314 						 * will not pass because it
3315 						 * would have been set to
3316 						 * the cumack earlier. If
3317 						 * the chunk is not already
3318 						 * to be rtx'd, this is not
3319 						 * a mixed SACK, and tp1 is
3320 						 * not between two sacked
3321 						 * TSNs, then mark it by one
3322 						 * more. NOTE that we mark
3323 						 * it one additional time
3324 						 * since the SACK DAC flag
3325 						 * indicates that two packets
3326 						 * have been received after
3327 						 * this missing TSN.
3328 						 */
3329 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3330 						    (num_dests_sacked == 1) &&
3331 						    SCTP_TSN_GT(this_sack_lowest_newack,
3332 						    tp1->rec.data.TSN_seq)) {
3333 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3334 								sctp_log_fr(32 + num_dests_sacked,
3335 								    tp1->rec.data.TSN_seq,
3336 								    tp1->sent,
3337 								    SCTP_FR_LOG_STRIKE_CHUNK);
3338 							}
3339 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3340 								tp1->sent++;
3341 							}
3342 						}
3343 					}
3344 				}
3345 			}
3346 			/*
3347 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3348 			 * algo covers HTNA.
3349 			 */
3350 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3351 		    biggest_tsn_newly_acked)) {
3352 			/*
3353 			 * We don't strike these: this is the HTNA
3354 			 * algorithm, i.e. we don't strike if our TSN is
3355 			 * larger than the Highest TSN Newly Acked.
3356 			 */
3357 			;
3358 		} else {
3359 			/* Strike the TSN */
3360 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3361 				sctp_log_fr(biggest_tsn_newly_acked,
3362 				    tp1->rec.data.TSN_seq,
3363 				    tp1->sent,
3364 				    SCTP_FR_LOG_STRIKE_CHUNK);
3365 			}
3366 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3367 				tp1->sent++;
3368 			}
3369 			if ((asoc->sctp_cmt_on_off > 0) &&
3370 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3371 				/*
3372 				 * CMT DAC algorithm: If the SACK DAC flag is
3373 				 * set to 0, the lowest_newack test will not
3374 				 * pass because it would have been set to the
3375 				 * cumack earlier. If the chunk is not already
3376 				 * to be rtx'd, this is not a mixed SACK, and
3377 				 * tp1 is not between two sacked TSNs, then
3378 				 * mark it by one more. NOTE that we mark it
3379 				 * one additional time since the SACK DAC
3380 				 * flag indicates that two packets have been
3381 				 * received after this missing TSN.
3382 				 */
3383 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3384 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3385 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3386 						sctp_log_fr(48 + num_dests_sacked,
3387 						    tp1->rec.data.TSN_seq,
3388 						    tp1->sent,
3389 						    SCTP_FR_LOG_STRIKE_CHUNK);
3390 					}
3391 					tp1->sent++;
3392 				}
3393 			}
3394 		}
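		/*
		 * The chunk just reached the strike threshold: pull it out
		 * of the flight, give its bytes back to the peer's rwnd
		 * accounting, and pick the destination to use for the fast
		 * retransmission.
		 */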
3395 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3396 			struct sctp_nets *alt;
3397 
3398 			/* fix counts and things */
3399 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3400 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3401 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3402 				    tp1->book_size,
3403 				    (uint32_t) (uintptr_t) tp1->whoTo,
3404 				    tp1->rec.data.TSN_seq);
3405 			}
3406 			if (tp1->whoTo) {
3407 				tp1->whoTo->net_ack++;
3408 				sctp_flight_size_decrease(tp1);
3409 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3410 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3411 					    tp1);
3412 				}
3413 			}
3414 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3415 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3416 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3417 			}
3418 			/* add back to the rwnd */
3419 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3420 
3421 			/* remove from the total flight */
3422 			sctp_total_flight_decrease(stcb, tp1);
3423 
3424 			if ((stcb->asoc.prsctp_supported) &&
3425 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3426 				/*
3427 				 * Has it been retransmitted tv_sec times? -
3428 				 * we store the retran count there.
3429 				 */
3430 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3431 					/* Yes, so drop it */
3432 					if (tp1->data != NULL) {
3433 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3434 						    SCTP_SO_NOT_LOCKED);
3435 					}
3436 					/* Make sure to flag we had a FR */
3437 					tp1->whoTo->net_ack++;
3438 					continue;
3439 				}
3440 			}
3441 			/*
3442 			 * SCTP_PRINTF("OK, we are now ready to FR this
3443 			 * guy\n");
3444 			 */
3445 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3446 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3447 				    0, SCTP_FR_MARKED);
3448 			}
3449 			if (strike_flag) {
3450 				/* This is a subsequent FR */
3451 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3452 			}
3453 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3454 			if (asoc->sctp_cmt_on_off > 0) {
3455 				/*
3456 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3457 				 * If CMT is being used, then pick dest with
3458 				 * largest ssthresh for any retransmission.
3459 				 */
3460 				tp1->no_fr_allowed = 1;
3461 				alt = tp1->whoTo;
3462 				/* sa_ignore NO_NULL_CHK */
3463 				if (asoc->sctp_cmt_pf > 0) {
3464 					/*
3465 					 * JRS 5/18/07 - If CMT PF is on,
3466 					 * use the PF version of
3467 					 * find_alt_net()
3468 					 */
3469 					alt = sctp_find_alternate_net(stcb, alt, 2);
3470 				} else {
3471 					/*
3472 					 * JRS 5/18/07 - If only CMT is on,
3473 					 * use the CMT version of
3474 					 * find_alt_net()
3475 					 */
3476 					/* sa_ignore NO_NULL_CHK */
3477 					alt = sctp_find_alternate_net(stcb, alt, 1);
3478 				}
3479 				if (alt == NULL) {
3480 					alt = tp1->whoTo;
3481 				}
3482 				/*
3483 				 * CUCv2: If a different dest is picked for
3484 				 * the retransmission, then new
3485 				 * (rtx-)pseudo_cumack needs to be tracked
3486 				 * for orig dest. Let CUCv2 track new (rtx-)
3487 				 * pseudo-cumack always.
3488 				 */
3489 				if (tp1->whoTo) {
3490 					tp1->whoTo->find_pseudo_cumack = 1;
3491 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3492 				}
3493 			} else {/* CMT is OFF */
3494 
3495 #ifdef SCTP_FR_TO_ALTERNATE
3496 				/* Can we find an alternate? */
3497 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3498 #else
3499 				/*
3500 				 * default behavior is to NOT retransmit
3501 				 * FR's to an alternate. Armando Caro's
3502 				 * paper details why.
3503 				 */
3504 				alt = tp1->whoTo;
3505 #endif
3506 			}
3507 
3508 			tp1->rec.data.doing_fast_retransmit = 1;
3509 			tot_retrans++;
3510 			/* mark the sending seq for possible subsequent FR's */
3511 			/*
3512 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3513 			 * (uint32_t)tp1->rec.data.TSN_seq);
3514 			 */
3515 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3516 				/*
3517 				 * If the send queue is empty then sending_seq
3518 				 * is the next sequence number that will be
3519 				 * assigned, so we record it to mark where
3520 				 * things were when we did the FR.
3521 				 */
3522 				tp1->rec.data.fast_retran_tsn = sending_seq;
3523 			} else {
3524 				/*
3525 				 * If there are chunks on the send queue
3526 				 * (unsent data that has made it from the
3527 				 * stream queues but not out the door), we
3528 				 * take the first one (which will have the
3529 				 * lowest TSN) to mark where things were
3530 				 * when we did the FR.
3531 				 */
3532 				struct sctp_tmit_chunk *ttt;
3533 
3534 				ttt = TAILQ_FIRST(&asoc->send_queue);
3535 				tp1->rec.data.fast_retran_tsn =
3536 				    ttt->rec.data.TSN_seq;
3537 			}
3538 
3539 			if (tp1->do_rtt) {
3540 				/*
3541 				 * this chunk had an RTO calculation pending
3542 				 * on it; cancel it
3543 				 */
3544 				if ((tp1->whoTo != NULL) &&
3545 				    (tp1->whoTo->rto_needed == 0)) {
3546 					tp1->whoTo->rto_needed = 1;
3547 				}
3548 				tp1->do_rtt = 0;
3549 			}
3550 			if (alt != tp1->whoTo) {
3551 				/* yes, there is an alternate. */
3552 				sctp_free_remote_addr(tp1->whoTo);
3553 				/* sa_ignore FREED_MEMORY */
3554 				tp1->whoTo = alt;
3555 				atomic_add_int(&alt->ref_count, 1);
3556 			}
3557 		}
3558 	}
3559 }
3560 
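/*
 * PR-SCTP (RFC 3758): scan the head of the sent queue for chunks that have
 * been abandoned (or NR-acked) and move advanced_peer_ack_point forward
 * past them. Returns the chunk, if any, that now marks the advanced peer
 * ack point, so the caller can decide whether a FORWARD-TSN must be sent.
 */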
3561 struct sctp_tmit_chunk *
3562 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3563     struct sctp_association *asoc)
3564 {
3565 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3566 	struct timeval now;
3567 	int now_filled = 0;
3568 
3569 	if (asoc->prsctp_supported == 0) {
3570 		return (NULL);
3571 	}
3572 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3573 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3574 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3575 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3576 			/* no chance to advance, out of here */
3577 			break;
3578 		}
3579 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3580 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3581 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3582 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3583 				    asoc->advanced_peer_ack_point,
3584 				    tp1->rec.data.TSN_seq, 0, 0);
3585 			}
3586 		}
3587 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3588 			/*
3589 			 * We can't fwd-tsn past any that are reliable, i.e.
3590 			 * retransmitted until the asoc fails.
3591 			 */
3592 			break;
3593 		}
3594 		if (!now_filled) {
3595 			(void)SCTP_GETTIME_TIMEVAL(&now);
3596 			now_filled = 1;
3597 		}
3598 		/*
3599 		 * Now we have a chunk which is marked for another
3600 		 * retransmission to a PR-stream, but which has perhaps run
3601 		 * out of its chances already OR has been marked to skip now.
3602 		 * Can we skip it if it's a resend?
3603 		 */
3604 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3605 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3606 			/*
3607 			 * Now is this one marked for resend and its time is
3608 			 * now up?
3609 			 */
3610 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3611 				/* Yes so drop it */
3612 				if (tp1->data) {
3613 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3614 					    1, SCTP_SO_NOT_LOCKED);
3615 				}
3616 			} else {
3617 				/*
3618 				 * No, we are done when we hit one marked for
3619 				 * resend whose time has not expired.
3620 				 */
3621 				break;
3622 			}
3623 		}
3624 		/*
3625 		 * OK, now if this chunk is marked to be dropped we can clean
3626 		 * it up, advance our peer ack point and check the next
3627 		 * chunk.
3628 		 */
3629 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3630 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3631 			/* the advanced peer ack point goes forward */
3632 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3633 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3634 				a_adv = tp1;
3635 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3636 				/* No update but we do save the chk */
3637 				a_adv = tp1;
3638 			}
3639 		} else {
3640 			/*
3641 			 * If it is still in RESEND we can advance no
3642 			 * further
3643 			 */
3644 			break;
3645 		}
3646 	}
3647 	return (a_adv);
3648 }
3649 
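/*
 * Sanity-check the flight size bookkeeping against the sent queue. Without
 * INVARIANTS this returns non-zero when chunks are still counted as in
 * flight although the queue state says otherwise, so the caller can
 * resynchronize the counters; with INVARIANTS it panics instead.
 */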
3650 static int
3651 sctp_fs_audit(struct sctp_association *asoc)
3652 {
3653 	struct sctp_tmit_chunk *chk;
3654 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3655 	int ret;
3656 
3657 #ifndef INVARIANTS
3658 	int entry_flight, entry_cnt;
3659 
3660 #endif
3661 
3662 	ret = 0;
3663 #ifndef INVARIANTS
3664 	entry_flight = asoc->total_flight;
3665 	entry_cnt = asoc->total_flight_count;
3666 #endif
3667 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3668 		return (0);
3669 
3670 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3671 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3672 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3673 			    chk->rec.data.TSN_seq,
3674 			    chk->send_size,
3675 			    chk->snd_count);
3676 			inflight++;
3677 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3678 			resend++;
3679 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3680 			inbetween++;
3681 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3682 			above++;
3683 		} else {
3684 			acked++;
3685 		}
3686 	}
3687 
3688 	if ((inflight > 0) || (inbetween > 0)) {
3689 #ifdef INVARIANTS
3690 		panic("Flight size-express incorrect? \n");
3691 #else
3692 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3693 		    entry_flight, entry_cnt);
3694 
3695 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3696 		    inflight, inbetween, resend, above, acked);
3697 		ret = 1;
3698 #endif
3699 	}
3700 	return (ret);
3701 }
3702 
3703 
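/*
 * A window probe is being recovered: the peer's rwnd re-opened, so unless
 * the TSN was already acked or skipped, take the chunk back out of the
 * flight and mark it for retransmission as regular data.
 */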
3704 static void
3705 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3706     struct sctp_association *asoc,
3707     struct sctp_tmit_chunk *tp1)
3708 {
3709 	tp1->window_probe = 0;
3710 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3711 		/* TSNs skipped; we do NOT move back. */
3712 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3713 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3714 		    tp1->book_size,
3715 		    (uint32_t) (uintptr_t) tp1->whoTo,
3716 		    tp1->rec.data.TSN_seq);
3717 		return;
3718 	}
3719 	/* First setup this by shrinking flight */
3720 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3721 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3722 		    tp1);
3723 	}
3724 	sctp_flight_size_decrease(tp1);
3725 	sctp_total_flight_decrease(stcb, tp1);
3726 	/* Now mark for resend */
3727 	tp1->sent = SCTP_DATAGRAM_RESEND;
3728 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3729 
3730 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3731 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3732 		    tp1->whoTo->flight_size,
3733 		    tp1->book_size,
3734 		    (uint32_t) (uintptr_t) tp1->whoTo,
3735 		    tp1->rec.data.TSN_seq);
3736 	}
3737 }
3738 
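/*
 * Fast-path SACK handler, used when the SACK carries only a new cumulative
 * ack (no gap-ack or duplicate-TSN blocks), so the full gap processing of
 * sctp_handle_sack() can be skipped.
 */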
3739 void
3740 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3741     uint32_t rwnd, int *abort_now, int ecne_seen)
3742 {
3743 	struct sctp_nets *net;
3744 	struct sctp_association *asoc;
3745 	struct sctp_tmit_chunk *tp1, *tp2;
3746 	uint32_t old_rwnd;
3747 	int win_probe_recovery = 0;
3748 	int win_probe_recovered = 0;
3749 	int j, done_once = 0;
3750 	int rto_ok = 1;
3751 
3752 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3753 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3754 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3755 	}
3756 	SCTP_TCB_LOCK_ASSERT(stcb);
3757 #ifdef SCTP_ASOCLOG_OF_TSNS
3758 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3759 	stcb->asoc.cumack_log_at++;
3760 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3761 		stcb->asoc.cumack_log_at = 0;
3762 	}
3763 #endif
3764 	asoc = &stcb->asoc;
3765 	old_rwnd = asoc->peers_rwnd;
3766 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3767 		/* old ack */
3768 		return;
3769 	} else if (asoc->last_acked_seq == cumack) {
3770 		/* Window update sack */
3771 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3772 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3773 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3774 			/* SWS sender side engages */
3775 			asoc->peers_rwnd = 0;
3776 		}
3777 		if (asoc->peers_rwnd > old_rwnd) {
3778 			goto again;
3779 		}
3780 		return;
3781 	}
3782 	/* First setup for CC stuff */
3783 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3784 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3785 			/* Drag along the window_tsn for cwr's */
3786 			net->cwr_window_tsn = cumack;
3787 		}
3788 		net->prev_cwnd = net->cwnd;
3789 		net->net_ack = 0;
3790 		net->net_ack2 = 0;
3791 
3792 		/*
3793 		 * CMT: Reset CUC and Fast recovery algo variables before
3794 		 * SACK processing
3795 		 */
3796 		net->new_pseudo_cumack = 0;
3797 		net->will_exit_fast_recovery = 0;
3798 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3799 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3800 		}
3801 	}
3802 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3803 		uint32_t send_s;
3804 
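		/*
		 * Sanity check: send_s is one past the highest TSN handed
		 * out, so a cumulative ack reaching it would ack data we
		 * never sent; such a peer must be aborted.
		 */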
3805 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3806 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3807 			    sctpchunk_listhead);
3808 			send_s = tp1->rec.data.TSN_seq + 1;
3809 		} else {
3810 			send_s = asoc->sending_seq;
3811 		}
3812 		if (SCTP_TSN_GE(cumack, send_s)) {
3813 			struct mbuf *op_err;
3814 			char msg[SCTP_DIAG_INFO_LEN];
3815 
3816 			*abort_now = 1;
3817 			/* XXX */
3818 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3819 			    cumack, send_s);
3820 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3821 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3822 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3823 			return;
3824 		}
3825 	}
3826 	asoc->this_sack_highest_gap = cumack;
3827 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3828 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3829 		    stcb->asoc.overall_error_count,
3830 		    0,
3831 		    SCTP_FROM_SCTP_INDATA,
3832 		    __LINE__);
3833 	}
3834 	stcb->asoc.overall_error_count = 0;
3835 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3836 		/* process the new consecutive TSN first */
3837 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3838 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3839 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3840 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3841 				}
3842 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3843 					/*
3844 					 * If it is less than ACKED, it is
3845 					 * now no longer in flight. Higher
3846 					 * values may occur during marking.
3847 					 */
3848 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3849 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3850 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3851 							    tp1->whoTo->flight_size,
3852 							    tp1->book_size,
3853 							    (uint32_t) (uintptr_t) tp1->whoTo,
3854 							    tp1->rec.data.TSN_seq);
3855 						}
3856 						sctp_flight_size_decrease(tp1);
3857 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3858 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3859 							    tp1);
3860 						}
3861 						/* sa_ignore NO_NULL_CHK */
3862 						sctp_total_flight_decrease(stcb, tp1);
3863 					}
3864 					tp1->whoTo->net_ack += tp1->send_size;
3865 					if (tp1->snd_count < 2) {
3866 						/*
3867 						 * True non-retransmitted
3868 						 * chunk
3869 						 */
3870 						tp1->whoTo->net_ack2 +=
3871 						    tp1->send_size;
3872 
3873 						/* update RTO too? */
3874 						if (tp1->do_rtt) {
3875 							if (rto_ok) {
3876 								tp1->whoTo->RTO =
3877 								/* sa_ignore NO_NULL_CHK */
3882 								    sctp_calculate_rto(stcb,
3883 								    asoc, tp1->whoTo,
3884 								    &tp1->sent_rcv_time,
3885 								    sctp_align_safe_nocopy,
3886 								    SCTP_RTT_FROM_DATA);
3887 								rto_ok = 0;
3888 							}
3889 							if (tp1->whoTo->rto_needed == 0) {
3890 								tp1->whoTo->rto_needed = 1;
3891 							}
3892 							tp1->do_rtt = 0;
3893 						}
3894 					}
3895 					/*
3896 					 * CMT: CUCv2 algorithm. From the
3897 					 * cumack'd TSNs, for each TSN being
3898 					 * acked for the first time, set the
3899 					 * following variables for the
3900 					 * corresp destination.
3901 					 * new_pseudo_cumack will trigger a
3902 					 * cwnd update.
3903 					 * find_(rtx_)pseudo_cumack will
3904 					 * trigger search for the next
3905 					 * expected (rtx-)pseudo-cumack.
3906 					 */
3907 					tp1->whoTo->new_pseudo_cumack = 1;
3908 					tp1->whoTo->find_pseudo_cumack = 1;
3909 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3910 
3911 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3912 						/* sa_ignore NO_NULL_CHK */
3913 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3914 					}
3915 				}
3916 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3917 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3918 				}
3919 				if (tp1->rec.data.chunk_was_revoked) {
3920 					/* deflate the cwnd */
3921 					tp1->whoTo->cwnd -= tp1->book_size;
3922 					tp1->rec.data.chunk_was_revoked = 0;
3923 				}
3924 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3925 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3926 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3927 #ifdef INVARIANTS
3928 					} else {
3929 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3930 #endif
3931 					}
3932 				}
3933 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3934 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3935 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3936 					asoc->trigger_reset = 1;
3937 				}
3938 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3939 				if (tp1->data) {
3940 					/* sa_ignore NO_NULL_CHK */
3941 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3942 					sctp_m_freem(tp1->data);
3943 					tp1->data = NULL;
3944 				}
3945 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3946 					sctp_log_sack(asoc->last_acked_seq,
3947 					    cumack,
3948 					    tp1->rec.data.TSN_seq,
3949 					    0,
3950 					    0,
3951 					    SCTP_LOG_FREE_SENT);
3952 				}
3953 				asoc->sent_queue_cnt--;
3954 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3955 			} else {
3956 				break;
3957 			}
3958 		}
3959 
3960 	}
3961 	/* sa_ignore NO_NULL_CHK */
3962 	if (stcb->sctp_socket) {
3963 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3964 		struct socket *so;
3965 
3966 #endif
3967 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3969 			/* sa_ignore NO_NULL_CHK */
3970 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3971 		}
3972 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3973 		so = SCTP_INP_SO(stcb->sctp_ep);
3974 		atomic_add_int(&stcb->asoc.refcnt, 1);
3975 		SCTP_TCB_UNLOCK(stcb);
3976 		SCTP_SOCKET_LOCK(so, 1);
3977 		SCTP_TCB_LOCK(stcb);
3978 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3979 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3980 			/* assoc was freed while we were unlocked */
3981 			SCTP_SOCKET_UNLOCK(so, 1);
3982 			return;
3983 		}
3984 #endif
3985 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3986 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3987 		SCTP_SOCKET_UNLOCK(so, 1);
3988 #endif
3989 	} else {
3990 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3991 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3992 		}
3993 	}
3994 
3995 	/* JRS - Use the congestion control given in the CC module */
3996 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3997 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3998 			if (net->net_ack2 > 0) {
3999 				/*
4000 				 * Karn's rule applies to clearing the error
4001 				 * count; this is optional.
4002 				 */
4003 				net->error_count = 0;
4004 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4005 					/* addr came good */
4006 					net->dest_state |= SCTP_ADDR_REACHABLE;
4007 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4008 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4009 				}
4010 				if (net == stcb->asoc.primary_destination) {
4011 					if (stcb->asoc.alternate) {
4012 						/*
4013 						 * release the alternate,
4014 						 * primary is good
4015 						 */
4016 						sctp_free_remote_addr(stcb->asoc.alternate);
4017 						stcb->asoc.alternate = NULL;
4018 					}
4019 				}
4020 				if (net->dest_state & SCTP_ADDR_PF) {
4021 					net->dest_state &= ~SCTP_ADDR_PF;
4022 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4023 					    stcb->sctp_ep, stcb, net,
4024 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4025 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4026 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4027 					/* Done with this net */
4028 					net->net_ack = 0;
4029 				}
4030 				/* restore any doubled timers */
4031 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4032 				if (net->RTO < stcb->asoc.minrto) {
4033 					net->RTO = stcb->asoc.minrto;
4034 				}
4035 				if (net->RTO > stcb->asoc.maxrto) {
4036 					net->RTO = stcb->asoc.maxrto;
4037 				}
4038 			}
4039 		}
4040 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4041 	}
4042 	asoc->last_acked_seq = cumack;
4043 
4044 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4045 		/* nothing left in-flight */
4046 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4047 			net->flight_size = 0;
4048 			net->partial_bytes_acked = 0;
4049 		}
4050 		asoc->total_flight = 0;
4051 		asoc->total_flight_count = 0;
4052 	}
4053 	/* RWND update */
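	/*
	 * The usable peer window is the advertised rwnd minus the bytes
	 * still in flight, minus a configurable per-chunk overhead
	 * (sctp_peer_chunk_oh) per chunk in flight, clamped at zero by
	 * sctp_sbspace_sub(). For example (illustrative numbers only):
	 * rwnd 64000 with 12000 bytes outstanding in 10 chunks and an
	 * overhead of 256 gives 64000 - (12000 + 10 * 256) = 49440.
	 */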
4054 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4055 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4056 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4057 		/* SWS sender side engages */
4058 		asoc->peers_rwnd = 0;
4059 	}
4060 	if (asoc->peers_rwnd > old_rwnd) {
4061 		win_probe_recovery = 1;
4062 	}
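	/*
	 * The peer's window grew: any chunk that went out as a window
	 * probe can now be pulled back from the sent queue (see
	 * sctp_window_probe_recovery() in the loop below) and resent as
	 * normal data.
	 */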
4063 	/* Now assure a timer is running where data is queued */
4064 again:
4065 	j = 0;
4066 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4067 		int to_ticks;
4068 
4069 		if (win_probe_recovery && (net->window_probe)) {
4070 			win_probe_recovered = 1;
4071 			/*
4072 			 * Find the first chunk that was used with a window
4073 			 * probe and clear its sent state.
4074 			 */
4075 			/* sa_ignore FREED_MEMORY */
4076 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4077 				if (tp1->window_probe) {
4078 					/* move back to data send queue */
4079 					sctp_window_probe_recovery(stcb, asoc, tp1);
4080 					break;
4081 				}
4082 			}
4083 		}
4084 		if (net->RTO == 0) {
4085 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4086 		} else {
4087 			to_ticks = MSEC_TO_TICKS(net->RTO);
4088 		}
4089 		if (net->flight_size) {
4090 			j++;
4091 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4092 			    sctp_timeout_handler, &net->rxt_timer);
4093 			if (net->window_probe) {
4094 				net->window_probe = 0;
4095 			}
4096 		} else {
4097 			if (net->window_probe) {
4098 				/*
4099 				 * For window probes we must ensure a timer
4100 				 * is still running there.
4101 				 */
4102 				net->window_probe = 0;
4103 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4104 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4105 					    sctp_timeout_handler, &net->rxt_timer);
4106 				}
4107 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4108 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4109 				    stcb, net,
4110 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4111 			}
4112 		}
4113 	}
4114 	if ((j == 0) &&
4115 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4116 	    (asoc->sent_queue_retran_cnt == 0) &&
4117 	    (win_probe_recovered == 0) &&
4118 	    (done_once == 0)) {
4119 		/*
4120 		 * huh, this should not happen unless all packets are
4121 		 * PR-SCTP and marked to skip of course.
4122 		 */
4123 		if (sctp_fs_audit(asoc)) {
4124 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4125 				net->flight_size = 0;
4126 			}
4127 			asoc->total_flight = 0;
4128 			asoc->total_flight_count = 0;
4129 			asoc->sent_queue_retran_cnt = 0;
4130 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4131 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4132 					sctp_flight_size_increase(tp1);
4133 					sctp_total_flight_increase(stcb, tp1);
4134 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4135 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4136 				}
4137 			}
4138 		}
4139 		done_once = 1;
4140 		goto again;
4141 	}
4142 	/**********************************/
4143 	/* Now what about shutdown issues */
4144 	/**********************************/
4145 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4146 		/* nothing left on sendqueue.. consider done */
4147 		/* clean up */
4148 		if ((asoc->stream_queue_cnt == 1) &&
4149 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4150 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4151 		    (asoc->locked_on_sending)
4152 		    ) {
4153 			struct sctp_stream_queue_pending *sp;
4154 
4155 			/*
4156 			 * We may be in a state where we got everything across
4157 			 * but cannot write more due to a shutdown. We abort
4158 			 * since the user did not indicate EOR in this case.
4159 			 * The sp will be cleaned up when the asoc is freed.
4160 			 */
4161 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4162 			    sctp_streamhead);
4163 			if ((sp) && (sp->length == 0)) {
4164 				/* Let cleanup code purge it */
4165 				if (sp->msg_is_complete) {
4166 					asoc->stream_queue_cnt--;
4167 				} else {
4168 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4169 					asoc->locked_on_sending = NULL;
4170 					asoc->stream_queue_cnt--;
4171 				}
4172 			}
4173 		}
4174 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4175 		    (asoc->stream_queue_cnt == 0)) {
4176 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4177 				/* Need to abort here */
4178 				struct mbuf *op_err;
4179 
4180 		abort_out_now:
4181 				*abort_now = 1;
4182 				/* XXX */
4183 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4184 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4185 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4186 				return;
4187 			} else {
4188 				struct sctp_nets *netp;
4189 
4190 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4191 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4192 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4193 				}
4194 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4195 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4196 				sctp_stop_timers_for_shutdown(stcb);
4197 				if (asoc->alternate) {
4198 					netp = asoc->alternate;
4199 				} else {
4200 					netp = asoc->primary_destination;
4201 				}
4202 				sctp_send_shutdown(stcb, netp);
4203 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4204 				    stcb->sctp_ep, stcb, netp);
4205 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4206 				    stcb->sctp_ep, stcb, netp);
4207 			}
4208 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4209 		    (asoc->stream_queue_cnt == 0)) {
4210 			struct sctp_nets *netp;
4211 
4212 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4213 				goto abort_out_now;
4214 			}
4215 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4216 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4217 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4218 			sctp_stop_timers_for_shutdown(stcb);
4219 			if (asoc->alternate) {
4220 				netp = asoc->alternate;
4221 			} else {
4222 				netp = asoc->primary_destination;
4223 			}
4224 			sctp_send_shutdown_ack(stcb, netp);
4225 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4226 			    stcb->sctp_ep, stcb, netp);
4227 		}
4228 	}
4229 	/*********************************************/
4230 	/* Here we perform PR-SCTP procedures        */
4231 	/* (section 4.2)                             */
4232 	/*********************************************/
4233 	/* C1. update advancedPeerAckPoint */
4234 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4235 		asoc->advanced_peer_ack_point = cumack;
4236 	}
4237 	/* PR-SCTP issues need to be addressed too */
4238 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4239 		struct sctp_tmit_chunk *lchk;
4240 		uint32_t old_adv_peer_ack_point;
4241 
4242 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4243 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4244 		/* C3. See if we need to send a Fwd-TSN */
4245 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4246 			/*
4247 			 * ISSUE with ECN, see FWD-TSN processing.
4248 			 */
4249 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4250 				send_forward_tsn(stcb, asoc);
4251 			} else if (lchk) {
4252 				/* try to FR fwd-tsn's that get lost too */
4253 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4254 					send_forward_tsn(stcb, asoc);
4255 				}
4256 			}
4257 		}
4258 		if (lchk) {
4259 			/* Assure a timer is up */
4260 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4261 			    stcb->sctp_ep, stcb, lchk->whoTo);
4262 		}
4263 	}
4264 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4265 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4266 		    rwnd,
4267 		    stcb->asoc.peers_rwnd,
4268 		    stcb->asoc.total_flight,
4269 		    stcb->asoc.total_output_queue_size);
4270 	}
4271 }
4272 
4273 void
4274 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4275     struct sctp_tcb *stcb,
4276     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4277     int *abort_now, uint8_t flags,
4278     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4279 {
4280 	struct sctp_association *asoc;
4281 	struct sctp_tmit_chunk *tp1, *tp2;
4282 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4283 	uint16_t wake_him = 0;
4284 	uint32_t send_s = 0;
4285 	long j;
4286 	int accum_moved = 0;
4287 	int will_exit_fast_recovery = 0;
4288 	uint32_t a_rwnd, old_rwnd;
4289 	int win_probe_recovery = 0;
4290 	int win_probe_recovered = 0;
4291 	struct sctp_nets *net = NULL;
4292 	int done_once;
4293 	int rto_ok = 1;
4294 	uint8_t reneged_all = 0;
4295 	uint8_t cmt_dac_flag;
4296 
4297 	/*
4298 	 * we take any chance we can to service our queues since we cannot
4299 	 * get awoken when the socket is read from :<
4300 	 */
4301 	/*
4302 	 * Now perform the actual SACK handling: 1) Verify that it is not
4303 	 * an old sack, if so discard. 2) If there is nothing left in the
4304 	 * send queue (cum-ack is equal to last acked) then you have a
4305 	 * duplicate too, update any rwnd change and verify no timers are
4306 	 * running, then return. 3) Process any new consecutive data, i.e.
4307 	 * cum-ack moved, process these first and note that it moved. 4)
4308 	 * Process any sack blocks. 5) Drop any acked chunks from the
4309 	 * queue. 6) Check for any revoked blocks and mark them. 7) Update
4310 	 * the cwnd. 8) If nothing is left, sync up flightsizes and things,
4311 	 * stop all timers and also check for shutdown_pending state; if
4312 	 * so, send off the shutdown. If in shutdown recv, send off the
4313 	 * shutdown-ack and start that timer, then return. 9) Strike any
4314 	 * non-acked things and do the FR procedure if needed, being sure
4315 	 * to set the FR flag. 10) Do pr-sctp procedures. 11) Apply any FR
4316 	 * penalties. 12) Assure we will SACK if in shutdown_recv state.
4317 	 */
4318 	SCTP_TCB_LOCK_ASSERT(stcb);
4319 	/* CMT DAC algo */
4320 	this_sack_lowest_newack = 0;
4321 	SCTP_STAT_INCR(sctps_slowpath_sack);
4322 	last_tsn = cum_ack;
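	/*
	 * CMT DAC: remember whether the peer set the DAC flag on this
	 * SACK. When the flag is 0, this_sack_lowest_newack is later
	 * forced to the cumack, which disables the extra DAC strike in
	 * sctp_strike_gap_ack_chunks().
	 */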
4323 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4324 #ifdef SCTP_ASOCLOG_OF_TSNS
4325 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4326 	stcb->asoc.cumack_log_at++;
4327 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4328 		stcb->asoc.cumack_log_at = 0;
4329 	}
4330 #endif
4331 	a_rwnd = rwnd;
4332 
4333 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4334 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4335 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4336 	}
4337 	old_rwnd = stcb->asoc.peers_rwnd;
4338 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4339 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4340 		    stcb->asoc.overall_error_count,
4341 		    0,
4342 		    SCTP_FROM_SCTP_INDATA,
4343 		    __LINE__);
4344 	}
4345 	stcb->asoc.overall_error_count = 0;
4346 	asoc = &stcb->asoc;
4347 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4348 		sctp_log_sack(asoc->last_acked_seq,
4349 		    cum_ack,
4350 		    0,
4351 		    num_seg,
4352 		    num_dup,
4353 		    SCTP_LOG_NEW_SACK);
4354 	}
4355 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4356 		uint16_t i;
4357 		uint32_t *dupdata, dblock;
4358 
4359 		for (i = 0; i < num_dup; i++) {
4360 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4361 			    sizeof(uint32_t), (uint8_t *) & dblock);
4362 			if (dupdata == NULL) {
4363 				break;
4364 			}
4365 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4366 		}
4367 	}
4368 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4369 		/* reality check */
4370 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4371 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4372 			    sctpchunk_listhead);
4373 			send_s = tp1->rec.data.TSN_seq + 1;
4374 		} else {
4375 			tp1 = NULL;
4376 			send_s = asoc->sending_seq;
4377 		}
4378 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4379 			struct mbuf *op_err;
4380 			char msg[SCTP_DIAG_INFO_LEN];
4381 
4382 			/*
4383 			 * no way, we have not even sent this TSN out yet.
4384 			 * Peer is hopelessly messed up with us.
4385 			 */
4386 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4387 			    cum_ack, send_s);
4388 			if (tp1) {
4389 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4390 				    tp1->rec.data.TSN_seq, (void *)tp1);
4391 			}
4392 	hopeless_peer:
4393 			*abort_now = 1;
4394 			/* XXX */
4395 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4396 			    cum_ack, send_s);
4397 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4398 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4399 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4400 			return;
4401 		}
4402 	}
4403 	/**********************/
4404 	/* 1) check the range */
4405 	/**********************/
4406 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4407 		/* acking something behind */
4408 		return;
4409 	}
4410 	/* update the Rwnd of the peer */
4411 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4412 	    TAILQ_EMPTY(&asoc->send_queue) &&
4413 	    (asoc->stream_queue_cnt == 0)) {
4414 		/* nothing left on send/sent and strmq */
4415 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4416 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4417 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4418 		}
4419 		asoc->peers_rwnd = a_rwnd;
4420 		if (asoc->sent_queue_retran_cnt) {
4421 			asoc->sent_queue_retran_cnt = 0;
4422 		}
4423 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4424 			/* SWS sender side engages */
4425 			asoc->peers_rwnd = 0;
4426 		}
4427 		/* stop any timers */
4428 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4429 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4430 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4431 			net->partial_bytes_acked = 0;
4432 			net->flight_size = 0;
4433 		}
4434 		asoc->total_flight = 0;
4435 		asoc->total_flight_count = 0;
4436 		return;
4437 	}
4438 	/*
4439 	 * We init net_ack and net_ack2 to 0. These are used to track two
4440 	 * things: the total byte count acked is tracked in net_ack, AND
4441 	 * net_ack2 is used to track the total bytes acked that are
4442 	 * unambiguous and were never retransmitted. We track these on a
4443 	 * per destination address basis.
4444 	 */
4445 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4446 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4447 			/* Drag along the window_tsn for cwr's */
4448 			net->cwr_window_tsn = cum_ack;
4449 		}
4450 		net->prev_cwnd = net->cwnd;
4451 		net->net_ack = 0;
4452 		net->net_ack2 = 0;
4453 
4454 		/*
4455 		 * CMT: Reset CUC and Fast recovery algo variables before
4456 		 * SACK processing
4457 		 */
4458 		net->new_pseudo_cumack = 0;
4459 		net->will_exit_fast_recovery = 0;
4460 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4461 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4462 		}
4463 	}
4464 	/* process the new consecutive TSN first */
4465 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4466 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4467 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4468 				accum_moved = 1;
4469 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4470 					/*
4471 					 * If it is less than ACKED, it is
4472 					 * now no longer in flight. Higher
4473 					 * values may occur during marking.
4474 					 */
4475 					if ((tp1->whoTo->dest_state &
4476 					    SCTP_ADDR_UNCONFIRMED) &&
4477 					    (tp1->snd_count < 2)) {
4478 						/*
4479 						 * If there was no retran
4480 						 * and the address is
4481 						 * unconfirmed and we sent
4482 						 * there and are now
4483 						 * sacked, it's confirmed;
4484 						 * mark it so.
4485 						 */
4486 						tp1->whoTo->dest_state &=
4487 						    ~SCTP_ADDR_UNCONFIRMED;
4488 					}
4489 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4490 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4491 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4492 							    tp1->whoTo->flight_size,
4493 							    tp1->book_size,
4494 							    (uint32_t) (uintptr_t) tp1->whoTo,
4495 							    tp1->rec.data.TSN_seq);
4496 						}
4497 						sctp_flight_size_decrease(tp1);
4498 						sctp_total_flight_decrease(stcb, tp1);
4499 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4500 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4501 							    tp1);
4502 						}
4503 					}
4504 					tp1->whoTo->net_ack += tp1->send_size;
4505 
4506 					/* CMT SFR and DAC algos */
4507 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4508 					tp1->whoTo->saw_newack = 1;
4509 
4510 					if (tp1->snd_count < 2) {
4511 						/*
4512 						 * True non-retransmitted
4513 						 * chunk
4514 						 */
4515 						tp1->whoTo->net_ack2 +=
4516 						    tp1->send_size;
4517 
4518 						/* update RTO too? */
4519 						if (tp1->do_rtt) {
4520 							if (rto_ok) {
4521 								tp1->whoTo->RTO =
4522 								    sctp_calculate_rto(stcb,
4523 								    asoc, tp1->whoTo,
4524 								    &tp1->sent_rcv_time,
4525 								    sctp_align_safe_nocopy,
4526 								    SCTP_RTT_FROM_DATA);
4527 								rto_ok = 0;
4528 							}
4529 							if (tp1->whoTo->rto_needed == 0) {
4530 								tp1->whoTo->rto_needed = 1;
4531 							}
4532 							tp1->do_rtt = 0;
4533 						}
4534 					}
4535 					/*
4536 					 * CMT: CUCv2 algorithm. From the
4537 					 * cumack'd TSNs, for each TSN being
4538 					 * acked for the first time, set the
4539 					 * following variables for the
4540 					 * corresp destination.
4541 					 * new_pseudo_cumack will trigger a
4542 					 * cwnd update.
4543 					 * find_(rtx_)pseudo_cumack will
4544 					 * trigger search for the next
4545 					 * expected (rtx-)pseudo-cumack.
4546 					 */
4547 					tp1->whoTo->new_pseudo_cumack = 1;
4548 					tp1->whoTo->find_pseudo_cumack = 1;
4549 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4550 
4551 
4552 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4553 						sctp_log_sack(asoc->last_acked_seq,
4554 						    cum_ack,
4555 						    tp1->rec.data.TSN_seq,
4556 						    0,
4557 						    0,
4558 						    SCTP_LOG_TSN_ACKED);
4559 					}
4560 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4561 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4562 					}
4563 				}
4564 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4565 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4566 #ifdef SCTP_AUDITING_ENABLED
4567 					sctp_audit_log(0xB3,
4568 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4569 #endif
4570 				}
4571 				if (tp1->rec.data.chunk_was_revoked) {
4572 					/* deflate the cwnd */
4573 					tp1->whoTo->cwnd -= tp1->book_size;
4574 					tp1->rec.data.chunk_was_revoked = 0;
4575 				}
4576 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4577 					tp1->sent = SCTP_DATAGRAM_ACKED;
4578 				}
4579 			}
4580 		} else {
4581 			break;
4582 		}
4583 	}
4584 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4585 	/* always set this up to cum-ack */
4586 	asoc->this_sack_highest_gap = last_tsn;
4587 
4588 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4589 
4590 		/*
4591 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4592 		 * to be greater than the cumack. Also reset saw_newack to 0
4593 		 * for all dests.
4594 		 */
4595 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4596 			net->saw_newack = 0;
4597 			net->this_sack_highest_newack = last_tsn;
4598 		}
4599 
4600 		/*
4601 		 * this_sack_highest_gap will increase while handling NEW
4602 		 * segments; this_sack_highest_newack will increase while
4603 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4604 		 * used for the CMT DAC algo. saw_newack will also change.
4605 		 */
4606 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4607 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4608 		    num_seg, num_nr_seg, &rto_ok)) {
4609 			wake_him++;
4610 		}
4611 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4612 			/*
4613 			 * validate the biggest_tsn_acked in the gap acks if
4614 			 * strict adherence is wanted.
4615 			 */
4616 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4617 				/*
4618 				 * peer is either confused or we are under
4619 				 * attack. We must abort.
4620 				 */
4621 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4622 				    biggest_tsn_acked, send_s);
4623 				goto hopeless_peer;
4624 			}
4625 		}
4626 	}
4627 	/*******************************************/
4628 	/* cancel ALL T3-send timer if accum moved */
4629 	/*******************************************/
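	/*
	 * With CMT, a per-destination pseudo-cumack advance stops that
	 * destination's T3-send timer; without CMT, any cum-ack movement
	 * stops the timers on all destinations. Timers are restarted
	 * further down where data remains in flight.
	 */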
4630 	if (asoc->sctp_cmt_on_off > 0) {
4631 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4632 			if (net->new_pseudo_cumack)
4633 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4634 				    stcb, net,
4635 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4636 
4637 		}
4638 	} else {
4639 		if (accum_moved) {
4640 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4641 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4642 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4643 			}
4644 		}
4645 	}
4646 	/********************************************/
4647 	/* drop the acked chunks from the sentqueue */
4648 	/********************************************/
4649 	asoc->last_acked_seq = cum_ack;
4650 
4651 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4652 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4653 			break;
4654 		}
4655 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4656 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4657 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4658 #ifdef INVARIANTS
4659 			} else {
4660 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4661 #endif
4662 			}
4663 		}
4664 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4665 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4666 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4667 			asoc->trigger_reset = 1;
4668 		}
4669 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4670 		if (PR_SCTP_ENABLED(tp1->flags)) {
4671 			if (asoc->pr_sctp_cnt != 0)
4672 				asoc->pr_sctp_cnt--;
4673 		}
4674 		asoc->sent_queue_cnt--;
4675 		if (tp1->data) {
4676 			/* sa_ignore NO_NULL_CHK */
4677 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4678 			sctp_m_freem(tp1->data);
4679 			tp1->data = NULL;
4680 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4681 				asoc->sent_queue_cnt_removeable--;
4682 			}
4683 		}
4684 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4685 			sctp_log_sack(asoc->last_acked_seq,
4686 			    cum_ack,
4687 			    tp1->rec.data.TSN_seq,
4688 			    0,
4689 			    0,
4690 			    SCTP_LOG_FREE_SENT);
4691 		}
4692 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4693 		wake_him++;
4694 	}
4695 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4696 #ifdef INVARIANTS
4697 		panic("Warning flight size is positive and should be 0");
4698 #else
4699 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4700 		    asoc->total_flight);
4701 #endif
4702 		asoc->total_flight = 0;
4703 	}
4704 	/* sa_ignore NO_NULL_CHK */
4705 	if ((wake_him) && (stcb->sctp_socket)) {
4706 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4707 		struct socket *so;
4708 
4709 #endif
4710 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4711 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4712 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4713 		}
4714 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4715 		so = SCTP_INP_SO(stcb->sctp_ep);
4716 		atomic_add_int(&stcb->asoc.refcnt, 1);
4717 		SCTP_TCB_UNLOCK(stcb);
4718 		SCTP_SOCKET_LOCK(so, 1);
4719 		SCTP_TCB_LOCK(stcb);
4720 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4721 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4722 			/* assoc was freed while we were unlocked */
4723 			SCTP_SOCKET_UNLOCK(so, 1);
4724 			return;
4725 		}
4726 #endif
4727 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4729 		SCTP_SOCKET_UNLOCK(so, 1);
4730 #endif
4731 	} else {
4732 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4733 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4734 		}
4735 	}
4736 
4737 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4738 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4739 			/* Setup so we will exit RFC2582 fast recovery */
4740 			will_exit_fast_recovery = 1;
4741 		}
4742 	}
4743 	/*
4744 	 * Check for revoked fragments:
4745 	 *
4746 	 * If the previous sack had no frags, we can't have any revoked.
4747 	 * If the previous sack had frags, then: if we now have frags
4748 	 * (num_seg > 0), call sctp_check_for_revoked() to tell if the peer
4749 	 * revoked some of them; else the peer revoked all ACKED fragments,
4750 	 * since we had some before and now we have NONE.
4751 	 */
4752 
4753 	if (num_seg) {
4754 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4755 		asoc->saw_sack_with_frags = 1;
4756 	} else if (asoc->saw_sack_with_frags) {
4757 		int cnt_revoked = 0;
4758 
4759 		/* Peer revoked all dg's marked or acked */
4760 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4761 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4762 				tp1->sent = SCTP_DATAGRAM_SENT;
4763 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4764 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4765 					    tp1->whoTo->flight_size,
4766 					    tp1->book_size,
4767 					    (uint32_t) (uintptr_t) tp1->whoTo,
4768 					    tp1->rec.data.TSN_seq);
4769 				}
4770 				sctp_flight_size_increase(tp1);
4771 				sctp_total_flight_increase(stcb, tp1);
4772 				tp1->rec.data.chunk_was_revoked = 1;
4773 				/*
4774 				 * To ensure that this increase in
4775 				 * flightsize, which is artificial, does not
4776 				 * throttle the sender, we also increase the
4777 				 * cwnd artificially.
4778 				 */
4779 				tp1->whoTo->cwnd += tp1->book_size;
4780 				cnt_revoked++;
4781 			}
4782 		}
4783 		if (cnt_revoked) {
4784 			reneged_all = 1;
4785 		}
4786 		asoc->saw_sack_with_frags = 0;
4787 	}
4788 	if (num_nr_seg > 0)
4789 		asoc->saw_sack_with_nr_frags = 1;
4790 	else
4791 		asoc->saw_sack_with_nr_frags = 0;
4792 
4793 	/* JRS - Use the congestion control given in the CC module */
4794 	if (ecne_seen == 0) {
4795 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 			if (net->net_ack2 > 0) {
4797 				/*
4798 				 * Karn's rule applies to clearing the error
4799 				 * count; this is optional.
4800 				 */
4801 				net->error_count = 0;
4802 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4803 					/* addr came good */
4804 					net->dest_state |= SCTP_ADDR_REACHABLE;
4805 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4806 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4807 				}
4808 				if (net == stcb->asoc.primary_destination) {
4809 					if (stcb->asoc.alternate) {
4810 						/*
4811 						 * release the alternate,
4812 						 * primary is good
4813 						 */
4814 						sctp_free_remote_addr(stcb->asoc.alternate);
4815 						stcb->asoc.alternate = NULL;
4816 					}
4817 				}
4818 				if (net->dest_state & SCTP_ADDR_PF) {
4819 					net->dest_state &= ~SCTP_ADDR_PF;
4820 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4821 					    stcb->sctp_ep, stcb, net,
4822 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4823 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4824 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4825 					/* Done with this net */
4826 					net->net_ack = 0;
4827 				}
4828 				/* restore any doubled timers */
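				/*
				 * A sketch of the arithmetic, assuming the
				 * usual TCP-style scaling (lastsa holds
				 * 8 * SRTT, lastsv holds 4 * RTTVAR):
				 * lastsa >> SCTP_RTT_SHIFT recovers SRTT,
				 * so the line below computes
				 * RTO = SRTT + 4 * RTTVAR, which is then
				 * clamped to [minrto, maxrto].
				 */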
4829 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4830 				if (net->RTO < stcb->asoc.minrto) {
4831 					net->RTO = stcb->asoc.minrto;
4832 				}
4833 				if (net->RTO > stcb->asoc.maxrto) {
4834 					net->RTO = stcb->asoc.maxrto;
4835 				}
4836 			}
4837 		}
4838 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4839 	}
4840 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4841 		/* nothing left in-flight */
4842 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4843 			/* stop all timers */
4844 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4845 			    stcb, net,
4846 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4847 			net->flight_size = 0;
4848 			net->partial_bytes_acked = 0;
4849 		}
4850 		asoc->total_flight = 0;
4851 		asoc->total_flight_count = 0;
4852 	}
4853 	/**********************************/
4854 	/* Now what about shutdown issues */
4855 	/**********************************/
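	/*
	 * In outline (per RFC 4960, section 9.2): once the send and sent
	 * queues are both empty, an endpoint in SHUTDOWN-PENDING sends
	 * SHUTDOWN and one in SHUTDOWN-RECEIVED sends SHUTDOWN-ACK, arming
	 * the corresponding timers; if a partial message is left that can
	 * never complete, the association is aborted instead.
	 */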
4856 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4857 		/* nothing left on sendqueue, consider done */
4858 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4859 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4860 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4861 		}
4862 		asoc->peers_rwnd = a_rwnd;
4863 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4864 			/* SWS sender side engages */
4865 			asoc->peers_rwnd = 0;
4866 		}
4867 		/* clean up */
4868 		if ((asoc->stream_queue_cnt == 1) &&
4869 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4870 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4871 		    (asoc->locked_on_sending)
4872 		    ) {
4873 			struct sctp_stream_queue_pending *sp;
4874 
4875 			/*
4876 			 * We may be in a state where all the data got across
4877 			 * but we cannot write more due to a shutdown; we
4878 			 * abort since the user did not indicate EOR in this case.
4879 			 */
4880 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4881 			    sctp_streamhead);
4882 			if ((sp) && (sp->length == 0)) {
4883 				asoc->locked_on_sending = NULL;
4884 				if (sp->msg_is_complete) {
4885 					asoc->stream_queue_cnt--;
4886 				} else {
4887 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4888 					asoc->stream_queue_cnt--;
4889 				}
4890 			}
4891 		}
4892 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4893 		    (asoc->stream_queue_cnt == 0)) {
4894 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4895 				/* Need to abort here */
4896 				struct mbuf *op_err;
4897 
4898 		abort_out_now:
4899 				*abort_now = 1;
4900 				/* XXX */
4901 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4902 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4903 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4904 				return;
4905 			} else {
4906 				struct sctp_nets *netp;
4907 
4908 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4909 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4910 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4911 				}
4912 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4913 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4914 				sctp_stop_timers_for_shutdown(stcb);
4915 				if (asoc->alternate) {
4916 					netp = asoc->alternate;
4917 				} else {
4918 					netp = asoc->primary_destination;
4919 				}
4920 				sctp_send_shutdown(stcb, netp);
4921 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4922 				    stcb->sctp_ep, stcb, netp);
4923 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4924 				    stcb->sctp_ep, stcb, netp);
4925 			}
4926 			return;
4927 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4928 		    (asoc->stream_queue_cnt == 0)) {
4929 			struct sctp_nets *netp;
4930 
4931 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4932 				goto abort_out_now;
4933 			}
4934 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4935 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4936 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4937 			sctp_stop_timers_for_shutdown(stcb);
4938 			if (asoc->alternate) {
4939 				netp = asoc->alternate;
4940 			} else {
4941 				netp = asoc->primary_destination;
4942 			}
4943 			sctp_send_shutdown_ack(stcb, netp);
4944 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4945 			    stcb->sctp_ep, stcb, netp);
4946 			return;
4947 		}
4948 	}
4949 	/*
4950 	 * Now here we are going to recycle net_ack for a different use...
4951 	 * HEADS UP.
4952 	 */
4953 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 		net->net_ack = 0;
4955 	}
4956 
4957 	/*
4958 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4959 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4960 	 * automatically ensure that.
4961 	 */
4962 	if ((asoc->sctp_cmt_on_off > 0) &&
4963 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4964 	    (cmt_dac_flag == 0)) {
4965 		this_sack_lowest_newack = cum_ack;
4966 	}
4967 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4968 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4969 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4970 	}
4971 	/* JRS - Use the congestion control given in the CC module */
4972 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4973 
4974 	/* Now are we exiting loss recovery ? */
4975 	if (will_exit_fast_recovery) {
4976 		/* Ok, we must exit fast recovery */
4977 		asoc->fast_retran_loss_recovery = 0;
4978 	}
4979 	if ((asoc->sat_t3_loss_recovery) &&
4980 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4981 		/* end satellite t3 loss recovery */
4982 		asoc->sat_t3_loss_recovery = 0;
4983 	}
4984 	/*
4985 	 * CMT Fast recovery
4986 	 */
4987 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4988 		if (net->will_exit_fast_recovery) {
4989 			/* Ok, we must exit fast recovery */
4990 			net->fast_retran_loss_recovery = 0;
4991 		}
4992 	}
4993 
4994 	/* Adjust and set the new rwnd value */
4995 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4996 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4997 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4998 	}
4999 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5000 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
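	/*
	 * Hedged reading of sctp_sbspace_sub(): the peer's usable window is
	 * its advertised rwnd minus the bytes we still have in flight, with
	 * sctp_peer_chunk_oh charged per chunk as estimated overhead; the
	 * subtraction floors at zero rather than going negative.
	 */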
5001 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 		/* SWS sender side engages */
5003 		asoc->peers_rwnd = 0;
5004 	}
5005 	if (asoc->peers_rwnd > old_rwnd) {
5006 		win_probe_recovery = 1;
5007 	}
5008 	/*
5009 	 * Now we must setup so we have a timer up for anyone with
5010 	 * outstanding data.
5011 	 */
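	/*
	 * Roughly: for every net that still has data in flight (or an
	 * outstanding window probe) make sure a T3-rxt timer is running,
	 * and stop the timer on any net that has gone idle.
	 */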
5012 	done_once = 0;
5013 again:
5014 	j = 0;
5015 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5016 		if (win_probe_recovery && (net->window_probe)) {
5017 			win_probe_recovered = 1;
5018 			/*-
5019 			 * Find the first chunk that was used with a
5020 			 * window probe and clear the event. Put it
5021 			 * back into the send queue as if it had
5022 			 * not been sent.
5023 			 */
5024 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5025 				if (tp1->window_probe) {
5026 					sctp_window_probe_recovery(stcb, asoc, tp1);
5027 					break;
5028 				}
5029 			}
5030 		}
5031 		if (net->flight_size) {
5032 			j++;
5033 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5034 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5035 				    stcb->sctp_ep, stcb, net);
5036 			}
5037 			if (net->window_probe) {
5038 				net->window_probe = 0;
5039 			}
5040 		} else {
5041 			if (net->window_probe) {
5042 				/*
5043 				 * For window probes we must ensure that a
5044 				 * timer is still running there.
5045 				 */
5046 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5047 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5048 					    stcb->sctp_ep, stcb, net);
5049 
5050 				}
5051 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5052 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5053 				    stcb, net,
5054 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5055 			}
5056 		}
5057 	}
5058 	if ((j == 0) &&
5059 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5060 	    (asoc->sent_queue_retran_cnt == 0) &&
5061 	    (win_probe_recovered == 0) &&
5062 	    (done_once == 0)) {
5063 		/*
5064 		 * huh, this should not happen unless all packets are
5065 		 * PR-SCTP and marked to be skipped, of course.
5066 		 */
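		/*
		 * A minimal sketch of the recovery below: when
		 * sctp_fs_audit() reports a mismatch, zero the per-net and
		 * aggregate flight counters and rebuild them by walking the
		 * sent queue once more.
		 */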
5067 		if (sctp_fs_audit(asoc)) {
5068 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5069 				net->flight_size = 0;
5070 			}
5071 			asoc->total_flight = 0;
5072 			asoc->total_flight_count = 0;
5073 			asoc->sent_queue_retran_cnt = 0;
5074 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5075 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5076 					sctp_flight_size_increase(tp1);
5077 					sctp_total_flight_increase(stcb, tp1);
5078 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5079 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5080 				}
5081 			}
5082 		}
5083 		done_once = 1;
5084 		goto again;
5085 	}
5086 	/*********************************************/
5087 	/* Here we perform PR-SCTP procedures        */
5088 	/* (section 4.2)                             */
5089 	/*********************************************/
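	/*
	 * The C1-C3 labels below appear to follow the sender-side rules of
	 * the PR-SCTP specification (RFC 3758): C1 keeps advancedPeerAckPoint
	 * at least at the cumulative ack, C2 tries to push it past abandoned
	 * chunks, and C3 emits a FORWARD-TSN whenever the point has moved
	 * beyond the cumulative ack.
	 */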
5090 	/* C1. update advancedPeerAckPoint */
5091 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5092 		asoc->advanced_peer_ack_point = cum_ack;
5093 	}
5094 	/* C2. try to further move advancedPeerAckPoint ahead */
5095 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5096 		struct sctp_tmit_chunk *lchk;
5097 		uint32_t old_adv_peer_ack_point;
5098 
5099 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5100 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5101 		/* C3. See if we need to send a Fwd-TSN */
5102 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5103 			/*
5104 			 * ISSUE with ECN, see FWD-TSN processing.
5105 			 */
5106 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5107 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5108 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5109 				    old_adv_peer_ack_point);
5110 			}
5111 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5112 				send_forward_tsn(stcb, asoc);
5113 			} else if (lchk) {
5114 				/* try to fast retransmit fwd-tsn's that get lost too */
5115 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5116 					send_forward_tsn(stcb, asoc);
5117 				}
5118 			}
5119 		}
5120 		if (lchk) {
5121 			/* Assure a timer is up */
5122 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5123 			    stcb->sctp_ep, stcb, lchk->whoTo);
5124 		}
5125 	}
5126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5127 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5128 		    a_rwnd,
5129 		    stcb->asoc.peers_rwnd,
5130 		    stcb->asoc.total_flight,
5131 		    stcb->asoc.total_output_queue_size);
5132 	}
5133 }
5134 
5135 void
5136 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5137 {
5138 	/* Copy cum-ack */
5139 	uint32_t cum_ack, a_rwnd;
5140 
5141 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5142 	/* Arrange so a_rwnd does NOT change */
5143 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
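	/*
	 * Since the SACK path recomputes peers_rwnd as a_rwnd minus the
	 * bytes still in flight (plus per-chunk overhead), passing
	 * peers_rwnd + total_flight is intended to leave the peer's rwnd
	 * effectively unchanged.
	 */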
5144 
5145 	/* Now call the express sack handling */
5146 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5147 }
5148 
5149 static void
5150 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5151     struct sctp_stream_in *strmin)
5152 {
5153 	struct sctp_queued_to_read *ctl, *nctl;
5154 	struct sctp_association *asoc;
5155 	uint32_t tt;
5156 	int need_reasm_check = 0, old;
5157 
5158 	asoc = &stcb->asoc;
5159 	tt = strmin->last_sequence_delivered;
5160 	if (asoc->idata_supported) {
5161 		old = 0;
5162 	} else {
5163 		old = 1;
5164 	}
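	/*
	 * A note on the "old" flag (the macro reading here is an
	 * assumption): with classic DATA chunks the stream sequence numbers
	 * are 16 bits, while I-DATA message ids are 32 bits; the
	 * SCTP_MSGID_* comparisons below take "old" so they can use the
	 * right serial arithmetic.
	 */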
5165 	/*
5166 	 * First deliver anything prior to and including the stream
5167 	 * sequence number that came in.
5168 	 */
5169 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5170 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5171 			/* this is deliverable now */
5172 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5173 				if (ctl->on_strm_q) {
5174 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5175 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5176 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5177 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5178 #ifdef INVARIANTS
5179 					} else {
5180 						panic("strmin: %p ctl: %p unknown %d",
5181 						    strmin, ctl, ctl->on_strm_q);
5182 #endif
5183 					}
5184 					ctl->on_strm_q = 0;
5185 				}
5186 				/* subtract pending on streams */
5187 				asoc->size_on_all_streams -= ctl->length;
5188 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5189 				/* deliver it to at least the delivery-q */
5190 				if (stcb->sctp_socket) {
5191 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5192 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5193 					    ctl,
5194 					    &stcb->sctp_socket->so_rcv,
5195 					    1, SCTP_READ_LOCK_HELD,
5196 					    SCTP_SO_NOT_LOCKED);
5197 				}
5198 			} else {
5199 				/* It's a fragmented message */
5200 				if (ctl->first_frag_seen) {
5201 					/*
5202 					 * Make it so this is next to
5203 					 * deliver, we restore later
5204 					 */
5205 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5206 					need_reasm_check = 1;
5207 					break;
5208 				}
5209 			}
5210 		} else {
5211 			/* no more delivery now. */
5212 			break;
5213 		}
5214 	}
5215 	if (need_reasm_check) {
5216 		int ret;
5217 
5218 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5219 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5220 			/* Restore the next to deliver unless we are ahead */
5221 			strmin->last_sequence_delivered = tt;
5222 		}
5223 		if (ret == 0) {
5224 			/* Left the partially delivered one at the front */
5225 			return;
5226 		}
5227 		need_reasm_check = 0;
5228 	}
5229 	/*
5230 	 * Now we must deliver things in the queue the normal way, if any
5231 	 * are now ready.
5232 	 */
5233 	tt = strmin->last_sequence_delivered + 1;
5234 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5235 		if (tt == ctl->sinfo_ssn) {
5236 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5237 				/* this is deliverable now */
5238 				if (ctl->on_strm_q) {
5239 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5240 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5241 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5242 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5243 #ifdef INVARIANTS
5244 					} else {
5245 						panic("strmin: %p ctl: %p unknown %d",
5246 						    strmin, ctl, ctl->on_strm_q);
5247 #endif
5248 					}
5249 					ctl->on_strm_q = 0;
5250 				}
5251 				/* subtract pending on streams */
5252 				asoc->size_on_all_streams -= ctl->length;
5253 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5254 				/* deliver it to at least the delivery-q */
5255 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5256 				if (stcb->sctp_socket) {
5257 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5258 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5259 					    ctl,
5260 					    &stcb->sctp_socket->so_rcv, 1,
5261 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5262 
5263 				}
5264 				tt = strmin->last_sequence_delivered + 1;
5265 			} else {
5266 				/* It's a fragmented message */
5267 				if (ctl->first_frag_seen) {
5268 					/*
5269 					 * Make it so this is next to
5270 					 * deliver
5271 					 */
5272 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5273 					need_reasm_check = 1;
5274 					break;
5275 				}
5276 			}
5277 		} else {
5278 			break;
5279 		}
5280 	}
5281 	if (need_reasm_check) {
5282 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5283 	}
5284 }
5285 
5286 static void
5287 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5288     struct sctp_association *asoc,
5289     uint16_t stream, uint32_t seq)
5290 {
5291 	struct sctp_queued_to_read *control;
5292 	struct sctp_stream_in *strm;
5293 	struct sctp_tmit_chunk *chk, *nchk;
5294 
5295 	/*
5296 	 * For now, large messages held on the stream reassembly queue that
5297 	 * are complete will be tossed too. We could in theory do more work
5298 	 * to spin through and stop after dumping one message, i.e. on
5299 	 * seeing the start of a new message at the head, and call the
5300 	 * delivery function to see if it can be delivered. But for now we
5301 	 * just dump everything on the queue.
5302 	 */
5303 	strm = &asoc->strmin[stream];
5304 	control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5305 	if (control == NULL) {
5306 		/* Not found */
5307 		return;
5308 	}
5309 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5310 		/* Purge hanging chunks */
5311 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5312 		asoc->size_on_reasm_queue -= chk->send_size;
5313 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5314 		if (chk->data) {
5315 			sctp_m_freem(chk->data);
5316 			chk->data = NULL;
5317 		}
5318 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5319 	}
5320 	TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5321 	if (control->on_read_q == 0) {
5322 		sctp_free_remote_addr(control->whoFrom);
5323 		if (control->data) {
5324 			sctp_m_freem(control->data);
5325 			control->data = NULL;
5326 		}
5327 		sctp_free_a_readq(stcb, control);
5328 	}
5329 }
5330 
5331 
5332 void
5333 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5334     struct sctp_forward_tsn_chunk *fwd,
5335     int *abort_flag, struct mbuf *m, int offset)
5336 {
5337 	/* The pr-sctp fwd tsn */
5338 	/*
5339 	 * Here we will perform all the data receiver side steps for
5340 	 * processing FwdTSN, as required by the PR-SCTP draft. Assume we
5341 	 * get FwdTSN(x):
5342 	 *
5343 	 * 1) update local cumTSN to x
5344 	 * 2) try to further advance cumTSN to x + others we have
5345 	 * 3) examine and update the re-ordering queues on pr-in-streams
5346 	 * 4) clean up the re-assembly queue
5347 	 * 5) send a SACK to report where we are
5348 	 */
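	/*
	 * Illustrative example (hypothetical numbers): with cumulative_tsn
	 * at 100, a FwdTSN(105) makes us treat TSNs 101-105 as received,
	 * flush any partial delivery state for the stream/sequence pairs
	 * listed in the chunk, and advertise the new cumulative ack in the
	 * next SACK we send.
	 */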
5349 	struct sctp_association *asoc;
5350 	uint32_t new_cum_tsn, gap;
5351 	unsigned int i, fwd_sz, m_size;
5352 	uint32_t str_seq;
5353 	struct sctp_stream_in *strm;
5354 	struct sctp_queued_to_read *ctl, *sv;
5355 
5356 	asoc = &stcb->asoc;
5357 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5358 		SCTPDBG(SCTP_DEBUG_INDATA1,
5359 		    "Bad size too small/big fwd-tsn\n");
5360 		return;
5361 	}
5362 	m_size = (stcb->asoc.mapping_array_size << 3);
5363 	/*************************************************************/
5364 	/* 1. Here we update local cumTSN and shift the bitmap array */
5365 	/*************************************************************/
5366 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5367 
5368 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5369 		/* Already got there ... */
5370 		return;
5371 	}
5372 	/*
5373 	 * now we know the new TSN is more advanced, let's find the actual
5374 	 * gap
5375 	 */
5376 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
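	/*
	 * gap is now the serial-arithmetic distance from the mapping
	 * array's base TSN to the new cumulative TSN, i.e. the bit index
	 * that TSN would occupy; if it falls outside the array
	 * (gap >= m_size) the whole map is reset below.
	 */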
5377 	asoc->cumulative_tsn = new_cum_tsn;
5378 	if (gap >= m_size) {
5379 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5380 			struct mbuf *op_err;
5381 			char msg[SCTP_DIAG_INFO_LEN];
5382 
5383 			/*
5384 			 * out of range (of single byte chunks in the rwnd I
5385 			 * give out). This must be an attacker.
5386 			 */
5387 			*abort_flag = 1;
5388 			snprintf(msg, sizeof(msg),
5389 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5390 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5391 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5392 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5393 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5394 			return;
5395 		}
5396 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5397 
5398 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5399 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5400 		asoc->highest_tsn_inside_map = new_cum_tsn;
5401 
5402 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5403 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5404 
5405 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5406 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5407 		}
5408 	} else {
5409 		SCTP_TCB_LOCK_ASSERT(stcb);
5410 		for (i = 0; i <= gap; i++) {
5411 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5412 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5413 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5414 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5415 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5416 				}
5417 			}
5418 		}
5419 	}
5420 	/*************************************************************/
5421 	/* 2. Clear up re-assembly queue                             */
5422 	/*************************************************************/
5423 
5424 	/* This is now done as part of clearing up the stream/seq */
5425 
5426 	/*******************************************************/
5427 	/* 3. Update the PR-stream re-ordering queues and fix  */
5428 	/* delivery issues as needed.                       */
5429 	/*******************************************************/
5430 	fwd_sz -= sizeof(*fwd);
5431 	if (m && fwd_sz) {
5432 		/* New method. */
5433 		unsigned int num_str;
5434 		uint32_t sequence;
5435 		uint16_t stream;
5436 		int old;
5437 		struct sctp_strseq *stseq, strseqbuf;
5438 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5439 
5440 		offset += sizeof(*fwd);
5441 
5442 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5443 		if (asoc->idata_supported) {
5444 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5445 			old = 0;
5446 		} else {
5447 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5448 			old = 1;
5449 		}
5450 		for (i = 0; i < num_str; i++) {
5451 			if (asoc->idata_supported) {
5452 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5453 				    sizeof(struct sctp_strseq_mid),
5454 				    (uint8_t *) & strseqbuf_m);
5455 				offset += sizeof(struct sctp_strseq_mid);
5456 				if (stseq_m == NULL) {
5457 					break;
5458 				}
5459 				stream = ntohs(stseq_m->stream);
5460 				sequence = ntohl(stseq_m->msg_id);
5461 			} else {
5462 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5463 				    sizeof(struct sctp_strseq),
5464 				    (uint8_t *) & strseqbuf);
5465 				offset += sizeof(struct sctp_strseq);
5466 				if (stseq == NULL) {
5467 					break;
5468 				}
5469 				stream = ntohs(stseq->stream);
5470 				sequence = (uint32_t) ntohs(stseq->sequence);
5471 			}
5472 			/* Convert */
5473 
5474 			/* now process */
5475 
5476 			/*
5477 			 * Ok, we now look for the stream/seq on the read
5478 			 * queue where it's not all delivered. If we find it,
5479 			 * we transmute the read entry into a PDI_ABORTED.
5480 			 */
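			/*
			 * Concretely: the matching entry is marked ended and
			 * pdapi_aborted, pulled off its stream queue, and the
			 * application gets a PARTIAL_DELIVERY_ABORTED
			 * notification keyed by (stream << 16) | sequence.
			 */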
5481 			if (stream >= asoc->streamincnt) {
5482 				/* screwed up streams, stop!  */
5483 				break;
5484 			}
5485 			if ((asoc->str_of_pdapi == stream) &&
5486 			    (asoc->ssn_of_pdapi == sequence)) {
5487 				/*
5488 				 * If this is the one we were partially
5489 				 * delivering now then we no longer are.
5490 				 * Note this will change with the reassembly
5491 				 * re-write.
5492 				 */
5493 				asoc->fragmented_delivery_inprogress = 0;
5494 			}
5495 			strm = &asoc->strmin[stream];
5496 			sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5497 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5498 				if ((ctl->sinfo_stream == stream) &&
5499 				    (ctl->sinfo_ssn == sequence)) {
5500 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5501 					ctl->pdapi_aborted = 1;
5502 					sv = stcb->asoc.control_pdapi;
5503 					ctl->end_added = 1;
5504 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5505 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5506 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5507 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5508 #ifdef INVARIANTS
5509 					} else if (ctl->on_strm_q) {
5510 						panic("strm: %p ctl: %p unknown %d",
5511 						    strm, ctl, ctl->on_strm_q);
5512 #endif
5513 					}
5514 					ctl->on_strm_q = 0;
5515 					stcb->asoc.control_pdapi = ctl;
5516 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5517 					    stcb,
5518 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5519 					    (void *)&str_seq,
5520 					    SCTP_SO_NOT_LOCKED);
5521 					stcb->asoc.control_pdapi = sv;
5522 					break;
5523 				} else if ((ctl->sinfo_stream == stream) &&
5524 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5525 					/* We are past our victim SSN */
5526 					break;
5527 				}
5528 			}
5529 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5530 				/* Update the sequence number */
5531 				strm->last_sequence_delivered = sequence;
5532 			}
5533 			/* now kick the stream the new way */
5534 			/* sa_ignore NO_NULL_CHK */
5535 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5536 		}
5537 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5538 	}
5539 	/*
5540 	 * Now slide things forward.
5541 	 */
5542 	sctp_slide_mapping_arrays(stcb);
5543 }
5544