xref: /freebsd/sys/netinet/sctp_indata.c (revision 1f8b431d185416f70e96f03b8fd69b98442b1913)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send that is and will be sending it .. for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int lock_held);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-2-m socket. Since
86 	 * the sb_cc is the count that everyone as put up. When we re-write
87 	 * sctp_soreceive then we will fix this so that ONLY this
88 	 * associations data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * take out what has NOT been put on socket queue and we yet hold
109 	 * for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even it is 0. SWS engaged
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 failed_build:
168 	return (read_queue_e);
169 }
170 
171 struct mbuf *
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173 {
174 	struct sctp_extrcvinfo *seinfo;
175 	struct sctp_sndrcvinfo *outinfo;
176 	struct sctp_rcvinfo *rcvinfo;
177 	struct sctp_nxtinfo *nxtinfo;
178 	struct cmsghdr *cmh;
179 	struct mbuf *ret;
180 	int len;
181 	int use_extended;
182 	int provide_nxt;
183 
184 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 		/* user does not want any ancillary data */
188 		return (NULL);
189 	}
190 
191 	len = 0;
192 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 	}
195 	seinfo = (struct sctp_extrcvinfo *)sinfo;
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 		provide_nxt = 1;
199 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 	} else {
201 		provide_nxt = 0;
202 	}
203 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 			use_extended = 1;
206 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 		} else {
208 			use_extended = 0;
209 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 		}
211 	} else {
212 		use_extended = 0;
213 	}
214 
215 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 	if (ret == NULL) {
217 		/* No space */
218 		return (ret);
219 	}
220 	SCTP_BUF_LEN(ret) = 0;
221 
222 	/* We need a CMSG header followed by the struct */
223 	cmh = mtod(ret, struct cmsghdr *);
224 	/*
225 	 * Make sure that there is no un-initialized padding between the
226 	 * cmsg header and cmsg data and after the cmsg data.
227 	 */
228 	memset(cmh, 0, len);
229 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 		cmh->cmsg_level = IPPROTO_SCTP;
231 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 		cmh->cmsg_type = SCTP_RCVINFO;
233 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 		rcvinfo->rcv_context = sinfo->sinfo_context;
241 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 	}
245 	if (provide_nxt) {
246 		cmh->cmsg_level = IPPROTO_SCTP;
247 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 		cmh->cmsg_type = SCTP_NXTINFO;
249 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 		nxtinfo->nxt_flags = 0;
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 		}
261 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 	}
267 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 		cmh->cmsg_level = IPPROTO_SCTP;
269 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 		if (use_extended) {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 			cmh->cmsg_type = SCTP_EXTRCV;
273 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 		} else {
276 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 			cmh->cmsg_type = SCTP_SNDRCV;
278 			*outinfo = *sinfo;
279 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 		}
281 	}
282 	return (ret);
283 }
284 
285 
286 static void
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 {
289 	uint32_t gap, i, cumackp1;
290 	int fnd = 0;
291 	int in_r = 0, in_nr = 0;
292 
293 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
294 		return;
295 	}
296 	cumackp1 = asoc->cumulative_tsn + 1;
297 	if (SCTP_TSN_GT(cumackp1, tsn)) {
298 		/*
299 		 * this tsn is behind the cum ack and thus we don't need to
300 		 * worry about it being moved from one to the other.
301 		 */
302 		return;
303 	}
304 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 	if ((in_r == 0) && (in_nr == 0)) {
308 #ifdef INVARIANTS
309 		panic("Things are really messed up now");
310 #else
311 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 		sctp_print_mapping_array(asoc);
313 #endif
314 	}
315 	if (in_nr == 0)
316 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 	if (in_r)
318 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 		asoc->highest_tsn_inside_nr_map = tsn;
321 	}
322 	if (tsn == asoc->highest_tsn_inside_map) {
323 		/* We must back down to see what the new highest is */
324 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 				asoc->highest_tsn_inside_map = i;
328 				fnd = 1;
329 				break;
330 			}
331 		}
332 		if (!fnd) {
333 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 		}
335 	}
336 }
337 
338 static int
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340     struct sctp_association *asoc,
341     struct sctp_queued_to_read *control)
342 {
343 	struct sctp_queued_to_read *at;
344 	struct sctp_readhead *q;
345 	uint8_t flags, unordered;
346 
347 	flags = (control->sinfo_flags >> 8);
348 	unordered = flags & SCTP_DATA_UNORDERED;
349 	if (unordered) {
350 		q = &strm->uno_inqueue;
351 		if (asoc->idata_supported == 0) {
352 			if (!TAILQ_EMPTY(q)) {
353 				/*
354 				 * Only one stream can be here in old style
355 				 * -- abort
356 				 */
357 				return (-1);
358 			}
359 			TAILQ_INSERT_TAIL(q, control, next_instrm);
360 			control->on_strm_q = SCTP_ON_UNORDERED;
361 			return (0);
362 		}
363 	} else {
364 		q = &strm->inqueue;
365 	}
366 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 		control->end_added = 1;
368 		control->first_frag_seen = 1;
369 		control->last_frag_seen = 1;
370 	}
371 	if (TAILQ_EMPTY(q)) {
372 		/* Empty queue */
373 		TAILQ_INSERT_HEAD(q, control, next_instrm);
374 		if (unordered) {
375 			control->on_strm_q = SCTP_ON_UNORDERED;
376 		} else {
377 			control->on_strm_q = SCTP_ON_ORDERED;
378 		}
379 		return (0);
380 	} else {
381 		TAILQ_FOREACH(at, q, next_instrm) {
382 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 				/*
384 				 * one in queue is bigger than the new one,
385 				 * insert before this one
386 				 */
387 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 				if (unordered) {
389 					control->on_strm_q = SCTP_ON_UNORDERED;
390 				} else {
391 					control->on_strm_q = SCTP_ON_ORDERED;
392 				}
393 				break;
394 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 				/*
396 				 * Gak, He sent me a duplicate msg id
397 				 * number?? return -1 to abort.
398 				 */
399 				return (-1);
400 			} else {
401 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 					/*
403 					 * We are at the end, insert it
404 					 * after this one
405 					 */
406 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 						sctp_log_strm_del(control, at,
408 						    SCTP_STR_LOG_FROM_INSERT_TL);
409 					}
410 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 					if (unordered) {
412 						control->on_strm_q = SCTP_ON_UNORDERED;
413 					} else {
414 						control->on_strm_q = SCTP_ON_ORDERED;
415 					}
416 					break;
417 				}
418 			}
419 		}
420 	}
421 	return (0);
422 }
423 
424 static void
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426     struct sctp_queued_to_read *control,
427     struct sctp_tmit_chunk *chk,
428     int *abort_flag, int opspot)
429 {
430 	char msg[SCTP_DIAG_INFO_LEN];
431 	struct mbuf *oper;
432 
433 	if (stcb->asoc.idata_supported) {
434 		snprintf(msg, sizeof(msg),
435 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 		    opspot,
437 		    control->fsn_included,
438 		    chk->rec.data.tsn,
439 		    chk->rec.data.sid,
440 		    chk->rec.data.fsn, chk->rec.data.mid);
441 	} else {
442 		snprintf(msg, sizeof(msg),
443 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 		    opspot,
445 		    control->fsn_included,
446 		    chk->rec.data.tsn,
447 		    chk->rec.data.sid,
448 		    chk->rec.data.fsn,
449 		    (uint16_t)chk->rec.data.mid);
450 	}
451 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 	sctp_m_freem(chk->data);
453 	chk->data = NULL;
454 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
457 	*abort_flag = 1;
458 }
459 
460 static void
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
462 {
463 	/*
464 	 * The control could not be placed and must be cleaned.
465 	 */
466 	struct sctp_tmit_chunk *chk, *nchk;
467 
468 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 		if (chk->data)
471 			sctp_m_freem(chk->data);
472 		chk->data = NULL;
473 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 	}
475 	sctp_free_a_readq(stcb, control);
476 }
477 
478 /*
479  * Queue the chunk either right into the socket buffer if it is the next one
480  * to go OR put it in the correct place in the delivery queue.  If we do
481  * append to the so_buf, keep doing so until we are out of order as
482  * long as the control's entered are non-fragmented.
483  */
484 static void
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486     struct sctp_association *asoc,
487     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
488 {
489 	/*
490 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 	 * all the data in one stream this could happen quite rapidly. One
492 	 * could use the TSN to keep track of things, but this scheme breaks
493 	 * down in the other type of stream usage that could occur. Send a
494 	 * single msg to stream 0, send 4Billion messages to stream 1, now
495 	 * send a message to stream 0. You have a situation where the TSN
496 	 * has wrapped but not in the stream. Is this worth worrying about
497 	 * or should we just change our queue sort at the bottom to be by
498 	 * TSN.
499 	 *
500 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
501 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
502 	 * assignment this could happen... and I don't see how this would be
503 	 * a violation. So for now I am undecided an will leave the sort by
504 	 * SSN alone. Maybe a hybred approach is the answer
505 	 *
506 	 */
507 	struct sctp_queued_to_read *at;
508 	int queue_needed;
509 	uint32_t nxt_todel;
510 	struct mbuf *op_err;
511 	struct sctp_stream_in *strm;
512 	char msg[SCTP_DIAG_INFO_LEN];
513 
514 	strm = &asoc->strmin[control->sinfo_stream];
515 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
517 	}
518 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 		/* The incoming sseq is behind where we last delivered? */
520 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 		    strm->last_mid_delivered, control->mid);
522 		/*
523 		 * throw it in the stream so it gets cleaned up in
524 		 * association destruction
525 		 */
526 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 		if (asoc->idata_supported) {
528 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 			    strm->last_mid_delivered, control->sinfo_tsn,
530 			    control->sinfo_stream, control->mid);
531 		} else {
532 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 			    (uint16_t)strm->last_mid_delivered,
534 			    control->sinfo_tsn,
535 			    control->sinfo_stream,
536 			    (uint16_t)control->mid);
537 		}
538 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
541 		*abort_flag = 1;
542 		return;
543 
544 	}
545 	queue_needed = 1;
546 	asoc->size_on_all_streams += control->length;
547 	sctp_ucount_incr(asoc->cnt_on_all_streams);
548 	nxt_todel = strm->last_mid_delivered + 1;
549 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
551 		struct socket *so;
552 
553 		so = SCTP_INP_SO(stcb->sctp_ep);
554 		atomic_add_int(&stcb->asoc.refcnt, 1);
555 		SCTP_TCB_UNLOCK(stcb);
556 		SCTP_SOCKET_LOCK(so, 1);
557 		SCTP_TCB_LOCK(stcb);
558 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 			SCTP_SOCKET_UNLOCK(so, 1);
561 			return;
562 		}
563 #endif
564 		/* can be delivered right away? */
565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
567 		}
568 		/* EY it wont be queued if it could be delivered directly */
569 		queue_needed = 0;
570 		if (asoc->size_on_all_streams >= control->length) {
571 			asoc->size_on_all_streams -= control->length;
572 		} else {
573 #ifdef INVARIANTS
574 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
575 #else
576 			asoc->size_on_all_streams = 0;
577 #endif
578 		}
579 		sctp_ucount_decr(asoc->cnt_on_all_streams);
580 		strm->last_mid_delivered++;
581 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 		sctp_add_to_readq(stcb->sctp_ep, stcb,
583 		    control,
584 		    &stcb->sctp_socket->so_rcv, 1,
585 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
586 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
587 			/* all delivered */
588 			nxt_todel = strm->last_mid_delivered + 1;
589 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 				if (control->on_strm_q == SCTP_ON_ORDERED) {
592 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 					if (asoc->size_on_all_streams >= control->length) {
594 						asoc->size_on_all_streams -= control->length;
595 					} else {
596 #ifdef INVARIANTS
597 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
598 #else
599 						asoc->size_on_all_streams = 0;
600 #endif
601 					}
602 					sctp_ucount_decr(asoc->cnt_on_all_streams);
603 #ifdef INVARIANTS
604 				} else {
605 					panic("Huh control: %p is on_strm_q: %d",
606 					    control, control->on_strm_q);
607 #endif
608 				}
609 				control->on_strm_q = 0;
610 				strm->last_mid_delivered++;
611 				/*
612 				 * We ignore the return of deliver_data here
613 				 * since we always can hold the chunk on the
614 				 * d-queue. And we have a finite number that
615 				 * can be delivered from the strq.
616 				 */
617 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 					sctp_log_strm_del(control, NULL,
619 					    SCTP_STR_LOG_FROM_IMMED_DEL);
620 				}
621 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 				sctp_add_to_readq(stcb->sctp_ep, stcb,
623 				    control,
624 				    &stcb->sctp_socket->so_rcv, 1,
625 				    SCTP_READ_LOCK_NOT_HELD,
626 				    SCTP_SO_LOCKED);
627 				continue;
628 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
629 				*need_reasm = 1;
630 			}
631 			break;
632 		}
633 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 		SCTP_SOCKET_UNLOCK(so, 1);
635 #endif
636 	}
637 	if (queue_needed) {
638 		/*
639 		 * Ok, we did not deliver this guy, find the correct place
640 		 * to put it on the queue.
641 		 */
642 		if (sctp_place_control_in_stream(strm, asoc, control)) {
643 			snprintf(msg, sizeof(msg),
644 			    "Queue to str MID: %u duplicate",
645 			    control->mid);
646 			sctp_clean_up_control(stcb, control);
647 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
650 			*abort_flag = 1;
651 		}
652 	}
653 }
654 
655 
656 static void
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
658 {
659 	struct mbuf *m, *prev = NULL;
660 	struct sctp_tcb *stcb;
661 
662 	stcb = control->stcb;
663 	control->held_length = 0;
664 	control->length = 0;
665 	m = control->data;
666 	while (m) {
667 		if (SCTP_BUF_LEN(m) == 0) {
668 			/* Skip mbufs with NO length */
669 			if (prev == NULL) {
670 				/* First one */
671 				control->data = sctp_m_free(m);
672 				m = control->data;
673 			} else {
674 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 				m = SCTP_BUF_NEXT(prev);
676 			}
677 			if (m == NULL) {
678 				control->tail_mbuf = prev;
679 			}
680 			continue;
681 		}
682 		prev = m;
683 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 		if (control->on_read_q) {
685 			/*
686 			 * On read queue so we must increment the SB stuff,
687 			 * we assume caller has done any locks of SB.
688 			 */
689 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
690 		}
691 		m = SCTP_BUF_NEXT(m);
692 	}
693 	if (prev) {
694 		control->tail_mbuf = prev;
695 	}
696 }
697 
698 static void
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
700 {
701 	struct mbuf *prev = NULL;
702 	struct sctp_tcb *stcb;
703 
704 	stcb = control->stcb;
705 	if (stcb == NULL) {
706 #ifdef INVARIANTS
707 		panic("Control broken");
708 #else
709 		return;
710 #endif
711 	}
712 	if (control->tail_mbuf == NULL) {
713 		/* TSNH */
714 		control->data = m;
715 		sctp_setup_tail_pointer(control);
716 		return;
717 	}
718 	control->tail_mbuf->m_next = m;
719 	while (m) {
720 		if (SCTP_BUF_LEN(m) == 0) {
721 			/* Skip mbufs with NO length */
722 			if (prev == NULL) {
723 				/* First one */
724 				control->tail_mbuf->m_next = sctp_m_free(m);
725 				m = control->tail_mbuf->m_next;
726 			} else {
727 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 				m = SCTP_BUF_NEXT(prev);
729 			}
730 			if (m == NULL) {
731 				control->tail_mbuf = prev;
732 			}
733 			continue;
734 		}
735 		prev = m;
736 		if (control->on_read_q) {
737 			/*
738 			 * On read queue so we must increment the SB stuff,
739 			 * we assume caller has done any locks of SB.
740 			 */
741 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
742 		}
743 		*added += SCTP_BUF_LEN(m);
744 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 		m = SCTP_BUF_NEXT(m);
746 	}
747 	if (prev) {
748 		control->tail_mbuf = prev;
749 	}
750 }
751 
752 static void
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
754 {
755 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 	nc->sinfo_stream = control->sinfo_stream;
757 	nc->mid = control->mid;
758 	TAILQ_INIT(&nc->reasm);
759 	nc->top_fsn = control->top_fsn;
760 	nc->mid = control->mid;
761 	nc->sinfo_flags = control->sinfo_flags;
762 	nc->sinfo_ppid = control->sinfo_ppid;
763 	nc->sinfo_context = control->sinfo_context;
764 	nc->fsn_included = 0xffffffff;
765 	nc->sinfo_tsn = control->sinfo_tsn;
766 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 	nc->whoFrom = control->whoFrom;
769 	atomic_add_int(&nc->whoFrom->ref_count, 1);
770 	nc->stcb = control->stcb;
771 	nc->port_from = control->port_from;
772 }
773 
774 static void
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776     struct sctp_inpcb *inp, uint32_t tsn)
777 {
778 	control->fsn_included = tsn;
779 	if (control->on_read_q) {
780 		/*
781 		 * We have to purge it from there, hopefully this will work
782 		 * :-)
783 		 */
784 		TAILQ_REMOVE(&inp->read_queue, control, next);
785 		control->on_read_q = 0;
786 	}
787 }
788 
789 static int
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791     struct sctp_association *asoc,
792     struct sctp_stream_in *strm,
793     struct sctp_queued_to_read *control,
794     uint32_t pd_point,
795     int inp_read_lock_held)
796 {
797 	/*
798 	 * Special handling for the old un-ordered data chunk. All the
799 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 	 * to see if we have it all. If you return one, no other control
801 	 * entries on the un-ordered queue will be looked at. In theory
802 	 * there should be no others entries in reality, unless the guy is
803 	 * sending both unordered NDATA and unordered DATA...
804 	 */
805 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
806 	uint32_t fsn;
807 	struct sctp_queued_to_read *nc;
808 	int cnt_added;
809 
810 	if (control->first_frag_seen == 0) {
811 		/* Nothing we can do, we have not seen the first piece yet */
812 		return (1);
813 	}
814 	/* Collapse any we can */
815 	cnt_added = 0;
816 restart:
817 	fsn = control->fsn_included + 1;
818 	/* Now what can we add? */
819 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 		if (chk->rec.data.fsn == fsn) {
821 			/* Ok lets add it */
822 			sctp_alloc_a_readq(stcb, nc);
823 			if (nc == NULL) {
824 				break;
825 			}
826 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
829 			fsn++;
830 			cnt_added++;
831 			chk = NULL;
832 			if (control->end_added) {
833 				/* We are done */
834 				if (!TAILQ_EMPTY(&control->reasm)) {
835 					/*
836 					 * Ok we have to move anything left
837 					 * on the control queue to a new
838 					 * control.
839 					 */
840 					sctp_build_readq_entry_from_ctl(nc, control);
841 					tchk = TAILQ_FIRST(&control->reasm);
842 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 							asoc->size_on_reasm_queue -= tchk->send_size;
846 						} else {
847 #ifdef INVARIANTS
848 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
849 #else
850 							asoc->size_on_reasm_queue = 0;
851 #endif
852 						}
853 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
854 						nc->first_frag_seen = 1;
855 						nc->fsn_included = tchk->rec.data.fsn;
856 						nc->data = tchk->data;
857 						nc->sinfo_ppid = tchk->rec.data.ppid;
858 						nc->sinfo_tsn = tchk->rec.data.tsn;
859 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
860 						tchk->data = NULL;
861 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 						sctp_setup_tail_pointer(nc);
863 						tchk = TAILQ_FIRST(&control->reasm);
864 					}
865 					/* Spin the rest onto the queue */
866 					while (tchk) {
867 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 						tchk = TAILQ_FIRST(&control->reasm);
870 					}
871 					/*
872 					 * Now lets add it to the queue
873 					 * after removing control
874 					 */
875 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 					nc->on_strm_q = SCTP_ON_UNORDERED;
877 					if (control->on_strm_q) {
878 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 						control->on_strm_q = 0;
880 					}
881 				}
882 				if (control->pdapi_started) {
883 					strm->pd_api_started = 0;
884 					control->pdapi_started = 0;
885 				}
886 				if (control->on_strm_q) {
887 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 					control->on_strm_q = 0;
889 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
890 				}
891 				if (control->on_read_q == 0) {
892 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 					    &stcb->sctp_socket->so_rcv, control->end_added,
894 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
895 				}
896 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
898 					/*
899 					 * Switch to the new guy and
900 					 * continue
901 					 */
902 					control = nc;
903 					goto restart;
904 				} else {
905 					if (nc->on_strm_q == 0) {
906 						sctp_free_a_readq(stcb, nc);
907 					}
908 				}
909 				return (1);
910 			} else {
911 				sctp_free_a_readq(stcb, nc);
912 			}
913 		} else {
914 			/* Can't add more */
915 			break;
916 		}
917 	}
918 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
919 		strm->pd_api_started = 1;
920 		control->pdapi_started = 1;
921 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
922 		    &stcb->sctp_socket->so_rcv, control->end_added,
923 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
924 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
925 		return (0);
926 	} else {
927 		return (1);
928 	}
929 }
930 
931 static void
932 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
933     struct sctp_association *asoc,
934     struct sctp_queued_to_read *control,
935     struct sctp_tmit_chunk *chk,
936     int *abort_flag)
937 {
938 	struct sctp_tmit_chunk *at;
939 	int inserted;
940 
941 	/*
942 	 * Here we need to place the chunk into the control structure sorted
943 	 * in the correct order.
944 	 */
945 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
946 		/* Its the very first one. */
947 		SCTPDBG(SCTP_DEBUG_XXX,
948 		    "chunk is a first fsn: %u becomes fsn_included\n",
949 		    chk->rec.data.fsn);
950 		if (control->first_frag_seen) {
951 			/*
952 			 * In old un-ordered we can reassembly on one
953 			 * control multiple messages. As long as the next
954 			 * FIRST is greater then the old first (TSN i.e. FSN
955 			 * wise)
956 			 */
957 			struct mbuf *tdata;
958 			uint32_t tmp;
959 
960 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
961 				/*
962 				 * Easy way the start of a new guy beyond
963 				 * the lowest
964 				 */
965 				goto place_chunk;
966 			}
967 			if ((chk->rec.data.fsn == control->fsn_included) ||
968 			    (control->pdapi_started)) {
969 				/*
970 				 * Ok this should not happen, if it does we
971 				 * started the pd-api on the higher TSN
972 				 * (since the equals part is a TSN failure
973 				 * it must be that).
974 				 *
975 				 * We are completly hosed in that case since
976 				 * I have no way to recover. This really
977 				 * will only happen if we can get more TSN's
978 				 * higher before the pd-api-point.
979 				 */
980 				sctp_abort_in_reasm(stcb, control, chk,
981 				    abort_flag,
982 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
983 
984 				return;
985 			}
986 			/*
987 			 * Ok we have two firsts and the one we just got is
988 			 * smaller than the one we previously placed.. yuck!
989 			 * We must swap them out.
990 			 */
991 			/* swap the mbufs */
992 			tdata = control->data;
993 			control->data = chk->data;
994 			chk->data = tdata;
995 			/* Save the lengths */
996 			chk->send_size = control->length;
997 			/* Recompute length of control and tail pointer */
998 			sctp_setup_tail_pointer(control);
999 			/* Fix the FSN included */
1000 			tmp = control->fsn_included;
1001 			control->fsn_included = chk->rec.data.fsn;
1002 			chk->rec.data.fsn = tmp;
1003 			/* Fix the TSN included */
1004 			tmp = control->sinfo_tsn;
1005 			control->sinfo_tsn = chk->rec.data.tsn;
1006 			chk->rec.data.tsn = tmp;
1007 			/* Fix the PPID included */
1008 			tmp = control->sinfo_ppid;
1009 			control->sinfo_ppid = chk->rec.data.ppid;
1010 			chk->rec.data.ppid = tmp;
1011 			/* Fix tail pointer */
1012 			goto place_chunk;
1013 		}
1014 		control->first_frag_seen = 1;
1015 		control->fsn_included = chk->rec.data.fsn;
1016 		control->top_fsn = chk->rec.data.fsn;
1017 		control->sinfo_tsn = chk->rec.data.tsn;
1018 		control->sinfo_ppid = chk->rec.data.ppid;
1019 		control->data = chk->data;
1020 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1021 		chk->data = NULL;
1022 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1023 		sctp_setup_tail_pointer(control);
1024 		return;
1025 	}
1026 place_chunk:
1027 	inserted = 0;
1028 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1029 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1030 			/*
1031 			 * This one in queue is bigger than the new one,
1032 			 * insert the new one before at.
1033 			 */
1034 			asoc->size_on_reasm_queue += chk->send_size;
1035 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1036 			inserted = 1;
1037 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1038 			break;
1039 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1040 			/*
1041 			 * They sent a duplicate fsn number. This really
1042 			 * should not happen since the FSN is a TSN and it
1043 			 * should have been dropped earlier.
1044 			 */
1045 			sctp_abort_in_reasm(stcb, control, chk,
1046 			    abort_flag,
1047 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1048 			return;
1049 		}
1050 
1051 	}
1052 	if (inserted == 0) {
1053 		/* Its at the end */
1054 		asoc->size_on_reasm_queue += chk->send_size;
1055 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1056 		control->top_fsn = chk->rec.data.fsn;
1057 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1058 	}
1059 }
1060 
1061 static int
1062 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1063     struct sctp_stream_in *strm, int inp_read_lock_held)
1064 {
1065 	/*
1066 	 * Given a stream, strm, see if any of the SSN's on it that are
1067 	 * fragmented are ready to deliver. If so go ahead and place them on
1068 	 * the read queue. In so placing if we have hit the end, then we
1069 	 * need to remove them from the stream's queue.
1070 	 */
1071 	struct sctp_queued_to_read *control, *nctl = NULL;
1072 	uint32_t next_to_del;
1073 	uint32_t pd_point;
1074 	int ret = 0;
1075 
1076 	if (stcb->sctp_socket) {
1077 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1078 		    stcb->sctp_ep->partial_delivery_point);
1079 	} else {
1080 		pd_point = stcb->sctp_ep->partial_delivery_point;
1081 	}
1082 	control = TAILQ_FIRST(&strm->uno_inqueue);
1083 
1084 	if ((control != NULL) &&
1085 	    (asoc->idata_supported == 0)) {
1086 		/* Special handling needed for "old" data format */
1087 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1088 			goto done_un;
1089 		}
1090 	}
1091 	if (strm->pd_api_started) {
1092 		/* Can't add more */
1093 		return (0);
1094 	}
1095 	while (control) {
1096 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1097 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1098 		nctl = TAILQ_NEXT(control, next_instrm);
1099 		if (control->end_added) {
1100 			/* We just put the last bit on */
1101 			if (control->on_strm_q) {
1102 #ifdef INVARIANTS
1103 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1104 					panic("Huh control: %p on_q: %d -- not unordered?",
1105 					    control, control->on_strm_q);
1106 				}
1107 #endif
1108 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1109 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1110 				control->on_strm_q = 0;
1111 			}
1112 			if (control->on_read_q == 0) {
1113 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1114 				    control,
1115 				    &stcb->sctp_socket->so_rcv, control->end_added,
1116 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1117 			}
1118 		} else {
1119 			/* Can we do a PD-API for this un-ordered guy? */
1120 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1121 				strm->pd_api_started = 1;
1122 				control->pdapi_started = 1;
1123 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1124 				    control,
1125 				    &stcb->sctp_socket->so_rcv, control->end_added,
1126 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1127 
1128 				break;
1129 			}
1130 		}
1131 		control = nctl;
1132 	}
1133 done_un:
1134 	control = TAILQ_FIRST(&strm->inqueue);
1135 	if (strm->pd_api_started) {
1136 		/* Can't add more */
1137 		return (0);
1138 	}
1139 	if (control == NULL) {
1140 		return (ret);
1141 	}
1142 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1143 		/*
1144 		 * Ok the guy at the top was being partially delivered
1145 		 * completed, so we remove it. Note the pd_api flag was
1146 		 * taken off when the chunk was merged on in
1147 		 * sctp_queue_data_for_reasm below.
1148 		 */
1149 		nctl = TAILQ_NEXT(control, next_instrm);
1150 		SCTPDBG(SCTP_DEBUG_XXX,
1151 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1152 		    control, control->end_added, control->mid,
1153 		    control->top_fsn, control->fsn_included,
1154 		    strm->last_mid_delivered);
1155 		if (control->end_added) {
1156 			if (control->on_strm_q) {
1157 #ifdef INVARIANTS
1158 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1159 					panic("Huh control: %p on_q: %d -- not ordered?",
1160 					    control, control->on_strm_q);
1161 				}
1162 #endif
1163 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1164 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1165 				if (asoc->size_on_all_streams >= control->length) {
1166 					asoc->size_on_all_streams -= control->length;
1167 				} else {
1168 #ifdef INVARIANTS
1169 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1170 #else
1171 					asoc->size_on_all_streams = 0;
1172 #endif
1173 				}
1174 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1175 				control->on_strm_q = 0;
1176 			}
1177 			if (strm->pd_api_started && control->pdapi_started) {
1178 				control->pdapi_started = 0;
1179 				strm->pd_api_started = 0;
1180 			}
1181 			if (control->on_read_q == 0) {
1182 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1183 				    control,
1184 				    &stcb->sctp_socket->so_rcv, control->end_added,
1185 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1186 			}
1187 			control = nctl;
1188 		}
1189 	}
1190 	if (strm->pd_api_started) {
1191 		/*
1192 		 * Can't add more must have gotten an un-ordered above being
1193 		 * partially delivered.
1194 		 */
1195 		return (0);
1196 	}
1197 deliver_more:
1198 	next_to_del = strm->last_mid_delivered + 1;
1199 	if (control) {
1200 		SCTPDBG(SCTP_DEBUG_XXX,
1201 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1202 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1203 		    next_to_del);
1204 		nctl = TAILQ_NEXT(control, next_instrm);
1205 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1206 		    (control->first_frag_seen)) {
1207 			int done;
1208 
1209 			/* Ok we can deliver it onto the stream. */
1210 			if (control->end_added) {
1211 				/* We are done with it afterwards */
1212 				if (control->on_strm_q) {
1213 #ifdef INVARIANTS
1214 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1215 						panic("Huh control: %p on_q: %d -- not ordered?",
1216 						    control, control->on_strm_q);
1217 					}
1218 #endif
1219 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1220 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1221 					if (asoc->size_on_all_streams >= control->length) {
1222 						asoc->size_on_all_streams -= control->length;
1223 					} else {
1224 #ifdef INVARIANTS
1225 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1226 #else
1227 						asoc->size_on_all_streams = 0;
1228 #endif
1229 					}
1230 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1231 					control->on_strm_q = 0;
1232 				}
1233 				ret++;
1234 			}
1235 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1236 				/*
1237 				 * A singleton now slipping through - mark
1238 				 * it non-revokable too
1239 				 */
1240 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1241 			} else if (control->end_added == 0) {
1242 				/*
1243 				 * Check if we can defer adding until its
1244 				 * all there
1245 				 */
1246 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1247 					/*
1248 					 * Don't need it or cannot add more
1249 					 * (one being delivered that way)
1250 					 */
1251 					goto out;
1252 				}
1253 			}
1254 			done = (control->end_added) && (control->last_frag_seen);
1255 			if (control->on_read_q == 0) {
1256 				if (!done) {
1257 					if (asoc->size_on_all_streams >= control->length) {
1258 						asoc->size_on_all_streams -= control->length;
1259 					} else {
1260 #ifdef INVARIANTS
1261 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1262 #else
1263 						asoc->size_on_all_streams = 0;
1264 #endif
1265 					}
1266 					strm->pd_api_started = 1;
1267 					control->pdapi_started = 1;
1268 				}
1269 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1270 				    control,
1271 				    &stcb->sctp_socket->so_rcv, control->end_added,
1272 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1273 			}
1274 			strm->last_mid_delivered = next_to_del;
1275 			if (done) {
1276 				control = nctl;
1277 				goto deliver_more;
1278 			}
1279 		}
1280 	}
1281 out:
1282 	return (ret);
1283 }
1284 
1285 
1286 uint32_t
1287 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1288     struct sctp_stream_in *strm,
1289     struct sctp_tcb *stcb, struct sctp_association *asoc,
1290     struct sctp_tmit_chunk *chk, int hold_rlock)
1291 {
1292 	/*
1293 	 * Given a control and a chunk, merge the data from the chk onto the
1294 	 * control and free up the chunk resources.
1295 	 */
1296 	uint32_t added = 0;
1297 	int i_locked = 0;
1298 
1299 	if (control->on_read_q && (hold_rlock == 0)) {
1300 		/*
1301 		 * Its being pd-api'd so we must do some locks.
1302 		 */
1303 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1304 		i_locked = 1;
1305 	}
1306 	if (control->data == NULL) {
1307 		control->data = chk->data;
1308 		sctp_setup_tail_pointer(control);
1309 	} else {
1310 		sctp_add_to_tail_pointer(control, chk->data, &added);
1311 	}
1312 	control->fsn_included = chk->rec.data.fsn;
1313 	asoc->size_on_reasm_queue -= chk->send_size;
1314 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1315 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1316 	chk->data = NULL;
1317 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1318 		control->first_frag_seen = 1;
1319 		control->sinfo_tsn = chk->rec.data.tsn;
1320 		control->sinfo_ppid = chk->rec.data.ppid;
1321 	}
1322 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1323 		/* Its complete */
1324 		if ((control->on_strm_q) && (control->on_read_q)) {
1325 			if (control->pdapi_started) {
1326 				control->pdapi_started = 0;
1327 				strm->pd_api_started = 0;
1328 			}
1329 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1330 				/* Unordered */
1331 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1332 				control->on_strm_q = 0;
1333 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1334 				/* Ordered */
1335 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1336 				/*
1337 				 * Don't need to decrement
1338 				 * size_on_all_streams, since control is on
1339 				 * the read queue.
1340 				 */
1341 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1342 				control->on_strm_q = 0;
1343 #ifdef INVARIANTS
1344 			} else if (control->on_strm_q) {
1345 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1346 				    control->on_strm_q);
1347 #endif
1348 			}
1349 		}
1350 		control->end_added = 1;
1351 		control->last_frag_seen = 1;
1352 	}
1353 	if (i_locked) {
1354 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1355 	}
1356 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1357 	return (added);
1358 }
1359 
1360 /*
1361  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1362  * queue, see if anthing can be delivered. If so pull it off (or as much as
1363  * we can. If we run out of space then we must dump what we can and set the
1364  * appropriate flag to say we queued what we could.
1365  */
1366 static void
1367 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1368     struct sctp_queued_to_read *control,
1369     struct sctp_tmit_chunk *chk,
1370     int created_control,
1371     int *abort_flag, uint32_t tsn)
1372 {
1373 	uint32_t next_fsn;
1374 	struct sctp_tmit_chunk *at, *nat;
1375 	struct sctp_stream_in *strm;
1376 	int do_wakeup, unordered;
1377 	uint32_t lenadded;
1378 
1379 	strm = &asoc->strmin[control->sinfo_stream];
1380 	/*
1381 	 * For old un-ordered data chunks.
1382 	 */
1383 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1384 		unordered = 1;
1385 	} else {
1386 		unordered = 0;
1387 	}
1388 	/* Must be added to the stream-in queue */
1389 	if (created_control) {
1390 		if (unordered == 0) {
1391 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1392 		}
1393 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1394 			/* Duplicate SSN? */
1395 			sctp_abort_in_reasm(stcb, control, chk,
1396 			    abort_flag,
1397 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1398 			sctp_clean_up_control(stcb, control);
1399 			return;
1400 		}
1401 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1402 			/*
1403 			 * Ok we created this control and now lets validate
1404 			 * that its legal i.e. there is a B bit set, if not
1405 			 * and we have up to the cum-ack then its invalid.
1406 			 */
1407 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1408 				sctp_abort_in_reasm(stcb, control, chk,
1409 				    abort_flag,
1410 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1411 				return;
1412 			}
1413 		}
1414 	}
1415 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1416 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1417 		return;
1418 	}
1419 	/*
1420 	 * Ok we must queue the chunk into the reasembly portion: o if its
1421 	 * the first it goes to the control mbuf. o if its not first but the
1422 	 * next in sequence it goes to the control, and each succeeding one
1423 	 * in order also goes. o if its not in order we place it on the list
1424 	 * in its place.
1425 	 */
1426 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1427 		/* Its the very first one. */
1428 		SCTPDBG(SCTP_DEBUG_XXX,
1429 		    "chunk is a first fsn: %u becomes fsn_included\n",
1430 		    chk->rec.data.fsn);
1431 		if (control->first_frag_seen) {
1432 			/*
1433 			 * Error on senders part, they either sent us two
1434 			 * data chunks with FIRST, or they sent two
1435 			 * un-ordered chunks that were fragmented at the
1436 			 * same time in the same stream.
1437 			 */
1438 			sctp_abort_in_reasm(stcb, control, chk,
1439 			    abort_flag,
1440 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1441 			return;
1442 		}
1443 		control->first_frag_seen = 1;
1444 		control->sinfo_ppid = chk->rec.data.ppid;
1445 		control->sinfo_tsn = chk->rec.data.tsn;
1446 		control->fsn_included = chk->rec.data.fsn;
1447 		control->data = chk->data;
1448 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1449 		chk->data = NULL;
1450 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1451 		sctp_setup_tail_pointer(control);
1452 		asoc->size_on_all_streams += control->length;
1453 	} else {
1454 		/* Place the chunk in our list */
1455 		int inserted = 0;
1456 
1457 		if (control->last_frag_seen == 0) {
1458 			/* Still willing to raise highest FSN seen */
1459 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1460 				SCTPDBG(SCTP_DEBUG_XXX,
1461 				    "We have a new top_fsn: %u\n",
1462 				    chk->rec.data.fsn);
1463 				control->top_fsn = chk->rec.data.fsn;
1464 			}
1465 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1466 				SCTPDBG(SCTP_DEBUG_XXX,
1467 				    "The last fsn is now in place fsn: %u\n",
1468 				    chk->rec.data.fsn);
1469 				control->last_frag_seen = 1;
1470 			}
1471 			if (asoc->idata_supported || control->first_frag_seen) {
1472 				/*
1473 				 * For IDATA we always check since we know
1474 				 * that the first fragment is 0. For old
1475 				 * DATA we have to receive the first before
1476 				 * we know the first FSN (which is the TSN).
1477 				 */
1478 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1479 					/*
1480 					 * We have already delivered up to
1481 					 * this so its a dup
1482 					 */
1483 					sctp_abort_in_reasm(stcb, control, chk,
1484 					    abort_flag,
1485 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1486 					return;
1487 				}
1488 			}
1489 		} else {
1490 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1491 				/* Second last? huh? */
1492 				SCTPDBG(SCTP_DEBUG_XXX,
1493 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1494 				    chk->rec.data.fsn, control->top_fsn);
1495 				sctp_abort_in_reasm(stcb, control,
1496 				    chk, abort_flag,
1497 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1498 				return;
1499 			}
1500 			if (asoc->idata_supported || control->first_frag_seen) {
1501 				/*
1502 				 * For IDATA we always check since we know
1503 				 * that the first fragment is 0. For old
1504 				 * DATA we have to receive the first before
1505 				 * we know the first FSN (which is the TSN).
1506 				 */
1507 
1508 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1509 					/*
1510 					 * We have already delivered up to
1511 					 * this so its a dup
1512 					 */
1513 					SCTPDBG(SCTP_DEBUG_XXX,
1514 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1515 					    chk->rec.data.fsn, control->fsn_included);
1516 					sctp_abort_in_reasm(stcb, control, chk,
1517 					    abort_flag,
1518 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1519 					return;
1520 				}
1521 			}
1522 			/*
1523 			 * validate not beyond top FSN if we have seen last
1524 			 * one
1525 			 */
1526 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1527 				SCTPDBG(SCTP_DEBUG_XXX,
1528 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1529 				    chk->rec.data.fsn,
1530 				    control->top_fsn);
1531 				sctp_abort_in_reasm(stcb, control, chk,
1532 				    abort_flag,
1533 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1534 				return;
1535 			}
1536 		}
1537 		/*
1538 		 * If we reach here, we need to place the new chunk in the
1539 		 * reassembly for this control.
1540 		 */
1541 		SCTPDBG(SCTP_DEBUG_XXX,
1542 		    "chunk is a not first fsn: %u needs to be inserted\n",
1543 		    chk->rec.data.fsn);
1544 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1545 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1546 				/*
1547 				 * This one in queue is bigger than the new
1548 				 * one, insert the new one before at.
1549 				 */
1550 				SCTPDBG(SCTP_DEBUG_XXX,
1551 				    "Insert it before fsn: %u\n",
1552 				    at->rec.data.fsn);
1553 				asoc->size_on_reasm_queue += chk->send_size;
1554 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1555 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1556 				inserted = 1;
1557 				break;
1558 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1559 				/*
1560 				 * Gak, He sent me a duplicate str seq
1561 				 * number
1562 				 */
1563 				/*
1564 				 * foo bar, I guess I will just free this
1565 				 * new guy, should we abort too? FIX ME
1566 				 * MAYBE? Or it COULD be that the SSN's have
1567 				 * wrapped. Maybe I should compare to TSN
1568 				 * somehow... sigh for now just blow away
1569 				 * the chunk!
1570 				 */
1571 				SCTPDBG(SCTP_DEBUG_XXX,
1572 				    "Duplicate to fsn: %u -- abort\n",
1573 				    at->rec.data.fsn);
1574 				sctp_abort_in_reasm(stcb, control,
1575 				    chk, abort_flag,
1576 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1577 				return;
1578 			}
1579 		}
1580 		if (inserted == 0) {
1581 			/* Goes on the end */
1582 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1583 			    chk->rec.data.fsn);
1584 			asoc->size_on_reasm_queue += chk->send_size;
1585 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1586 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1587 		}
1588 	}
1589 	/*
1590 	 * Ok lets see if we can suck any up into the control structure that
1591 	 * are in seq if it makes sense.
1592 	 */
1593 	do_wakeup = 0;
1594 	/*
1595 	 * If the first fragment has not been seen there is no sense in
1596 	 * looking.
1597 	 */
1598 	if (control->first_frag_seen) {
1599 		next_fsn = control->fsn_included + 1;
1600 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1601 			if (at->rec.data.fsn == next_fsn) {
1602 				/* We can add this one now to the control */
1603 				SCTPDBG(SCTP_DEBUG_XXX,
1604 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1605 				    control, at,
1606 				    at->rec.data.fsn,
1607 				    next_fsn, control->fsn_included);
1608 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1609 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1610 				if (control->on_read_q) {
1611 					do_wakeup = 1;
1612 				} else {
1613 					/*
1614 					 * We only add to the
1615 					 * size-on-all-streams if its not on
1616 					 * the read q. The read q flag will
1617 					 * cause a sballoc so its accounted
1618 					 * for there.
1619 					 */
1620 					asoc->size_on_all_streams += lenadded;
1621 				}
1622 				next_fsn++;
1623 				if (control->end_added && control->pdapi_started) {
1624 					if (strm->pd_api_started) {
1625 						strm->pd_api_started = 0;
1626 						control->pdapi_started = 0;
1627 					}
1628 					if (control->on_read_q == 0) {
1629 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1630 						    control,
1631 						    &stcb->sctp_socket->so_rcv, control->end_added,
1632 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1633 					}
1634 					break;
1635 				}
1636 			} else {
1637 				break;
1638 			}
1639 		}
1640 	}
1641 	if (do_wakeup) {
1642 		/* Need to wakeup the reader */
1643 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1644 	}
1645 }
1646 
1647 static struct sctp_queued_to_read *
1648 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1649 {
1650 	struct sctp_queued_to_read *control;
1651 
1652 	if (ordered) {
1653 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1654 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1655 				break;
1656 			}
1657 		}
1658 	} else {
1659 		if (idata_supported) {
1660 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1661 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1662 					break;
1663 				}
1664 			}
1665 		} else {
1666 			control = TAILQ_FIRST(&strm->uno_inqueue);
1667 		}
1668 	}
1669 	return (control);
1670 }
1671 
1672 static int
1673 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1674     struct mbuf **m, int offset, int chk_length,
1675     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1676     int *break_flag, int last_chunk, uint8_t chk_type)
1677 {
1678 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1679 	uint32_t tsn, fsn, gap, mid;
1680 	struct mbuf *dmbuf;
1681 	int the_len;
1682 	int need_reasm_check = 0;
1683 	uint16_t sid;
1684 	struct mbuf *op_err;
1685 	char msg[SCTP_DIAG_INFO_LEN];
1686 	struct sctp_queued_to_read *control, *ncontrol;
1687 	uint32_t ppid;
1688 	uint8_t chk_flags;
1689 	struct sctp_stream_reset_list *liste;
1690 	int ordered;
1691 	size_t clen;
1692 	int created_control = 0;
1693 
1694 	if (chk_type == SCTP_IDATA) {
1695 		struct sctp_idata_chunk *chunk, chunk_buf;
1696 
1697 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1698 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1699 		chk_flags = chunk->ch.chunk_flags;
1700 		clen = sizeof(struct sctp_idata_chunk);
1701 		tsn = ntohl(chunk->dp.tsn);
1702 		sid = ntohs(chunk->dp.sid);
1703 		mid = ntohl(chunk->dp.mid);
1704 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1705 			fsn = 0;
1706 			ppid = chunk->dp.ppid_fsn.ppid;
1707 		} else {
1708 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1709 			ppid = 0xffffffff;	/* Use as an invalid value. */
1710 		}
1711 	} else {
1712 		struct sctp_data_chunk *chunk, chunk_buf;
1713 
1714 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1715 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1716 		chk_flags = chunk->ch.chunk_flags;
1717 		clen = sizeof(struct sctp_data_chunk);
1718 		tsn = ntohl(chunk->dp.tsn);
1719 		sid = ntohs(chunk->dp.sid);
1720 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1721 		fsn = tsn;
1722 		ppid = chunk->dp.ppid;
1723 	}
1724 	if ((size_t)chk_length == clen) {
1725 		/*
1726 		 * Need to send an abort since we had a empty data chunk.
1727 		 */
1728 		op_err = sctp_generate_no_user_data_cause(tsn);
1729 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1730 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1731 		*abort_flag = 1;
1732 		return (0);
1733 	}
1734 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1735 		asoc->send_sack = 1;
1736 	}
1737 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1738 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1739 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1740 	}
1741 	if (stcb == NULL) {
1742 		return (0);
1743 	}
1744 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1745 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1746 		/* It is a duplicate */
1747 		SCTP_STAT_INCR(sctps_recvdupdata);
1748 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1749 			/* Record a dup for the next outbound sack */
1750 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1751 			asoc->numduptsns++;
1752 		}
1753 		asoc->send_sack = 1;
1754 		return (0);
1755 	}
1756 	/* Calculate the number of TSN's between the base and this TSN */
1757 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1758 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1759 		/* Can't hold the bit in the mapping at max array, toss it */
1760 		return (0);
1761 	}
1762 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1763 		SCTP_TCB_LOCK_ASSERT(stcb);
1764 		if (sctp_expand_mapping_array(asoc, gap)) {
1765 			/* Can't expand, drop it */
1766 			return (0);
1767 		}
1768 	}
1769 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1770 		*high_tsn = tsn;
1771 	}
1772 	/* See if we have received this one already */
1773 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1774 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1775 		SCTP_STAT_INCR(sctps_recvdupdata);
1776 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 			/* Record a dup for the next outbound sack */
1778 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 			asoc->numduptsns++;
1780 		}
1781 		asoc->send_sack = 1;
1782 		return (0);
1783 	}
1784 	/*
1785 	 * Check to see about the GONE flag, duplicates would cause a sack
1786 	 * to be sent up above
1787 	 */
1788 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1789 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1790 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1791 		/*
1792 		 * wait a minute, this guy is gone, there is no longer a
1793 		 * receiver. Send peer an ABORT!
1794 		 */
1795 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1796 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1797 		*abort_flag = 1;
1798 		return (0);
1799 	}
1800 	/*
1801 	 * Now before going further we see if there is room. If NOT then we
1802 	 * MAY let one through only IF this TSN is the one we are waiting
1803 	 * for on a partial delivery API.
1804 	 */
1805 
1806 	/* Is the stream valid? */
1807 	if (sid >= asoc->streamincnt) {
1808 		struct sctp_error_invalid_stream *cause;
1809 
1810 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1811 		    0, M_NOWAIT, 1, MT_DATA);
1812 		if (op_err != NULL) {
1813 			/* add some space up front so prepend will work well */
1814 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1815 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1816 			/*
1817 			 * Error causes are just param's and this one has
1818 			 * two back to back phdr, one with the error type
1819 			 * and size, the other with the streamid and a rsvd
1820 			 */
1821 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1822 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1823 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1824 			cause->stream_id = htons(sid);
1825 			cause->reserved = htons(0);
1826 			sctp_queue_op_err(stcb, op_err);
1827 		}
1828 		SCTP_STAT_INCR(sctps_badsid);
1829 		SCTP_TCB_LOCK_ASSERT(stcb);
1830 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1831 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1832 			asoc->highest_tsn_inside_nr_map = tsn;
1833 		}
1834 		if (tsn == (asoc->cumulative_tsn + 1)) {
1835 			/* Update cum-ack */
1836 			asoc->cumulative_tsn = tsn;
1837 		}
1838 		return (0);
1839 	}
1840 	/*
1841 	 * If its a fragmented message, lets see if we can find the control
1842 	 * on the reassembly queues.
1843 	 */
1844 	if ((chk_type == SCTP_IDATA) &&
1845 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1846 	    (fsn == 0)) {
1847 		/*
1848 		 * The first *must* be fsn 0, and other (middle/end) pieces
1849 		 * can *not* be fsn 0. XXX: This can happen in case of a
1850 		 * wrap around. Ignore is for now.
1851 		 */
1852 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1853 		    mid, chk_flags);
1854 		goto err_out;
1855 	}
1856 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1857 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1858 	    chk_flags, control);
1859 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1860 		/* See if we can find the re-assembly entity */
1861 		if (control != NULL) {
1862 			/* We found something, does it belong? */
1863 			if (ordered && (mid != control->mid)) {
1864 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1865 		err_out:
1866 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1867 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1868 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1869 				*abort_flag = 1;
1870 				return (0);
1871 			}
1872 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1873 				/*
1874 				 * We can't have a switched order with an
1875 				 * unordered chunk
1876 				 */
1877 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1878 				    tsn);
1879 				goto err_out;
1880 			}
1881 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1882 				/*
1883 				 * We can't have a switched unordered with a
1884 				 * ordered chunk
1885 				 */
1886 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1887 				    tsn);
1888 				goto err_out;
1889 			}
1890 		}
1891 	} else {
1892 		/*
1893 		 * Its a complete segment. Lets validate we don't have a
1894 		 * re-assembly going on with the same Stream/Seq (for
1895 		 * ordered) or in the same Stream for unordered.
1896 		 */
1897 		if (control != NULL) {
1898 			if (ordered || asoc->idata_supported) {
1899 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1900 				    chk_flags, mid);
1901 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1902 				goto err_out;
1903 			} else {
1904 				if ((tsn == control->fsn_included + 1) &&
1905 				    (control->end_added == 0)) {
1906 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1907 					goto err_out;
1908 				} else {
1909 					control = NULL;
1910 				}
1911 			}
1912 		}
1913 	}
1914 	/* now do the tests */
1915 	if (((asoc->cnt_on_all_streams +
1916 	    asoc->cnt_on_reasm_queue +
1917 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1918 	    (((int)asoc->my_rwnd) <= 0)) {
1919 		/*
1920 		 * When we have NO room in the rwnd we check to make sure
1921 		 * the reader is doing its job...
1922 		 */
1923 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1924 			/* some to read, wake-up */
1925 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1926 			struct socket *so;
1927 
1928 			so = SCTP_INP_SO(stcb->sctp_ep);
1929 			atomic_add_int(&stcb->asoc.refcnt, 1);
1930 			SCTP_TCB_UNLOCK(stcb);
1931 			SCTP_SOCKET_LOCK(so, 1);
1932 			SCTP_TCB_LOCK(stcb);
1933 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1934 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1935 				/* assoc was freed while we were unlocked */
1936 				SCTP_SOCKET_UNLOCK(so, 1);
1937 				return (0);
1938 			}
1939 #endif
1940 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1941 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1942 			SCTP_SOCKET_UNLOCK(so, 1);
1943 #endif
1944 		}
1945 		/* now is it in the mapping array of what we have accepted? */
1946 		if (chk_type == SCTP_DATA) {
1947 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1948 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1949 				/* Nope not in the valid range dump it */
1950 		dump_packet:
1951 				sctp_set_rwnd(stcb, asoc);
1952 				if ((asoc->cnt_on_all_streams +
1953 				    asoc->cnt_on_reasm_queue +
1954 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1955 					SCTP_STAT_INCR(sctps_datadropchklmt);
1956 				} else {
1957 					SCTP_STAT_INCR(sctps_datadroprwnd);
1958 				}
1959 				*break_flag = 1;
1960 				return (0);
1961 			}
1962 		} else {
1963 			if (control == NULL) {
1964 				goto dump_packet;
1965 			}
1966 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1967 				goto dump_packet;
1968 			}
1969 		}
1970 	}
1971 #ifdef SCTP_ASOCLOG_OF_TSNS
1972 	SCTP_TCB_LOCK_ASSERT(stcb);
1973 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1974 		asoc->tsn_in_at = 0;
1975 		asoc->tsn_in_wrapped = 1;
1976 	}
1977 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1978 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1979 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1980 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1981 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1982 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1983 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1984 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1985 	asoc->tsn_in_at++;
1986 #endif
1987 	/*
1988 	 * Before we continue lets validate that we are not being fooled by
1989 	 * an evil attacker. We can only have Nk chunks based on our TSN
1990 	 * spread allowed by the mapping array N * 8 bits, so there is no
1991 	 * way our stream sequence numbers could have wrapped. We of course
1992 	 * only validate the FIRST fragment so the bit must be set.
1993 	 */
1994 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1995 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1996 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1997 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1998 		/* The incoming sseq is behind where we last delivered? */
1999 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2000 		    mid, asoc->strmin[sid].last_mid_delivered);
2001 
2002 		if (asoc->idata_supported) {
2003 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2004 			    asoc->strmin[sid].last_mid_delivered,
2005 			    tsn,
2006 			    sid,
2007 			    mid);
2008 		} else {
2009 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2010 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2011 			    tsn,
2012 			    sid,
2013 			    (uint16_t)mid);
2014 		}
2015 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2016 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2017 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2018 		*abort_flag = 1;
2019 		return (0);
2020 	}
2021 	if (chk_type == SCTP_IDATA) {
2022 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2023 	} else {
2024 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2025 	}
2026 	if (last_chunk == 0) {
2027 		if (chk_type == SCTP_IDATA) {
2028 			dmbuf = SCTP_M_COPYM(*m,
2029 			    (offset + sizeof(struct sctp_idata_chunk)),
2030 			    the_len, M_NOWAIT);
2031 		} else {
2032 			dmbuf = SCTP_M_COPYM(*m,
2033 			    (offset + sizeof(struct sctp_data_chunk)),
2034 			    the_len, M_NOWAIT);
2035 		}
2036 #ifdef SCTP_MBUF_LOGGING
2037 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2038 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2039 		}
2040 #endif
2041 	} else {
2042 		/* We can steal the last chunk */
2043 		int l_len;
2044 
2045 		dmbuf = *m;
2046 		/* lop off the top part */
2047 		if (chk_type == SCTP_IDATA) {
2048 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2049 		} else {
2050 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2051 		}
2052 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2053 			l_len = SCTP_BUF_LEN(dmbuf);
2054 		} else {
2055 			/*
2056 			 * need to count up the size hopefully does not hit
2057 			 * this to often :-0
2058 			 */
2059 			struct mbuf *lat;
2060 
2061 			l_len = 0;
2062 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2063 				l_len += SCTP_BUF_LEN(lat);
2064 			}
2065 		}
2066 		if (l_len > the_len) {
2067 			/* Trim the end round bytes off  too */
2068 			m_adj(dmbuf, -(l_len - the_len));
2069 		}
2070 	}
2071 	if (dmbuf == NULL) {
2072 		SCTP_STAT_INCR(sctps_nomem);
2073 		return (0);
2074 	}
2075 	/*
2076 	 * Now no matter what, we need a control, get one if we don't have
2077 	 * one (we may have gotten it above when we found the message was
2078 	 * fragmented
2079 	 */
2080 	if (control == NULL) {
2081 		sctp_alloc_a_readq(stcb, control);
2082 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2083 		    ppid,
2084 		    sid,
2085 		    chk_flags,
2086 		    NULL, fsn, mid);
2087 		if (control == NULL) {
2088 			SCTP_STAT_INCR(sctps_nomem);
2089 			return (0);
2090 		}
2091 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2092 			struct mbuf *mm;
2093 
2094 			control->data = dmbuf;
2095 			for (mm = control->data; mm; mm = mm->m_next) {
2096 				control->length += SCTP_BUF_LEN(mm);
2097 			}
2098 			control->tail_mbuf = NULL;
2099 			control->end_added = 1;
2100 			control->last_frag_seen = 1;
2101 			control->first_frag_seen = 1;
2102 			control->fsn_included = fsn;
2103 			control->top_fsn = fsn;
2104 		}
2105 		created_control = 1;
2106 	}
2107 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2108 	    chk_flags, ordered, mid, control);
2109 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2110 	    TAILQ_EMPTY(&asoc->resetHead) &&
2111 	    ((ordered == 0) ||
2112 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2113 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2114 		/* Candidate for express delivery */
2115 		/*
2116 		 * Its not fragmented, No PD-API is up, Nothing in the
2117 		 * delivery queue, Its un-ordered OR ordered and the next to
2118 		 * deliver AND nothing else is stuck on the stream queue,
2119 		 * And there is room for it in the socket buffer. Lets just
2120 		 * stuff it up the buffer....
2121 		 */
2122 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2123 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2124 			asoc->highest_tsn_inside_nr_map = tsn;
2125 		}
2126 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2127 		    control, mid);
2128 
2129 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2130 		    control, &stcb->sctp_socket->so_rcv,
2131 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2132 
2133 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2134 			/* for ordered, bump what we delivered */
2135 			asoc->strmin[sid].last_mid_delivered++;
2136 		}
2137 		SCTP_STAT_INCR(sctps_recvexpress);
2138 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2139 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2140 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2141 		}
2142 		control = NULL;
2143 		goto finish_express_del;
2144 	}
2145 
2146 	/* Now will we need a chunk too? */
2147 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2148 		sctp_alloc_a_chunk(stcb, chk);
2149 		if (chk == NULL) {
2150 			/* No memory so we drop the chunk */
2151 			SCTP_STAT_INCR(sctps_nomem);
2152 			if (last_chunk == 0) {
2153 				/* we copied it, free the copy */
2154 				sctp_m_freem(dmbuf);
2155 			}
2156 			return (0);
2157 		}
2158 		chk->rec.data.tsn = tsn;
2159 		chk->no_fr_allowed = 0;
2160 		chk->rec.data.fsn = fsn;
2161 		chk->rec.data.mid = mid;
2162 		chk->rec.data.sid = sid;
2163 		chk->rec.data.ppid = ppid;
2164 		chk->rec.data.context = stcb->asoc.context;
2165 		chk->rec.data.doing_fast_retransmit = 0;
2166 		chk->rec.data.rcv_flags = chk_flags;
2167 		chk->asoc = asoc;
2168 		chk->send_size = the_len;
2169 		chk->whoTo = net;
2170 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2171 		    chk,
2172 		    control, mid);
2173 		atomic_add_int(&net->ref_count, 1);
2174 		chk->data = dmbuf;
2175 	}
2176 	/* Set the appropriate TSN mark */
2177 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2178 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2179 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2180 			asoc->highest_tsn_inside_nr_map = tsn;
2181 		}
2182 	} else {
2183 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2184 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2185 			asoc->highest_tsn_inside_map = tsn;
2186 		}
2187 	}
2188 	/* Now is it complete (i.e. not fragmented)? */
2189 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2190 		/*
2191 		 * Special check for when streams are resetting. We could be
2192 		 * more smart about this and check the actual stream to see
2193 		 * if it is not being reset.. that way we would not create a
2194 		 * HOLB when amongst streams being reset and those not being
2195 		 * reset.
2196 		 *
2197 		 */
2198 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2199 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2200 			/*
2201 			 * yep its past where we need to reset... go ahead
2202 			 * and queue it.
2203 			 */
2204 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2205 				/* first one on */
2206 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2207 			} else {
2208 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2209 				unsigned char inserted = 0;
2210 
2211 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2212 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2213 
2214 						continue;
2215 					} else {
2216 						/* found it */
2217 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2218 						inserted = 1;
2219 						break;
2220 					}
2221 				}
2222 				if (inserted == 0) {
2223 					/*
2224 					 * must be put at end, use prevP
2225 					 * (all setup from loop) to setup
2226 					 * nextP.
2227 					 */
2228 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2229 				}
2230 			}
2231 			goto finish_express_del;
2232 		}
2233 		if (chk_flags & SCTP_DATA_UNORDERED) {
2234 			/* queue directly into socket buffer */
2235 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2236 			    control, mid);
2237 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2238 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2239 			    control,
2240 			    &stcb->sctp_socket->so_rcv, 1,
2241 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2242 
2243 		} else {
2244 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2245 			    mid);
2246 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2247 			if (*abort_flag) {
2248 				if (last_chunk) {
2249 					*m = NULL;
2250 				}
2251 				return (0);
2252 			}
2253 		}
2254 		goto finish_express_del;
2255 	}
2256 	/* If we reach here its a reassembly */
2257 	need_reasm_check = 1;
2258 	SCTPDBG(SCTP_DEBUG_XXX,
2259 	    "Queue data to stream for reasm control: %p MID: %u\n",
2260 	    control, mid);
2261 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2262 	if (*abort_flag) {
2263 		/*
2264 		 * the assoc is now gone and chk was put onto the reasm
2265 		 * queue, which has all been freed.
2266 		 */
2267 		if (last_chunk) {
2268 			*m = NULL;
2269 		}
2270 		return (0);
2271 	}
2272 finish_express_del:
2273 	/* Here we tidy up things */
2274 	if (tsn == (asoc->cumulative_tsn + 1)) {
2275 		/* Update cum-ack */
2276 		asoc->cumulative_tsn = tsn;
2277 	}
2278 	if (last_chunk) {
2279 		*m = NULL;
2280 	}
2281 	if (ordered) {
2282 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2283 	} else {
2284 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2285 	}
2286 	SCTP_STAT_INCR(sctps_recvdata);
2287 	/* Set it present please */
2288 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2289 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2290 	}
2291 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2292 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2293 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2294 	}
2295 	if (need_reasm_check) {
2296 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2297 		need_reasm_check = 0;
2298 	}
2299 	/* check the special flag for stream resets */
2300 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2301 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2302 		/*
2303 		 * we have finished working through the backlogged TSN's now
2304 		 * time to reset streams. 1: call reset function. 2: free
2305 		 * pending_reply space 3: distribute any chunks in
2306 		 * pending_reply_queue.
2307 		 */
2308 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2309 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2310 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2311 		SCTP_FREE(liste, SCTP_M_STRESET);
2312 		/* sa_ignore FREED_MEMORY */
2313 		liste = TAILQ_FIRST(&asoc->resetHead);
2314 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2315 			/* All can be removed */
2316 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2317 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2318 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2319 				if (*abort_flag) {
2320 					return (0);
2321 				}
2322 				if (need_reasm_check) {
2323 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2324 					need_reasm_check = 0;
2325 				}
2326 			}
2327 		} else {
2328 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2330 					break;
2331 				}
2332 				/*
2333 				 * if control->sinfo_tsn is <= liste->tsn we
2334 				 * can process it which is the NOT of
2335 				 * control->sinfo_tsn > liste->tsn
2336 				 */
2337 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2338 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2339 				if (*abort_flag) {
2340 					return (0);
2341 				}
2342 				if (need_reasm_check) {
2343 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2344 					need_reasm_check = 0;
2345 				}
2346 			}
2347 		}
2348 	}
2349 	return (1);
2350 }
2351 
2352 static const int8_t sctp_map_lookup_tab[256] = {
2353 	0, 1, 0, 2, 0, 1, 0, 3,
2354 	0, 1, 0, 2, 0, 1, 0, 4,
2355 	0, 1, 0, 2, 0, 1, 0, 3,
2356 	0, 1, 0, 2, 0, 1, 0, 5,
2357 	0, 1, 0, 2, 0, 1, 0, 3,
2358 	0, 1, 0, 2, 0, 1, 0, 4,
2359 	0, 1, 0, 2, 0, 1, 0, 3,
2360 	0, 1, 0, 2, 0, 1, 0, 6,
2361 	0, 1, 0, 2, 0, 1, 0, 3,
2362 	0, 1, 0, 2, 0, 1, 0, 4,
2363 	0, 1, 0, 2, 0, 1, 0, 3,
2364 	0, 1, 0, 2, 0, 1, 0, 5,
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 7,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 5,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 4,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 6,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 5,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 4,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 8
2385 };
2386 
2387 
2388 void
2389 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2390 {
2391 	/*
2392 	 * Now we also need to check the mapping array in a couple of ways.
2393 	 * 1) Did we move the cum-ack point?
2394 	 *
2395 	 * When you first glance at this you might think that all entries
2396 	 * that make up the position of the cum-ack would be in the
2397 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2398 	 * deliverable. Thats true with one exception, when its a fragmented
2399 	 * message we may not deliver the data until some threshold (or all
2400 	 * of it) is in place. So we must OR the nr_mapping_array and
2401 	 * mapping_array to get a true picture of the cum-ack.
2402 	 */
2403 	struct sctp_association *asoc;
2404 	int at;
2405 	uint8_t val;
2406 	int slide_from, slide_end, lgap, distance;
2407 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2408 
2409 	asoc = &stcb->asoc;
2410 
2411 	old_cumack = asoc->cumulative_tsn;
2412 	old_base = asoc->mapping_array_base_tsn;
2413 	old_highest = asoc->highest_tsn_inside_map;
2414 	/*
2415 	 * We could probably improve this a small bit by calculating the
2416 	 * offset of the current cum-ack as the starting point.
2417 	 */
2418 	at = 0;
2419 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2420 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2421 		if (val == 0xff) {
2422 			at += 8;
2423 		} else {
2424 			/* there is a 0 bit */
2425 			at += sctp_map_lookup_tab[val];
2426 			break;
2427 		}
2428 	}
2429 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2430 
2431 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2432 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2433 #ifdef INVARIANTS
2434 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2435 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2436 #else
2437 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2438 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2439 		sctp_print_mapping_array(asoc);
2440 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2441 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2442 		}
2443 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2444 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2445 #endif
2446 	}
2447 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2448 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2449 	} else {
2450 		highest_tsn = asoc->highest_tsn_inside_map;
2451 	}
2452 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2453 		/* The complete array was completed by a single FR */
2454 		/* highest becomes the cum-ack */
2455 		int clr;
2456 #ifdef INVARIANTS
2457 		unsigned int i;
2458 #endif
2459 
2460 		/* clear the array */
2461 		clr = ((at + 7) >> 3);
2462 		if (clr > asoc->mapping_array_size) {
2463 			clr = asoc->mapping_array_size;
2464 		}
2465 		memset(asoc->mapping_array, 0, clr);
2466 		memset(asoc->nr_mapping_array, 0, clr);
2467 #ifdef INVARIANTS
2468 		for (i = 0; i < asoc->mapping_array_size; i++) {
2469 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2470 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2471 				sctp_print_mapping_array(asoc);
2472 			}
2473 		}
2474 #endif
2475 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2476 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2477 	} else if (at >= 8) {
2478 		/* we can slide the mapping array down */
2479 		/* slide_from holds where we hit the first NON 0xff byte */
2480 
2481 		/*
2482 		 * now calculate the ceiling of the move using our highest
2483 		 * TSN value
2484 		 */
2485 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2486 		slide_end = (lgap >> 3);
2487 		if (slide_end < slide_from) {
2488 			sctp_print_mapping_array(asoc);
2489 #ifdef INVARIANTS
2490 			panic("impossible slide");
2491 #else
2492 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2493 			    lgap, slide_end, slide_from, at);
2494 			return;
2495 #endif
2496 		}
2497 		if (slide_end > asoc->mapping_array_size) {
2498 #ifdef INVARIANTS
2499 			panic("would overrun buffer");
2500 #else
2501 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2502 			    asoc->mapping_array_size, slide_end);
2503 			slide_end = asoc->mapping_array_size;
2504 #endif
2505 		}
2506 		distance = (slide_end - slide_from) + 1;
2507 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2508 			sctp_log_map(old_base, old_cumack, old_highest,
2509 			    SCTP_MAP_PREPARE_SLIDE);
2510 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2511 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2512 		}
2513 		if (distance + slide_from > asoc->mapping_array_size ||
2514 		    distance < 0) {
2515 			/*
2516 			 * Here we do NOT slide forward the array so that
2517 			 * hopefully when more data comes in to fill it up
2518 			 * we will be able to slide it forward. Really I
2519 			 * don't think this should happen :-0
2520 			 */
2521 
2522 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2523 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2524 				    (uint32_t)asoc->mapping_array_size,
2525 				    SCTP_MAP_SLIDE_NONE);
2526 			}
2527 		} else {
2528 			int ii;
2529 
2530 			for (ii = 0; ii < distance; ii++) {
2531 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2532 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2533 
2534 			}
2535 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2536 				asoc->mapping_array[ii] = 0;
2537 				asoc->nr_mapping_array[ii] = 0;
2538 			}
2539 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2540 				asoc->highest_tsn_inside_map += (slide_from << 3);
2541 			}
2542 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2543 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2544 			}
2545 			asoc->mapping_array_base_tsn += (slide_from << 3);
2546 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2547 				sctp_log_map(asoc->mapping_array_base_tsn,
2548 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2549 				    SCTP_MAP_SLIDE_RESULT);
2550 			}
2551 		}
2552 	}
2553 }
2554 
2555 void
2556 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2557 {
2558 	struct sctp_association *asoc;
2559 	uint32_t highest_tsn;
2560 	int is_a_gap;
2561 
2562 	sctp_slide_mapping_arrays(stcb);
2563 	asoc = &stcb->asoc;
2564 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2565 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2566 	} else {
2567 		highest_tsn = asoc->highest_tsn_inside_map;
2568 	}
2569 	/* Is there a gap now? */
2570 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2571 
2572 	/*
2573 	 * Now we need to see if we need to queue a sack or just start the
2574 	 * timer (if allowed).
2575 	 */
2576 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2577 		/*
2578 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2579 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2580 		 * SACK
2581 		 */
2582 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2583 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2584 			    stcb->sctp_ep, stcb, NULL,
2585 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2586 		}
2587 		sctp_send_shutdown(stcb,
2588 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2589 		if (is_a_gap) {
2590 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2591 		}
2592 	} else {
2593 		/*
2594 		 * CMT DAC algorithm: increase number of packets received
2595 		 * since last ack
2596 		 */
2597 		stcb->asoc.cmt_dac_pkts_rcvd++;
2598 
2599 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2600 							 * SACK */
2601 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2602 							 * longer is one */
2603 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2604 		    (is_a_gap) ||	/* is still a gap */
2605 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2606 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2607 		    ) {
2608 
2609 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2610 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2611 			    (stcb->asoc.send_sack == 0) &&
2612 			    (stcb->asoc.numduptsns == 0) &&
2613 			    (stcb->asoc.delayed_ack) &&
2614 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2615 
2616 				/*
2617 				 * CMT DAC algorithm: With CMT, delay acks
2618 				 * even in the face of
2619 				 *
2620 				 * reordering. Therefore, if acks that do
2621 				 * not have to be sent because of the above
2622 				 * reasons, will be delayed. That is, acks
2623 				 * that would have been sent due to gap
2624 				 * reports will be delayed with DAC. Start
2625 				 * the delayed ack timer.
2626 				 */
2627 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2628 				    stcb->sctp_ep, stcb, NULL);
2629 			} else {
2630 				/*
2631 				 * Ok we must build a SACK since the timer
2632 				 * is pending, we got our first packet OR
2633 				 * there are gaps or duplicates.
2634 				 */
2635 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2636 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2637 			}
2638 		} else {
2639 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2640 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2641 				    stcb->sctp_ep, stcb, NULL);
2642 			}
2643 		}
2644 	}
2645 }
2646 
2647 int
2648 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2649     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2650     struct sctp_nets *net, uint32_t *high_tsn)
2651 {
2652 	struct sctp_chunkhdr *ch, chunk_buf;
2653 	struct sctp_association *asoc;
2654 	int num_chunks = 0;	/* number of control chunks processed */
2655 	int stop_proc = 0;
2656 	int break_flag, last_chunk;
2657 	int abort_flag = 0, was_a_gap;
2658 	struct mbuf *m;
2659 	uint32_t highest_tsn;
2660 	uint16_t chk_length;
2661 
2662 	/* set the rwnd */
2663 	sctp_set_rwnd(stcb, &stcb->asoc);
2664 
2665 	m = *mm;
2666 	SCTP_TCB_LOCK_ASSERT(stcb);
2667 	asoc = &stcb->asoc;
2668 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2669 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2670 	} else {
2671 		highest_tsn = asoc->highest_tsn_inside_map;
2672 	}
2673 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2674 	/*
2675 	 * setup where we got the last DATA packet from for any SACK that
2676 	 * may need to go out. Don't bump the net. This is done ONLY when a
2677 	 * chunk is assigned.
2678 	 */
2679 	asoc->last_data_chunk_from = net;
2680 
2681 	/*-
2682 	 * Now before we proceed we must figure out if this is a wasted
2683 	 * cluster... i.e. it is a small packet sent in and yet the driver
2684 	 * underneath allocated a full cluster for it. If so we must copy it
2685 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2686 	 * with cluster starvation. Note for __Panda__ we don't do this
2687 	 * since it has clusters all the way down to 64 bytes.
2688 	 */
2689 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2690 		/* we only handle mbufs that are singletons.. not chains */
2691 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2692 		if (m) {
2693 			/* ok lets see if we can copy the data up */
2694 			caddr_t *from, *to;
2695 
2696 			/* get the pointers and copy */
2697 			to = mtod(m, caddr_t *);
2698 			from = mtod((*mm), caddr_t *);
2699 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2700 			/* copy the length and free up the old */
2701 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2702 			sctp_m_freem(*mm);
2703 			/* success, back copy */
2704 			*mm = m;
2705 		} else {
2706 			/* We are in trouble in the mbuf world .. yikes */
2707 			m = *mm;
2708 		}
2709 	}
2710 	/* get pointer to the first chunk header */
2711 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2712 	    sizeof(struct sctp_chunkhdr),
2713 	    (uint8_t *)&chunk_buf);
2714 	if (ch == NULL) {
2715 		return (1);
2716 	}
2717 	/*
2718 	 * process all DATA chunks...
2719 	 */
2720 	*high_tsn = asoc->cumulative_tsn;
2721 	break_flag = 0;
2722 	asoc->data_pkts_seen++;
2723 	while (stop_proc == 0) {
2724 		/* validate chunk length */
2725 		chk_length = ntohs(ch->chunk_length);
2726 		if (length - *offset < chk_length) {
2727 			/* all done, mutulated chunk */
2728 			stop_proc = 1;
2729 			continue;
2730 		}
2731 		if ((asoc->idata_supported == 1) &&
2732 		    (ch->chunk_type == SCTP_DATA)) {
2733 			struct mbuf *op_err;
2734 			char msg[SCTP_DIAG_INFO_LEN];
2735 
2736 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2737 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2738 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2739 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2740 			return (2);
2741 		}
2742 		if ((asoc->idata_supported == 0) &&
2743 		    (ch->chunk_type == SCTP_IDATA)) {
2744 			struct mbuf *op_err;
2745 			char msg[SCTP_DIAG_INFO_LEN];
2746 
2747 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2748 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2749 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2750 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2751 			return (2);
2752 		}
2753 		if ((ch->chunk_type == SCTP_DATA) ||
2754 		    (ch->chunk_type == SCTP_IDATA)) {
2755 			uint16_t clen;
2756 
2757 			if (ch->chunk_type == SCTP_DATA) {
2758 				clen = sizeof(struct sctp_data_chunk);
2759 			} else {
2760 				clen = sizeof(struct sctp_idata_chunk);
2761 			}
2762 			if (chk_length < clen) {
2763 				/*
2764 				 * Need to send an abort since we had a
2765 				 * invalid data chunk.
2766 				 */
2767 				struct mbuf *op_err;
2768 				char msg[SCTP_DIAG_INFO_LEN];
2769 
2770 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2771 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2772 				    chk_length);
2773 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2774 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2775 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2776 				return (2);
2777 			}
2778 #ifdef SCTP_AUDITING_ENABLED
2779 			sctp_audit_log(0xB1, 0);
2780 #endif
2781 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2782 				last_chunk = 1;
2783 			} else {
2784 				last_chunk = 0;
2785 			}
2786 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2787 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2788 			    last_chunk, ch->chunk_type)) {
2789 				num_chunks++;
2790 			}
2791 			if (abort_flag)
2792 				return (2);
2793 
2794 			if (break_flag) {
2795 				/*
2796 				 * Set because of out of rwnd space and no
2797 				 * drop rep space left.
2798 				 */
2799 				stop_proc = 1;
2800 				continue;
2801 			}
2802 		} else {
2803 			/* not a data chunk in the data region */
2804 			switch (ch->chunk_type) {
2805 			case SCTP_INITIATION:
2806 			case SCTP_INITIATION_ACK:
2807 			case SCTP_SELECTIVE_ACK:
2808 			case SCTP_NR_SELECTIVE_ACK:
2809 			case SCTP_HEARTBEAT_REQUEST:
2810 			case SCTP_HEARTBEAT_ACK:
2811 			case SCTP_ABORT_ASSOCIATION:
2812 			case SCTP_SHUTDOWN:
2813 			case SCTP_SHUTDOWN_ACK:
2814 			case SCTP_OPERATION_ERROR:
2815 			case SCTP_COOKIE_ECHO:
2816 			case SCTP_COOKIE_ACK:
2817 			case SCTP_ECN_ECHO:
2818 			case SCTP_ECN_CWR:
2819 			case SCTP_SHUTDOWN_COMPLETE:
2820 			case SCTP_AUTHENTICATION:
2821 			case SCTP_ASCONF_ACK:
2822 			case SCTP_PACKET_DROPPED:
2823 			case SCTP_STREAM_RESET:
2824 			case SCTP_FORWARD_CUM_TSN:
2825 			case SCTP_ASCONF:
2826 				{
2827 					/*
2828 					 * Now, what do we do with KNOWN
2829 					 * chunks that are NOT in the right
2830 					 * place?
2831 					 *
2832 					 * For now, I do nothing but ignore
2833 					 * them. We may later want to add
2834 					 * sysctl stuff to switch out and do
2835 					 * either an ABORT() or possibly
2836 					 * process them.
2837 					 */
2838 					struct mbuf *op_err;
2839 					char msg[SCTP_DIAG_INFO_LEN];
2840 
2841 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2842 					    ch->chunk_type);
2843 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2844 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2845 					return (2);
2846 				}
2847 			default:
2848 				/*
2849 				 * Unknown chunk type: use bit rules after
2850 				 * checking length
2851 				 */
2852 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2853 					/*
2854 					 * Need to send an abort since we
2855 					 * had a invalid chunk.
2856 					 */
2857 					struct mbuf *op_err;
2858 					char msg[SCTP_DIAG_INFO_LEN];
2859 
2860 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2861 					    chk_length);
2862 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2863 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2864 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2865 					return (2);
2866 				}
2867 				if (ch->chunk_type & 0x40) {
2868 					/* Add a error report to the queue */
2869 					struct mbuf *op_err;
2870 					struct sctp_gen_error_cause *cause;
2871 
2872 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2873 					    0, M_NOWAIT, 1, MT_DATA);
2874 					if (op_err != NULL) {
2875 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2876 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2877 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2878 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2879 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2880 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2881 							sctp_queue_op_err(stcb, op_err);
2882 						} else {
2883 							sctp_m_freem(op_err);
2884 						}
2885 					}
2886 				}
2887 				if ((ch->chunk_type & 0x80) == 0) {
2888 					/* discard the rest of this packet */
2889 					stop_proc = 1;
2890 				}	/* else skip this bad chunk and
2891 					 * continue... */
2892 				break;
2893 			}	/* switch of chunk type */
2894 		}
2895 		*offset += SCTP_SIZE32(chk_length);
2896 		if ((*offset >= length) || stop_proc) {
2897 			/* no more data left in the mbuf chain */
2898 			stop_proc = 1;
2899 			continue;
2900 		}
2901 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2902 		    sizeof(struct sctp_chunkhdr),
2903 		    (uint8_t *)&chunk_buf);
2904 		if (ch == NULL) {
2905 			*offset = length;
2906 			stop_proc = 1;
2907 			continue;
2908 		}
2909 	}
2910 	if (break_flag) {
2911 		/*
2912 		 * we need to report rwnd overrun drops.
2913 		 */
2914 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2915 	}
2916 	if (num_chunks) {
2917 		/*
2918 		 * Did we get data, if so update the time for auto-close and
2919 		 * give peer credit for being alive.
2920 		 */
2921 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2922 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2923 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2924 			    stcb->asoc.overall_error_count,
2925 			    0,
2926 			    SCTP_FROM_SCTP_INDATA,
2927 			    __LINE__);
2928 		}
2929 		stcb->asoc.overall_error_count = 0;
2930 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2931 	}
2932 	/* now service all of the reassm queue if needed */
2933 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2934 		/* Assure that we ack right away */
2935 		stcb->asoc.send_sack = 1;
2936 	}
2937 	/* Start a sack timer or QUEUE a SACK for sending */
2938 	sctp_sack_check(stcb, was_a_gap);
2939 	return (0);
2940 }
2941 
2942 static int
2943 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2944     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2945     int *num_frs,
2946     uint32_t *biggest_newly_acked_tsn,
2947     uint32_t *this_sack_lowest_newack,
2948     int *rto_ok)
2949 {
2950 	struct sctp_tmit_chunk *tp1;
2951 	unsigned int theTSN;
2952 	int j, wake_him = 0, circled = 0;
2953 
2954 	/* Recover the tp1 we last saw */
2955 	tp1 = *p_tp1;
2956 	if (tp1 == NULL) {
2957 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2958 	}
2959 	for (j = frag_strt; j <= frag_end; j++) {
2960 		theTSN = j + last_tsn;
2961 		while (tp1) {
2962 			if (tp1->rec.data.doing_fast_retransmit)
2963 				(*num_frs) += 1;
2964 
2965 			/*-
2966 			 * CMT: CUCv2 algorithm. For each TSN being
2967 			 * processed from the sent queue, track the
2968 			 * next expected pseudo-cumack, or
2969 			 * rtx_pseudo_cumack, if required. Separate
2970 			 * cumack trackers for first transmissions,
2971 			 * and retransmissions.
2972 			 */
2973 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2974 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2975 			    (tp1->snd_count == 1)) {
2976 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2977 				tp1->whoTo->find_pseudo_cumack = 0;
2978 			}
2979 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2981 			    (tp1->snd_count > 1)) {
2982 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2983 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2984 			}
2985 			if (tp1->rec.data.tsn == theTSN) {
2986 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2987 					/*-
2988 					 * must be held until
2989 					 * cum-ack passes
2990 					 */
2991 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2992 						/*-
2993 						 * If it is less than RESEND, it is
2994 						 * now no-longer in flight.
2995 						 * Higher values may already be set
2996 						 * via previous Gap Ack Blocks...
2997 						 * i.e. ACKED or RESEND.
2998 						 */
2999 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3000 						    *biggest_newly_acked_tsn)) {
3001 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3002 						}
3003 						/*-
3004 						 * CMT: SFR algo (and HTNA) - set
3005 						 * saw_newack to 1 for dest being
3006 						 * newly acked. update
3007 						 * this_sack_highest_newack if
3008 						 * appropriate.
3009 						 */
3010 						if (tp1->rec.data.chunk_was_revoked == 0)
3011 							tp1->whoTo->saw_newack = 1;
3012 
3013 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 						    tp1->whoTo->this_sack_highest_newack)) {
3015 							tp1->whoTo->this_sack_highest_newack =
3016 							    tp1->rec.data.tsn;
3017 						}
3018 						/*-
3019 						 * CMT DAC algo: also update
3020 						 * this_sack_lowest_newack
3021 						 */
3022 						if (*this_sack_lowest_newack == 0) {
3023 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3024 								sctp_log_sack(*this_sack_lowest_newack,
3025 								    last_tsn,
3026 								    tp1->rec.data.tsn,
3027 								    0,
3028 								    0,
3029 								    SCTP_LOG_TSN_ACKED);
3030 							}
3031 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3032 						}
3033 						/*-
3034 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3035 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3036 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3037 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3038 						 * Separate pseudo_cumack trackers for first transmissions and
3039 						 * retransmissions.
3040 						 */
3041 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3042 							if (tp1->rec.data.chunk_was_revoked == 0) {
3043 								tp1->whoTo->new_pseudo_cumack = 1;
3044 							}
3045 							tp1->whoTo->find_pseudo_cumack = 1;
3046 						}
3047 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3048 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3049 						}
3050 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3051 							if (tp1->rec.data.chunk_was_revoked == 0) {
3052 								tp1->whoTo->new_pseudo_cumack = 1;
3053 							}
3054 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3055 						}
3056 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3057 							sctp_log_sack(*biggest_newly_acked_tsn,
3058 							    last_tsn,
3059 							    tp1->rec.data.tsn,
3060 							    frag_strt,
3061 							    frag_end,
3062 							    SCTP_LOG_TSN_ACKED);
3063 						}
3064 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3065 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3066 							    tp1->whoTo->flight_size,
3067 							    tp1->book_size,
3068 							    (uint32_t)(uintptr_t)tp1->whoTo,
3069 							    tp1->rec.data.tsn);
3070 						}
3071 						sctp_flight_size_decrease(tp1);
3072 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3073 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3074 							    tp1);
3075 						}
3076 						sctp_total_flight_decrease(stcb, tp1);
3077 
3078 						tp1->whoTo->net_ack += tp1->send_size;
3079 						if (tp1->snd_count < 2) {
3080 							/*-
3081 							 * True non-retransmited chunk
3082 							 */
3083 							tp1->whoTo->net_ack2 += tp1->send_size;
3084 
3085 							/*-
3086 							 * update RTO too ?
3087 							 */
3088 							if (tp1->do_rtt) {
3089 								if (*rto_ok) {
3090 									tp1->whoTo->RTO =
3091 									    sctp_calculate_rto(stcb,
3092 									    &stcb->asoc,
3093 									    tp1->whoTo,
3094 									    &tp1->sent_rcv_time,
3095 									    SCTP_RTT_FROM_DATA);
3096 									*rto_ok = 0;
3097 								}
3098 								if (tp1->whoTo->rto_needed == 0) {
3099 									tp1->whoTo->rto_needed = 1;
3100 								}
3101 								tp1->do_rtt = 0;
3102 							}
3103 						}
3104 
3105 					}
3106 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3107 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3108 						    stcb->asoc.this_sack_highest_gap)) {
3109 							stcb->asoc.this_sack_highest_gap =
3110 							    tp1->rec.data.tsn;
3111 						}
3112 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3113 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3114 #ifdef SCTP_AUDITING_ENABLED
3115 							sctp_audit_log(0xB2,
3116 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3117 #endif
3118 						}
3119 					}
3120 					/*-
3121 					 * All chunks NOT UNSENT fall through here and are marked
3122 					 * (leave PR-SCTP ones that are to skip alone though)
3123 					 */
3124 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3125 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3126 						tp1->sent = SCTP_DATAGRAM_MARKED;
3127 					}
3128 					if (tp1->rec.data.chunk_was_revoked) {
3129 						/* deflate the cwnd */
3130 						tp1->whoTo->cwnd -= tp1->book_size;
3131 						tp1->rec.data.chunk_was_revoked = 0;
3132 					}
3133 					/* NR Sack code here */
3134 					if (nr_sacking &&
3135 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3136 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3137 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3138 #ifdef INVARIANTS
3139 						} else {
3140 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3141 #endif
3142 						}
3143 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3144 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3145 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3146 							stcb->asoc.trigger_reset = 1;
3147 						}
3148 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3149 						if (tp1->data) {
3150 							/*
3151 							 * sa_ignore
3152 							 * NO_NULL_CHK
3153 							 */
3154 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3155 							sctp_m_freem(tp1->data);
3156 							tp1->data = NULL;
3157 						}
3158 						wake_him++;
3159 					}
3160 				}
3161 				break;
3162 			}	/* if (tp1->tsn == theTSN) */
3163 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3164 				break;
3165 			}
3166 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3167 			if ((tp1 == NULL) && (circled == 0)) {
3168 				circled++;
3169 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3170 			}
3171 		}		/* end while (tp1) */
3172 		if (tp1 == NULL) {
3173 			circled = 0;
3174 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3175 		}
3176 		/* In case the fragments were not in order we must reset */
3177 	}			/* end for (j = fragStart */
3178 	*p_tp1 = tp1;
3179 	return (wake_him);	/* Return value only used for nr-sack */
3180 }
3181 
3182 
3183 static int
3184 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187     int num_seg, int num_nr_seg, int *rto_ok)
3188 {
3189 	struct sctp_gap_ack_block *frag, block;
3190 	struct sctp_tmit_chunk *tp1;
3191 	int i;
3192 	int num_frs = 0;
3193 	int chunk_freed;
3194 	int non_revocable;
3195 	uint16_t frag_strt, frag_end, prev_frag_end;
3196 
3197 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3198 	prev_frag_end = 0;
3199 	chunk_freed = 0;
3200 
3201 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3202 		if (i == num_seg) {
3203 			prev_frag_end = 0;
3204 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205 		}
3206 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3208 		*offset += sizeof(block);
3209 		if (frag == NULL) {
3210 			return (chunk_freed);
3211 		}
3212 		frag_strt = ntohs(frag->start);
3213 		frag_end = ntohs(frag->end);
3214 
3215 		if (frag_strt > frag_end) {
3216 			/* This gap report is malformed, skip it. */
3217 			continue;
3218 		}
3219 		if (frag_strt <= prev_frag_end) {
3220 			/* This gap report is not in order, so restart. */
3221 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3222 		}
3223 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224 			*biggest_tsn_acked = last_tsn + frag_end;
3225 		}
3226 		if (i < num_seg) {
3227 			non_revocable = 0;
3228 		} else {
3229 			non_revocable = 1;
3230 		}
3231 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3233 		    this_sack_lowest_newack, rto_ok)) {
3234 			chunk_freed = 1;
3235 		}
3236 		prev_frag_end = frag_end;
3237 	}
3238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3239 		if (num_frs)
3240 			sctp_log_fr(*biggest_tsn_acked,
3241 			    *biggest_newly_acked_tsn,
3242 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3243 	}
3244 	return (chunk_freed);
3245 }
3246 
3247 static void
3248 sctp_check_for_revoked(struct sctp_tcb *stcb,
3249     struct sctp_association *asoc, uint32_t cumack,
3250     uint32_t biggest_tsn_acked)
3251 {
3252 	struct sctp_tmit_chunk *tp1;
3253 
3254 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3256 			/*
3257 			 * ok this guy is either ACK or MARKED. If it is
3258 			 * ACKED it has been previously acked but not this
3259 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3260 			 * again.
3261 			 */
3262 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3263 				break;
3264 			}
3265 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266 				/* it has been revoked */
3267 				tp1->sent = SCTP_DATAGRAM_SENT;
3268 				tp1->rec.data.chunk_was_revoked = 1;
3269 				/*
3270 				 * We must add this stuff back in to assure
3271 				 * timers and such get started.
3272 				 */
3273 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3274 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3275 					    tp1->whoTo->flight_size,
3276 					    tp1->book_size,
3277 					    (uint32_t)(uintptr_t)tp1->whoTo,
3278 					    tp1->rec.data.tsn);
3279 				}
3280 				sctp_flight_size_increase(tp1);
3281 				sctp_total_flight_increase(stcb, tp1);
3282 				/*
3283 				 * We inflate the cwnd to compensate for our
3284 				 * artificial inflation of the flight_size.
3285 				 */
3286 				tp1->whoTo->cwnd += tp1->book_size;
3287 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3288 					sctp_log_sack(asoc->last_acked_seq,
3289 					    cumack,
3290 					    tp1->rec.data.tsn,
3291 					    0,
3292 					    0,
3293 					    SCTP_LOG_TSN_REVOKED);
3294 				}
3295 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3296 				/* it has been re-acked in this SACK */
3297 				tp1->sent = SCTP_DATAGRAM_ACKED;
3298 			}
3299 		}
3300 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3301 			break;
3302 	}
3303 }
3304 
3305 
3306 static void
3307 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3308     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3309 {
3310 	struct sctp_tmit_chunk *tp1;
3311 	int strike_flag = 0;
3312 	struct timeval now;
3313 	int tot_retrans = 0;
3314 	uint32_t sending_seq;
3315 	struct sctp_nets *net;
3316 	int num_dests_sacked = 0;
3317 
3318 	/*
3319 	 * select the sending_seq, this is either the next thing ready to be
3320 	 * sent but not transmitted, OR, the next seq we assign.
3321 	 */
3322 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3323 	if (tp1 == NULL) {
3324 		sending_seq = asoc->sending_seq;
3325 	} else {
3326 		sending_seq = tp1->rec.data.tsn;
3327 	}
3328 
3329 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3330 	if ((asoc->sctp_cmt_on_off > 0) &&
3331 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3332 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3333 			if (net->saw_newack)
3334 				num_dests_sacked++;
3335 		}
3336 	}
3337 	if (stcb->asoc.prsctp_supported) {
3338 		(void)SCTP_GETTIME_TIMEVAL(&now);
3339 	}
3340 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3341 		strike_flag = 0;
3342 		if (tp1->no_fr_allowed) {
3343 			/* this one had a timeout or something */
3344 			continue;
3345 		}
3346 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3348 				sctp_log_fr(biggest_tsn_newly_acked,
3349 				    tp1->rec.data.tsn,
3350 				    tp1->sent,
3351 				    SCTP_FR_LOG_CHECK_STRIKE);
3352 		}
3353 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3354 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3355 			/* done */
3356 			break;
3357 		}
3358 		if (stcb->asoc.prsctp_supported) {
3359 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3360 				/* Is it expired? */
3361 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3362 					/* Yes so drop it */
3363 					if (tp1->data != NULL) {
3364 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3365 						    SCTP_SO_NOT_LOCKED);
3366 					}
3367 					continue;
3368 				}
3369 			}
3370 
3371 		}
3372 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3373 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3374 			/* we are beyond the tsn in the sack  */
3375 			break;
3376 		}
3377 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3378 			/* either a RESEND, ACKED, or MARKED */
3379 			/* skip */
3380 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3381 				/* Continue strikin FWD-TSN chunks */
3382 				tp1->rec.data.fwd_tsn_cnt++;
3383 			}
3384 			continue;
3385 		}
3386 		/*
3387 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3388 		 */
3389 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3390 			/*
3391 			 * No new acks were receieved for data sent to this
3392 			 * dest. Therefore, according to the SFR algo for
3393 			 * CMT, no data sent to this dest can be marked for
3394 			 * FR using this SACK.
3395 			 */
3396 			continue;
3397 		} else if (tp1->whoTo &&
3398 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3399 			    tp1->whoTo->this_sack_highest_newack) &&
3400 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3401 			/*
3402 			 * CMT: New acks were receieved for data sent to
3403 			 * this dest. But no new acks were seen for data
3404 			 * sent after tp1. Therefore, according to the SFR
3405 			 * algo for CMT, tp1 cannot be marked for FR using
3406 			 * this SACK. This step covers part of the DAC algo
3407 			 * and the HTNA algo as well.
3408 			 */
3409 			continue;
3410 		}
3411 		/*
3412 		 * Here we check to see if we were have already done a FR
3413 		 * and if so we see if the biggest TSN we saw in the sack is
3414 		 * smaller than the recovery point. If so we don't strike
3415 		 * the tsn... otherwise we CAN strike the TSN.
3416 		 */
3417 		/*
3418 		 * @@@ JRI: Check for CMT if (accum_moved &&
3419 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3420 		 * 0)) {
3421 		 */
3422 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3423 			/*
3424 			 * Strike the TSN if in fast-recovery and cum-ack
3425 			 * moved.
3426 			 */
3427 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3428 				sctp_log_fr(biggest_tsn_newly_acked,
3429 				    tp1->rec.data.tsn,
3430 				    tp1->sent,
3431 				    SCTP_FR_LOG_STRIKE_CHUNK);
3432 			}
3433 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3434 				tp1->sent++;
3435 			}
3436 			if ((asoc->sctp_cmt_on_off > 0) &&
3437 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3438 				/*
3439 				 * CMT DAC algorithm: If SACK flag is set to
3440 				 * 0, then lowest_newack test will not pass
3441 				 * because it would have been set to the
3442 				 * cumack earlier. If not already to be
3443 				 * rtx'd, If not a mixed sack and if tp1 is
3444 				 * not between two sacked TSNs, then mark by
3445 				 * one more. NOTE that we are marking by one
3446 				 * additional time since the SACK DAC flag
3447 				 * indicates that two packets have been
3448 				 * received after this missing TSN.
3449 				 */
3450 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3451 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3452 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 						sctp_log_fr(16 + num_dests_sacked,
3454 						    tp1->rec.data.tsn,
3455 						    tp1->sent,
3456 						    SCTP_FR_LOG_STRIKE_CHUNK);
3457 					}
3458 					tp1->sent++;
3459 				}
3460 			}
3461 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3462 		    (asoc->sctp_cmt_on_off == 0)) {
3463 			/*
3464 			 * For those that have done a FR we must take
3465 			 * special consideration if we strike. I.e the
3466 			 * biggest_newly_acked must be higher than the
3467 			 * sending_seq at the time we did the FR.
3468 			 */
3469 			if (
3470 #ifdef SCTP_FR_TO_ALTERNATE
3471 			/*
3472 			 * If FR's go to new networks, then we must only do
3473 			 * this for singly homed asoc's. However if the FR's
3474 			 * go to the same network (Armando's work) then its
3475 			 * ok to FR multiple times.
3476 			 */
3477 			    (asoc->numnets < 2)
3478 #else
3479 			    (1)
3480 #endif
3481 			    ) {
3482 
3483 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3484 				    tp1->rec.data.fast_retran_tsn)) {
3485 					/*
3486 					 * Strike the TSN, since this ack is
3487 					 * beyond where things were when we
3488 					 * did a FR.
3489 					 */
3490 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3491 						sctp_log_fr(biggest_tsn_newly_acked,
3492 						    tp1->rec.data.tsn,
3493 						    tp1->sent,
3494 						    SCTP_FR_LOG_STRIKE_CHUNK);
3495 					}
3496 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3497 						tp1->sent++;
3498 					}
3499 					strike_flag = 1;
3500 					if ((asoc->sctp_cmt_on_off > 0) &&
3501 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3502 						/*
3503 						 * CMT DAC algorithm: If
3504 						 * SACK flag is set to 0,
3505 						 * then lowest_newack test
3506 						 * will not pass because it
3507 						 * would have been set to
3508 						 * the cumack earlier. If
3509 						 * not already to be rtx'd,
3510 						 * If not a mixed sack and
3511 						 * if tp1 is not between two
3512 						 * sacked TSNs, then mark by
3513 						 * one more. NOTE that we
3514 						 * are marking by one
3515 						 * additional time since the
3516 						 * SACK DAC flag indicates
3517 						 * that two packets have
3518 						 * been received after this
3519 						 * missing TSN.
3520 						 */
3521 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3522 						    (num_dests_sacked == 1) &&
3523 						    SCTP_TSN_GT(this_sack_lowest_newack,
3524 						    tp1->rec.data.tsn)) {
3525 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3526 								sctp_log_fr(32 + num_dests_sacked,
3527 								    tp1->rec.data.tsn,
3528 								    tp1->sent,
3529 								    SCTP_FR_LOG_STRIKE_CHUNK);
3530 							}
3531 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3532 								tp1->sent++;
3533 							}
3534 						}
3535 					}
3536 				}
3537 			}
3538 			/*
3539 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3540 			 * algo covers HTNA.
3541 			 */
3542 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3543 		    biggest_tsn_newly_acked)) {
3544 			/*
3545 			 * We don't strike these: This is the  HTNA
3546 			 * algorithm i.e. we don't strike If our TSN is
3547 			 * larger than the Highest TSN Newly Acked.
3548 			 */
3549 			;
3550 		} else {
3551 			/* Strike the TSN */
3552 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3553 				sctp_log_fr(biggest_tsn_newly_acked,
3554 				    tp1->rec.data.tsn,
3555 				    tp1->sent,
3556 				    SCTP_FR_LOG_STRIKE_CHUNK);
3557 			}
3558 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3559 				tp1->sent++;
3560 			}
3561 			if ((asoc->sctp_cmt_on_off > 0) &&
3562 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3563 				/*
3564 				 * CMT DAC algorithm: If SACK flag is set to
3565 				 * 0, then lowest_newack test will not pass
3566 				 * because it would have been set to the
3567 				 * cumack earlier. If not already to be
3568 				 * rtx'd, If not a mixed sack and if tp1 is
3569 				 * not between two sacked TSNs, then mark by
3570 				 * one more. NOTE that we are marking by one
3571 				 * additional time since the SACK DAC flag
3572 				 * indicates that two packets have been
3573 				 * received after this missing TSN.
3574 				 */
3575 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3576 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3577 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3578 						sctp_log_fr(48 + num_dests_sacked,
3579 						    tp1->rec.data.tsn,
3580 						    tp1->sent,
3581 						    SCTP_FR_LOG_STRIKE_CHUNK);
3582 					}
3583 					tp1->sent++;
3584 				}
3585 			}
3586 		}
3587 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3588 			struct sctp_nets *alt;
3589 
3590 			/* fix counts and things */
3591 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3592 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3593 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3594 				    tp1->book_size,
3595 				    (uint32_t)(uintptr_t)tp1->whoTo,
3596 				    tp1->rec.data.tsn);
3597 			}
3598 			if (tp1->whoTo) {
3599 				tp1->whoTo->net_ack++;
3600 				sctp_flight_size_decrease(tp1);
3601 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3602 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3603 					    tp1);
3604 				}
3605 			}
3606 
3607 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3608 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3609 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3610 			}
3611 			/* add back to the rwnd */
3612 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3613 
3614 			/* remove from the total flight */
3615 			sctp_total_flight_decrease(stcb, tp1);
3616 
3617 			if ((stcb->asoc.prsctp_supported) &&
3618 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3619 				/*
3620 				 * Has it been retransmitted tv_sec times? -
3621 				 * we store the retran count there.
3622 				 */
3623 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3624 					/* Yes, so drop it */
3625 					if (tp1->data != NULL) {
3626 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3627 						    SCTP_SO_NOT_LOCKED);
3628 					}
3629 					/* Make sure to flag we had a FR */
3630 					if (tp1->whoTo != NULL) {
3631 						tp1->whoTo->net_ack++;
3632 					}
3633 					continue;
3634 				}
3635 			}
3636 			/*
3637 			 * SCTP_PRINTF("OK, we are now ready to FR this
3638 			 * guy\n");
3639 			 */
3640 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3641 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3642 				    0, SCTP_FR_MARKED);
3643 			}
3644 			if (strike_flag) {
3645 				/* This is a subsequent FR */
3646 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3647 			}
3648 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3649 			if (asoc->sctp_cmt_on_off > 0) {
3650 				/*
3651 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3652 				 * If CMT is being used, then pick dest with
3653 				 * largest ssthresh for any retransmission.
3654 				 */
3655 				tp1->no_fr_allowed = 1;
3656 				alt = tp1->whoTo;
3657 				/* sa_ignore NO_NULL_CHK */
3658 				if (asoc->sctp_cmt_pf > 0) {
3659 					/*
3660 					 * JRS 5/18/07 - If CMT PF is on,
3661 					 * use the PF version of
3662 					 * find_alt_net()
3663 					 */
3664 					alt = sctp_find_alternate_net(stcb, alt, 2);
3665 				} else {
3666 					/*
3667 					 * JRS 5/18/07 - If only CMT is on,
3668 					 * use the CMT version of
3669 					 * find_alt_net()
3670 					 */
3671 					/* sa_ignore NO_NULL_CHK */
3672 					alt = sctp_find_alternate_net(stcb, alt, 1);
3673 				}
3674 				if (alt == NULL) {
3675 					alt = tp1->whoTo;
3676 				}
3677 				/*
3678 				 * CUCv2: If a different dest is picked for
3679 				 * the retransmission, then new
3680 				 * (rtx-)pseudo_cumack needs to be tracked
3681 				 * for orig dest. Let CUCv2 track new (rtx-)
3682 				 * pseudo-cumack always.
3683 				 */
3684 				if (tp1->whoTo) {
3685 					tp1->whoTo->find_pseudo_cumack = 1;
3686 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3687 				}
3688 
3689 			} else {	/* CMT is OFF */
3690 
3691 #ifdef SCTP_FR_TO_ALTERNATE
3692 				/* Can we find an alternate? */
3693 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3694 #else
3695 				/*
3696 				 * default behavior is to NOT retransmit
3697 				 * FR's to an alternate. Armando Caro's
3698 				 * paper details why.
3699 				 */
3700 				alt = tp1->whoTo;
3701 #endif
3702 			}
3703 
3704 			tp1->rec.data.doing_fast_retransmit = 1;
3705 			tot_retrans++;
3706 			/* mark the sending seq for possible subsequent FR's */
3707 			/*
3708 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3709 			 * (uint32_t)tpi->rec.data.tsn);
3710 			 */
3711 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3712 				/*
3713 				 * If the queue of send is empty then its
3714 				 * the next sequence number that will be
3715 				 * assigned so we subtract one from this to
3716 				 * get the one we last sent.
3717 				 */
3718 				tp1->rec.data.fast_retran_tsn = sending_seq;
3719 			} else {
3720 				/*
3721 				 * If there are chunks on the send queue
3722 				 * (unsent data that has made it from the
3723 				 * stream queues but not out the door, we
3724 				 * take the first one (which will have the
3725 				 * lowest TSN) and subtract one to get the
3726 				 * one we last sent.
3727 				 */
3728 				struct sctp_tmit_chunk *ttt;
3729 
3730 				ttt = TAILQ_FIRST(&asoc->send_queue);
3731 				tp1->rec.data.fast_retran_tsn =
3732 				    ttt->rec.data.tsn;
3733 			}
3734 
3735 			if (tp1->do_rtt) {
3736 				/*
3737 				 * this guy had a RTO calculation pending on
3738 				 * it, cancel it
3739 				 */
3740 				if ((tp1->whoTo != NULL) &&
3741 				    (tp1->whoTo->rto_needed == 0)) {
3742 					tp1->whoTo->rto_needed = 1;
3743 				}
3744 				tp1->do_rtt = 0;
3745 			}
3746 			if (alt != tp1->whoTo) {
3747 				/* yes, there is an alternate. */
3748 				sctp_free_remote_addr(tp1->whoTo);
3749 				/* sa_ignore FREED_MEMORY */
3750 				tp1->whoTo = alt;
3751 				atomic_add_int(&alt->ref_count, 1);
3752 			}
3753 		}
3754 	}
3755 }
3756 
3757 struct sctp_tmit_chunk *
3758 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3759     struct sctp_association *asoc)
3760 {
3761 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3762 	struct timeval now;
3763 	int now_filled = 0;
3764 
3765 	if (asoc->prsctp_supported == 0) {
3766 		return (NULL);
3767 	}
3768 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3769 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3770 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3771 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3772 			/* no chance to advance, out of here */
3773 			break;
3774 		}
3775 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3776 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3778 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3779 				    asoc->advanced_peer_ack_point,
3780 				    tp1->rec.data.tsn, 0, 0);
3781 			}
3782 		}
3783 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3784 			/*
3785 			 * We can't fwd-tsn past any that are reliable aka
3786 			 * retransmitted until the asoc fails.
3787 			 */
3788 			break;
3789 		}
3790 		if (!now_filled) {
3791 			(void)SCTP_GETTIME_TIMEVAL(&now);
3792 			now_filled = 1;
3793 		}
3794 		/*
3795 		 * now we got a chunk which is marked for another
3796 		 * retransmission to a PR-stream but has run out its chances
3797 		 * already maybe OR has been marked to skip now. Can we skip
3798 		 * it if its a resend?
3799 		 */
3800 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3801 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3802 			/*
3803 			 * Now is this one marked for resend and its time is
3804 			 * now up?
3805 			 */
3806 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3807 				/* Yes so drop it */
3808 				if (tp1->data) {
3809 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3810 					    1, SCTP_SO_NOT_LOCKED);
3811 				}
3812 			} else {
3813 				/*
3814 				 * No, we are done when hit one for resend
3815 				 * whos time as not expired.
3816 				 */
3817 				break;
3818 			}
3819 		}
3820 		/*
3821 		 * Ok now if this chunk is marked to drop it we can clean up
3822 		 * the chunk, advance our peer ack point and we can check
3823 		 * the next chunk.
3824 		 */
3825 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3826 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3827 			/* advance PeerAckPoint goes forward */
3828 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3829 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3830 				a_adv = tp1;
3831 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3832 				/* No update but we do save the chk */
3833 				a_adv = tp1;
3834 			}
3835 		} else {
3836 			/*
3837 			 * If it is still in RESEND we can advance no
3838 			 * further
3839 			 */
3840 			break;
3841 		}
3842 	}
3843 	return (a_adv);
3844 }
3845 
3846 static int
3847 sctp_fs_audit(struct sctp_association *asoc)
3848 {
3849 	struct sctp_tmit_chunk *chk;
3850 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3851 	int ret;
3852 #ifndef INVARIANTS
3853 	int entry_flight, entry_cnt;
3854 #endif
3855 
3856 	ret = 0;
3857 #ifndef INVARIANTS
3858 	entry_flight = asoc->total_flight;
3859 	entry_cnt = asoc->total_flight_count;
3860 #endif
3861 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3862 		return (0);
3863 
3864 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3865 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3866 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3867 			    chk->rec.data.tsn,
3868 			    chk->send_size,
3869 			    chk->snd_count);
3870 			inflight++;
3871 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3872 			resend++;
3873 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3874 			inbetween++;
3875 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3876 			above++;
3877 		} else {
3878 			acked++;
3879 		}
3880 	}
3881 
3882 	if ((inflight > 0) || (inbetween > 0)) {
3883 #ifdef INVARIANTS
3884 		panic("Flight size-express incorrect? \n");
3885 #else
3886 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3887 		    entry_flight, entry_cnt);
3888 
3889 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3890 		    inflight, inbetween, resend, above, acked);
3891 		ret = 1;
3892 #endif
3893 	}
3894 	return (ret);
3895 }
3896 
3897 
3898 static void
3899 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3900     struct sctp_association *asoc,
3901     struct sctp_tmit_chunk *tp1)
3902 {
3903 	tp1->window_probe = 0;
3904 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3905 		/* TSN's skipped we do NOT move back. */
3906 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3907 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3908 		    tp1->book_size,
3909 		    (uint32_t)(uintptr_t)tp1->whoTo,
3910 		    tp1->rec.data.tsn);
3911 		return;
3912 	}
3913 	/* First setup this by shrinking flight */
3914 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3915 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3916 		    tp1);
3917 	}
3918 	sctp_flight_size_decrease(tp1);
3919 	sctp_total_flight_decrease(stcb, tp1);
3920 	/* Now mark for resend */
3921 	tp1->sent = SCTP_DATAGRAM_RESEND;
3922 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3923 
3924 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3925 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3926 		    tp1->whoTo->flight_size,
3927 		    tp1->book_size,
3928 		    (uint32_t)(uintptr_t)tp1->whoTo,
3929 		    tp1->rec.data.tsn);
3930 	}
3931 }
3932 
3933 void
3934 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3935     uint32_t rwnd, int *abort_now, int ecne_seen)
3936 {
3937 	struct sctp_nets *net;
3938 	struct sctp_association *asoc;
3939 	struct sctp_tmit_chunk *tp1, *tp2;
3940 	uint32_t old_rwnd;
3941 	int win_probe_recovery = 0;
3942 	int win_probe_recovered = 0;
3943 	int j, done_once = 0;
3944 	int rto_ok = 1;
3945 	uint32_t send_s;
3946 
3947 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3948 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3949 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3950 	}
3951 	SCTP_TCB_LOCK_ASSERT(stcb);
3952 #ifdef SCTP_ASOCLOG_OF_TSNS
3953 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3954 	stcb->asoc.cumack_log_at++;
3955 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3956 		stcb->asoc.cumack_log_at = 0;
3957 	}
3958 #endif
3959 	asoc = &stcb->asoc;
3960 	old_rwnd = asoc->peers_rwnd;
3961 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3962 		/* old ack */
3963 		return;
3964 	} else if (asoc->last_acked_seq == cumack) {
3965 		/* Window update sack */
3966 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3967 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3968 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3969 			/* SWS sender side engages */
3970 			asoc->peers_rwnd = 0;
3971 		}
3972 		if (asoc->peers_rwnd > old_rwnd) {
3973 			goto again;
3974 		}
3975 		return;
3976 	}
3977 
3978 	/* First setup for CC stuff */
3979 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3980 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3981 			/* Drag along the window_tsn for cwr's */
3982 			net->cwr_window_tsn = cumack;
3983 		}
3984 		net->prev_cwnd = net->cwnd;
3985 		net->net_ack = 0;
3986 		net->net_ack2 = 0;
3987 
3988 		/*
3989 		 * CMT: Reset CUC and Fast recovery algo variables before
3990 		 * SACK processing
3991 		 */
3992 		net->new_pseudo_cumack = 0;
3993 		net->will_exit_fast_recovery = 0;
3994 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3995 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3996 		}
3997 	}
3998 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3999 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4000 		    sctpchunk_listhead);
4001 		send_s = tp1->rec.data.tsn + 1;
4002 	} else {
4003 		send_s = asoc->sending_seq;
4004 	}
4005 	if (SCTP_TSN_GE(cumack, send_s)) {
4006 		struct mbuf *op_err;
4007 		char msg[SCTP_DIAG_INFO_LEN];
4008 
4009 		*abort_now = 1;
4010 		/* XXX */
4011 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4012 		    cumack, send_s);
4013 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4014 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4015 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4016 		return;
4017 	}
4018 	asoc->this_sack_highest_gap = cumack;
4019 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4020 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4021 		    stcb->asoc.overall_error_count,
4022 		    0,
4023 		    SCTP_FROM_SCTP_INDATA,
4024 		    __LINE__);
4025 	}
4026 	stcb->asoc.overall_error_count = 0;
4027 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4028 		/* process the new consecutive TSN first */
4029 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4030 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4031 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4032 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4033 				}
4034 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4035 					/*
4036 					 * If it is less than ACKED, it is
4037 					 * now no-longer in flight. Higher
4038 					 * values may occur during marking
4039 					 */
4040 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4041 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4042 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4043 							    tp1->whoTo->flight_size,
4044 							    tp1->book_size,
4045 							    (uint32_t)(uintptr_t)tp1->whoTo,
4046 							    tp1->rec.data.tsn);
4047 						}
4048 						sctp_flight_size_decrease(tp1);
4049 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4050 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4051 							    tp1);
4052 						}
4053 						/* sa_ignore NO_NULL_CHK */
4054 						sctp_total_flight_decrease(stcb, tp1);
4055 					}
4056 					tp1->whoTo->net_ack += tp1->send_size;
4057 					if (tp1->snd_count < 2) {
4058 						/*
4059 						 * True non-retransmited
4060 						 * chunk
4061 						 */
4062 						tp1->whoTo->net_ack2 +=
4063 						    tp1->send_size;
4064 
4065 						/* update RTO too? */
4066 						if (tp1->do_rtt) {
4067 							if (rto_ok) {
4068 								tp1->whoTo->RTO =
4069 								/*
4070 								 * sa_ignore
4071 								 * NO_NULL_CHK
4072 								 */
4073 								    sctp_calculate_rto(stcb,
4074 								    asoc, tp1->whoTo,
4075 								    &tp1->sent_rcv_time,
4076 								    SCTP_RTT_FROM_DATA);
4077 								rto_ok = 0;
4078 							}
4079 							if (tp1->whoTo->rto_needed == 0) {
4080 								tp1->whoTo->rto_needed = 1;
4081 							}
4082 							tp1->do_rtt = 0;
4083 						}
4084 					}
4085 					/*
4086 					 * CMT: CUCv2 algorithm. From the
4087 					 * cumack'd TSNs, for each TSN being
4088 					 * acked for the first time, set the
4089 					 * following variables for the
4090 					 * corresp destination.
4091 					 * new_pseudo_cumack will trigger a
4092 					 * cwnd update.
4093 					 * find_(rtx_)pseudo_cumack will
4094 					 * trigger search for the next
4095 					 * expected (rtx-)pseudo-cumack.
4096 					 */
4097 					tp1->whoTo->new_pseudo_cumack = 1;
4098 					tp1->whoTo->find_pseudo_cumack = 1;
4099 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4100 
4101 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4102 						/* sa_ignore NO_NULL_CHK */
4103 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4104 					}
4105 				}
4106 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4107 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4108 				}
4109 				if (tp1->rec.data.chunk_was_revoked) {
4110 					/* deflate the cwnd */
4111 					tp1->whoTo->cwnd -= tp1->book_size;
4112 					tp1->rec.data.chunk_was_revoked = 0;
4113 				}
4114 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4115 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4116 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4117 #ifdef INVARIANTS
4118 					} else {
4119 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4120 #endif
4121 					}
4122 				}
4123 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4124 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4125 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4126 					asoc->trigger_reset = 1;
4127 				}
4128 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4129 				if (tp1->data) {
4130 					/* sa_ignore NO_NULL_CHK */
4131 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4132 					sctp_m_freem(tp1->data);
4133 					tp1->data = NULL;
4134 				}
4135 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136 					sctp_log_sack(asoc->last_acked_seq,
4137 					    cumack,
4138 					    tp1->rec.data.tsn,
4139 					    0,
4140 					    0,
4141 					    SCTP_LOG_FREE_SENT);
4142 				}
4143 				asoc->sent_queue_cnt--;
4144 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4145 			} else {
4146 				break;
4147 			}
4148 		}
4149 
4150 	}
4151 	/* sa_ignore NO_NULL_CHK */
4152 	if (stcb->sctp_socket) {
4153 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4154 		struct socket *so;
4155 
4156 #endif
4157 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4158 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4159 			/* sa_ignore NO_NULL_CHK */
4160 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4161 		}
4162 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4163 		so = SCTP_INP_SO(stcb->sctp_ep);
4164 		atomic_add_int(&stcb->asoc.refcnt, 1);
4165 		SCTP_TCB_UNLOCK(stcb);
4166 		SCTP_SOCKET_LOCK(so, 1);
4167 		SCTP_TCB_LOCK(stcb);
4168 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4169 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4170 			/* assoc was freed while we were unlocked */
4171 			SCTP_SOCKET_UNLOCK(so, 1);
4172 			return;
4173 		}
4174 #endif
4175 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4176 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4177 		SCTP_SOCKET_UNLOCK(so, 1);
4178 #endif
4179 	} else {
4180 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4181 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4182 		}
4183 	}
4184 
4185 	/* JRS - Use the congestion control given in the CC module */
4186 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4187 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4188 			if (net->net_ack2 > 0) {
4189 				/*
4190 				 * Karn's rule applies to clearing error
4191 				 * count, this is optional.
4192 				 */
4193 				net->error_count = 0;
4194 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4195 					/* addr came good */
4196 					net->dest_state |= SCTP_ADDR_REACHABLE;
4197 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4198 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4199 				}
4200 				if (net == stcb->asoc.primary_destination) {
4201 					if (stcb->asoc.alternate) {
4202 						/*
4203 						 * release the alternate,
4204 						 * primary is good
4205 						 */
4206 						sctp_free_remote_addr(stcb->asoc.alternate);
4207 						stcb->asoc.alternate = NULL;
4208 					}
4209 				}
4210 				if (net->dest_state & SCTP_ADDR_PF) {
4211 					net->dest_state &= ~SCTP_ADDR_PF;
4212 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4213 					    stcb->sctp_ep, stcb, net,
4214 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4215 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4216 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4217 					/* Done with this net */
4218 					net->net_ack = 0;
4219 				}
4220 				/* restore any doubled timers */
4221 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4222 				if (net->RTO < stcb->asoc.minrto) {
4223 					net->RTO = stcb->asoc.minrto;
4224 				}
4225 				if (net->RTO > stcb->asoc.maxrto) {
4226 					net->RTO = stcb->asoc.maxrto;
4227 				}
4228 			}
4229 		}
4230 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4231 	}
4232 	asoc->last_acked_seq = cumack;
4233 
4234 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4235 		/* nothing left in-flight */
4236 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4237 			net->flight_size = 0;
4238 			net->partial_bytes_acked = 0;
4239 		}
4240 		asoc->total_flight = 0;
4241 		asoc->total_flight_count = 0;
4242 	}
4243 
4244 	/* RWND update */
4245 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4246 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4247 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4248 		/* SWS sender side engages */
4249 		asoc->peers_rwnd = 0;
4250 	}
4251 	if (asoc->peers_rwnd > old_rwnd) {
4252 		win_probe_recovery = 1;
4253 	}
4254 	/* Now assure a timer where data is queued at */
4255 again:
4256 	j = 0;
4257 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4258 		if (win_probe_recovery && (net->window_probe)) {
4259 			win_probe_recovered = 1;
4260 			/*
4261 			 * Find first chunk that was used with window probe
4262 			 * and clear the sent
4263 			 */
4264 			/* sa_ignore FREED_MEMORY */
4265 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4266 				if (tp1->window_probe) {
4267 					/* move back to data send queue */
4268 					sctp_window_probe_recovery(stcb, asoc, tp1);
4269 					break;
4270 				}
4271 			}
4272 		}
4273 		if (net->flight_size) {
4274 			j++;
4275 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4276 			if (net->window_probe) {
4277 				net->window_probe = 0;
4278 			}
4279 		} else {
4280 			if (net->window_probe) {
4281 				/*
4282 				 * In window probes we must assure a timer
4283 				 * is still running there
4284 				 */
4285 				net->window_probe = 0;
4286 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4287 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4288 				}
4289 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4290 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4291 				    stcb, net,
4292 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4293 			}
4294 		}
4295 	}
4296 	if ((j == 0) &&
4297 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4298 	    (asoc->sent_queue_retran_cnt == 0) &&
4299 	    (win_probe_recovered == 0) &&
4300 	    (done_once == 0)) {
4301 		/*
4302 		 * huh, this should not happen unless all packets are
4303 		 * PR-SCTP and marked to skip of course.
4304 		 */
4305 		if (sctp_fs_audit(asoc)) {
4306 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4307 				net->flight_size = 0;
4308 			}
4309 			asoc->total_flight = 0;
4310 			asoc->total_flight_count = 0;
4311 			asoc->sent_queue_retran_cnt = 0;
4312 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4313 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4314 					sctp_flight_size_increase(tp1);
4315 					sctp_total_flight_increase(stcb, tp1);
4316 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4317 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4318 				}
4319 			}
4320 		}
4321 		done_once = 1;
4322 		goto again;
4323 	}
4324 	/**********************************/
4325 	/* Now what about shutdown issues */
4326 	/**********************************/
4327 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4328 		/* nothing left on sendqueue.. consider done */
4329 		/* clean up */
4330 		if ((asoc->stream_queue_cnt == 1) &&
4331 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4332 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4333 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4334 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4335 		}
4336 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4337 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4338 		    (asoc->stream_queue_cnt == 1) &&
4339 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4340 			struct mbuf *op_err;
4341 
4342 			*abort_now = 1;
4343 			/* XXX */
4344 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4345 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4346 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4347 			return;
4348 		}
4349 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4350 		    (asoc->stream_queue_cnt == 0)) {
4351 			struct sctp_nets *netp;
4352 
4353 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4354 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4355 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4356 			}
4357 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4358 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4359 			sctp_stop_timers_for_shutdown(stcb);
4360 			if (asoc->alternate) {
4361 				netp = asoc->alternate;
4362 			} else {
4363 				netp = asoc->primary_destination;
4364 			}
4365 			sctp_send_shutdown(stcb, netp);
4366 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4367 			    stcb->sctp_ep, stcb, netp);
4368 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4369 			    stcb->sctp_ep, stcb, netp);
4370 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4371 		    (asoc->stream_queue_cnt == 0)) {
4372 			struct sctp_nets *netp;
4373 
4374 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4375 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4376 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4377 			sctp_stop_timers_for_shutdown(stcb);
4378 			if (asoc->alternate) {
4379 				netp = asoc->alternate;
4380 			} else {
4381 				netp = asoc->primary_destination;
4382 			}
4383 			sctp_send_shutdown_ack(stcb, netp);
4384 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4385 			    stcb->sctp_ep, stcb, netp);
4386 		}
4387 	}
4388 	/*********************************************/
4389 	/* Here we perform PR-SCTP procedures        */
4390 	/* (section 4.2)                             */
4391 	/*********************************************/
4392 	/* C1. update advancedPeerAckPoint */
4393 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4394 		asoc->advanced_peer_ack_point = cumack;
4395 	}
4396 	/* PR-Sctp issues need to be addressed too */
4397 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4398 		struct sctp_tmit_chunk *lchk;
4399 		uint32_t old_adv_peer_ack_point;
4400 
4401 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4402 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4403 		/* C3. See if we need to send a Fwd-TSN */
4404 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4405 			/*
4406 			 * ISSUE with ECN, see FWD-TSN processing.
4407 			 */
4408 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4409 				send_forward_tsn(stcb, asoc);
4410 			} else if (lchk) {
4411 				/* try to FR fwd-tsn's that get lost too */
4412 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4413 					send_forward_tsn(stcb, asoc);
4414 				}
4415 			}
4416 		}
4417 		if (lchk) {
4418 			/* Assure a timer is up */
4419 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4420 			    stcb->sctp_ep, stcb, lchk->whoTo);
4421 		}
4422 	}
4423 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4424 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4425 		    rwnd,
4426 		    stcb->asoc.peers_rwnd,
4427 		    stcb->asoc.total_flight,
4428 		    stcb->asoc.total_output_queue_size);
4429 	}
4430 }
4431 
4432 void
4433 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4434     struct sctp_tcb *stcb,
4435     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4436     int *abort_now, uint8_t flags,
4437     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4438 {
4439 	struct sctp_association *asoc;
4440 	struct sctp_tmit_chunk *tp1, *tp2;
4441 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4442 	uint16_t wake_him = 0;
4443 	uint32_t send_s = 0;
4444 	long j;
4445 	int accum_moved = 0;
4446 	int will_exit_fast_recovery = 0;
4447 	uint32_t a_rwnd, old_rwnd;
4448 	int win_probe_recovery = 0;
4449 	int win_probe_recovered = 0;
4450 	struct sctp_nets *net = NULL;
4451 	int done_once;
4452 	int rto_ok = 1;
4453 	uint8_t reneged_all = 0;
4454 	uint8_t cmt_dac_flag;
4455 
4456 	/*
4457 	 * we take any chance we can to service our queues since we cannot
4458 	 * get awoken when the socket is read from :<
4459 	 */
4460 	/*
4461 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4462 	 * old sack, if so discard. 2) If there is nothing left in the send
4463 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4464 	 * too, update any rwnd change and verify no timers are running.
4465 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4466 	 * moved process these first and note that it moved. 4) Process any
4467 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4468 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4469 	 * sync up flightsizes and things, stop all timers and also check
4470 	 * for shutdown_pending state. If so then go ahead and send off the
4471 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4472 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4473 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4474 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4475 	 * if in shutdown_recv state.
4476 	 */
4477 	SCTP_TCB_LOCK_ASSERT(stcb);
4478 	/* CMT DAC algo */
4479 	this_sack_lowest_newack = 0;
4480 	SCTP_STAT_INCR(sctps_slowpath_sack);
4481 	last_tsn = cum_ack;
4482 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4483 #ifdef SCTP_ASOCLOG_OF_TSNS
4484 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4485 	stcb->asoc.cumack_log_at++;
4486 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4487 		stcb->asoc.cumack_log_at = 0;
4488 	}
4489 #endif
4490 	a_rwnd = rwnd;
4491 
4492 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4493 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4494 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4495 	}
4496 
4497 	old_rwnd = stcb->asoc.peers_rwnd;
4498 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4499 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4500 		    stcb->asoc.overall_error_count,
4501 		    0,
4502 		    SCTP_FROM_SCTP_INDATA,
4503 		    __LINE__);
4504 	}
4505 	stcb->asoc.overall_error_count = 0;
4506 	asoc = &stcb->asoc;
4507 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4508 		sctp_log_sack(asoc->last_acked_seq,
4509 		    cum_ack,
4510 		    0,
4511 		    num_seg,
4512 		    num_dup,
4513 		    SCTP_LOG_NEW_SACK);
4514 	}
4515 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4516 		uint16_t i;
4517 		uint32_t *dupdata, dblock;
4518 
4519 		for (i = 0; i < num_dup; i++) {
4520 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4521 			    sizeof(uint32_t), (uint8_t *)&dblock);
4522 			if (dupdata == NULL) {
4523 				break;
4524 			}
4525 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4526 		}
4527 	}
4528 	/* reality check */
4529 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4530 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4531 		    sctpchunk_listhead);
4532 		send_s = tp1->rec.data.tsn + 1;
4533 	} else {
4534 		tp1 = NULL;
4535 		send_s = asoc->sending_seq;
4536 	}
4537 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4538 		struct mbuf *op_err;
4539 		char msg[SCTP_DIAG_INFO_LEN];
4540 
4541 		/*
4542 		 * no way, we have not even sent this TSN out yet. Peer is
4543 		 * hopelessly messed up with us.
4544 		 */
4545 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4546 		    cum_ack, send_s);
4547 		if (tp1) {
4548 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4549 			    tp1->rec.data.tsn, (void *)tp1);
4550 		}
4551 hopeless_peer:
4552 		*abort_now = 1;
4553 		/* XXX */
4554 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4555 		    cum_ack, send_s);
4556 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4557 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4558 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4559 		return;
4560 	}
4561 	/**********************/
4562 	/* 1) check the range */
4563 	/**********************/
4564 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4565 		/* acking something behind */
4566 		return;
4567 	}
4568 
4569 	/* update the Rwnd of the peer */
4570 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4571 	    TAILQ_EMPTY(&asoc->send_queue) &&
4572 	    (asoc->stream_queue_cnt == 0)) {
4573 		/* nothing left on send/sent and strmq */
4574 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4575 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4576 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4577 		}
4578 		asoc->peers_rwnd = a_rwnd;
4579 		if (asoc->sent_queue_retran_cnt) {
4580 			asoc->sent_queue_retran_cnt = 0;
4581 		}
4582 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4583 			/* SWS sender side engages */
4584 			asoc->peers_rwnd = 0;
4585 		}
4586 		/* stop any timers */
4587 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4589 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4590 			net->partial_bytes_acked = 0;
4591 			net->flight_size = 0;
4592 		}
4593 		asoc->total_flight = 0;
4594 		asoc->total_flight_count = 0;
4595 		return;
4596 	}
4597 	/*
4598 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4599 	 * things. The total byte count acked is tracked in netAckSz AND
4600 	 * netAck2 is used to track the total bytes acked that are un-
4601 	 * amibguious and were never retransmitted. We track these on a per
4602 	 * destination address basis.
4603 	 */
4604 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4605 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4606 			/* Drag along the window_tsn for cwr's */
4607 			net->cwr_window_tsn = cum_ack;
4608 		}
4609 		net->prev_cwnd = net->cwnd;
4610 		net->net_ack = 0;
4611 		net->net_ack2 = 0;
4612 
4613 		/*
4614 		 * CMT: Reset CUC and Fast recovery algo variables before
4615 		 * SACK processing
4616 		 */
4617 		net->new_pseudo_cumack = 0;
4618 		net->will_exit_fast_recovery = 0;
4619 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4620 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4621 		}
4622 
4623 		/*
4624 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4625 		 * to be greater than the cumack. Also reset saw_newack to 0
4626 		 * for all dests.
4627 		 */
4628 		net->saw_newack = 0;
4629 		net->this_sack_highest_newack = last_tsn;
4630 	}
4631 	/* process the new consecutive TSN first */
4632 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4633 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4634 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4635 				accum_moved = 1;
4636 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4637 					/*
4638 					 * If it is less than ACKED, it is
4639 					 * now no-longer in flight. Higher
4640 					 * values may occur during marking
4641 					 */
4642 					if ((tp1->whoTo->dest_state &
4643 					    SCTP_ADDR_UNCONFIRMED) &&
4644 					    (tp1->snd_count < 2)) {
4645 						/*
4646 						 * If there was no retran
4647 						 * and the address is
4648 						 * un-confirmed and we sent
4649 						 * there and are now
4650 						 * sacked.. its confirmed,
4651 						 * mark it so.
4652 						 */
4653 						tp1->whoTo->dest_state &=
4654 						    ~SCTP_ADDR_UNCONFIRMED;
4655 					}
4656 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4657 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4658 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4659 							    tp1->whoTo->flight_size,
4660 							    tp1->book_size,
4661 							    (uint32_t)(uintptr_t)tp1->whoTo,
4662 							    tp1->rec.data.tsn);
4663 						}
4664 						sctp_flight_size_decrease(tp1);
4665 						sctp_total_flight_decrease(stcb, tp1);
4666 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4667 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4668 							    tp1);
4669 						}
4670 					}
4671 					tp1->whoTo->net_ack += tp1->send_size;
4672 
4673 					/* CMT SFR and DAC algos */
4674 					this_sack_lowest_newack = tp1->rec.data.tsn;
4675 					tp1->whoTo->saw_newack = 1;
4676 
4677 					if (tp1->snd_count < 2) {
4678 						/*
4679 						 * True non-retransmited
4680 						 * chunk
4681 						 */
4682 						tp1->whoTo->net_ack2 +=
4683 						    tp1->send_size;
4684 
4685 						/* update RTO too? */
4686 						if (tp1->do_rtt) {
4687 							if (rto_ok) {
4688 								tp1->whoTo->RTO =
4689 								    sctp_calculate_rto(stcb,
4690 								    asoc, tp1->whoTo,
4691 								    &tp1->sent_rcv_time,
4692 								    SCTP_RTT_FROM_DATA);
4693 								rto_ok = 0;
4694 							}
4695 							if (tp1->whoTo->rto_needed == 0) {
4696 								tp1->whoTo->rto_needed = 1;
4697 							}
4698 							tp1->do_rtt = 0;
4699 						}
4700 					}
4701 					/*
4702 					 * CMT: CUCv2 algorithm. From the
4703 					 * cumack'd TSNs, for each TSN being
4704 					 * acked for the first time, set the
4705 					 * following variables for the
4706 					 * corresp destination.
4707 					 * new_pseudo_cumack will trigger a
4708 					 * cwnd update.
4709 					 * find_(rtx_)pseudo_cumack will
4710 					 * trigger search for the next
4711 					 * expected (rtx-)pseudo-cumack.
4712 					 */
4713 					tp1->whoTo->new_pseudo_cumack = 1;
4714 					tp1->whoTo->find_pseudo_cumack = 1;
4715 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4716 
4717 
4718 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4719 						sctp_log_sack(asoc->last_acked_seq,
4720 						    cum_ack,
4721 						    tp1->rec.data.tsn,
4722 						    0,
4723 						    0,
4724 						    SCTP_LOG_TSN_ACKED);
4725 					}
4726 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4727 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4728 					}
4729 				}
4730 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4731 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4732 #ifdef SCTP_AUDITING_ENABLED
4733 					sctp_audit_log(0xB3,
4734 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4735 #endif
4736 				}
4737 				if (tp1->rec.data.chunk_was_revoked) {
4738 					/* deflate the cwnd */
4739 					tp1->whoTo->cwnd -= tp1->book_size;
4740 					tp1->rec.data.chunk_was_revoked = 0;
4741 				}
4742 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4743 					tp1->sent = SCTP_DATAGRAM_ACKED;
4744 				}
4745 			}
4746 		} else {
4747 			break;
4748 		}
4749 	}
4750 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4751 	/* always set this up to cum-ack */
4752 	asoc->this_sack_highest_gap = last_tsn;
4753 
4754 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4755 
4756 		/*
4757 		 * thisSackHighestGap will increase while handling NEW
4758 		 * segments this_sack_highest_newack will increase while
4759 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4760 		 * used for CMT DAC algo. saw_newack will also change.
4761 		 */
4762 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4763 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4764 		    num_seg, num_nr_seg, &rto_ok)) {
4765 			wake_him++;
4766 		}
4767 		/*
4768 		 * validate the biggest_tsn_acked in the gap acks if strict
4769 		 * adherence is wanted.
4770 		 */
4771 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4772 			/*
4773 			 * peer is either confused or we are under attack.
4774 			 * We must abort.
4775 			 */
4776 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4777 			    biggest_tsn_acked, send_s);
4778 			goto hopeless_peer;
4779 		}
4780 	}
4781 	/*******************************************/
4782 	/* cancel ALL T3-send timer if accum moved */
4783 	/*******************************************/
4784 	if (asoc->sctp_cmt_on_off > 0) {
4785 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4786 			if (net->new_pseudo_cumack)
4787 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4788 				    stcb, net,
4789 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4790 
4791 		}
4792 	} else {
4793 		if (accum_moved) {
4794 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4795 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4796 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4797 			}
4798 		}
4799 	}
4800 	/********************************************/
4801 	/* drop the acked chunks from the sentqueue */
4802 	/********************************************/
4803 	asoc->last_acked_seq = cum_ack;
4804 
4805 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4806 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4807 			break;
4808 		}
4809 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4810 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4811 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4812 #ifdef INVARIANTS
4813 			} else {
4814 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4815 #endif
4816 			}
4817 		}
4818 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4819 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4820 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4821 			asoc->trigger_reset = 1;
4822 		}
4823 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4824 		if (PR_SCTP_ENABLED(tp1->flags)) {
4825 			if (asoc->pr_sctp_cnt != 0)
4826 				asoc->pr_sctp_cnt--;
4827 		}
4828 		asoc->sent_queue_cnt--;
4829 		if (tp1->data) {
4830 			/* sa_ignore NO_NULL_CHK */
4831 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4832 			sctp_m_freem(tp1->data);
4833 			tp1->data = NULL;
4834 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4835 				asoc->sent_queue_cnt_removeable--;
4836 			}
4837 		}
4838 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4839 			sctp_log_sack(asoc->last_acked_seq,
4840 			    cum_ack,
4841 			    tp1->rec.data.tsn,
4842 			    0,
4843 			    0,
4844 			    SCTP_LOG_FREE_SENT);
4845 		}
4846 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4847 		wake_him++;
4848 	}
4849 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4850 #ifdef INVARIANTS
4851 		panic("Warning flight size is positive and should be 0");
4852 #else
4853 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4854 		    asoc->total_flight);
4855 #endif
4856 		asoc->total_flight = 0;
4857 	}
4858 
4859 	/* sa_ignore NO_NULL_CHK */
4860 	if ((wake_him) && (stcb->sctp_socket)) {
4861 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4862 		struct socket *so;
4863 
4864 #endif
4865 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4866 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4867 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4868 		}
4869 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4870 		so = SCTP_INP_SO(stcb->sctp_ep);
4871 		atomic_add_int(&stcb->asoc.refcnt, 1);
4872 		SCTP_TCB_UNLOCK(stcb);
4873 		SCTP_SOCKET_LOCK(so, 1);
4874 		SCTP_TCB_LOCK(stcb);
4875 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4876 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4877 			/* assoc was freed while we were unlocked */
4878 			SCTP_SOCKET_UNLOCK(so, 1);
4879 			return;
4880 		}
4881 #endif
4882 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4883 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4884 		SCTP_SOCKET_UNLOCK(so, 1);
4885 #endif
4886 	} else {
4887 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4888 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4889 		}
4890 	}
4891 
4892 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4893 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4894 			/* Setup so we will exit RFC2582 fast recovery */
4895 			will_exit_fast_recovery = 1;
4896 		}
4897 	}
4898 	/*
4899 	 * Check for revoked fragments:
4900 	 *
4901 	 * if Previous sack - Had no frags then we can't have any revoked if
4902 	 * Previous sack - Had frag's then - If we now have frags aka
4903 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4904 	 * some of them. else - The peer revoked all ACKED fragments, since
4905 	 * we had some before and now we have NONE.
4906 	 */
4907 
4908 	if (num_seg) {
4909 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4910 		asoc->saw_sack_with_frags = 1;
4911 	} else if (asoc->saw_sack_with_frags) {
4912 		int cnt_revoked = 0;
4913 
4914 		/* Peer revoked all dg's marked or acked */
4915 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4916 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4917 				tp1->sent = SCTP_DATAGRAM_SENT;
4918 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4919 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4920 					    tp1->whoTo->flight_size,
4921 					    tp1->book_size,
4922 					    (uint32_t)(uintptr_t)tp1->whoTo,
4923 					    tp1->rec.data.tsn);
4924 				}
4925 				sctp_flight_size_increase(tp1);
4926 				sctp_total_flight_increase(stcb, tp1);
4927 				tp1->rec.data.chunk_was_revoked = 1;
4928 				/*
4929 				 * To ensure that this increase in
4930 				 * flightsize, which is artificial, does not
4931 				 * throttle the sender, we also increase the
4932 				 * cwnd artificially.
4933 				 */
4934 				tp1->whoTo->cwnd += tp1->book_size;
4935 				cnt_revoked++;
4936 			}
4937 		}
4938 		if (cnt_revoked) {
4939 			reneged_all = 1;
4940 		}
4941 		asoc->saw_sack_with_frags = 0;
4942 	}
4943 	if (num_nr_seg > 0)
4944 		asoc->saw_sack_with_nr_frags = 1;
4945 	else
4946 		asoc->saw_sack_with_nr_frags = 0;
4947 
4948 	/* JRS - Use the congestion control given in the CC module */
4949 	if (ecne_seen == 0) {
4950 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4951 			if (net->net_ack2 > 0) {
4952 				/*
4953 				 * Karn's rule applies to clearing error
4954 				 * count, this is optional.
4955 				 */
4956 				net->error_count = 0;
4957 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4958 					/* addr came good */
4959 					net->dest_state |= SCTP_ADDR_REACHABLE;
4960 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4961 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4962 				}
4963 
4964 				if (net == stcb->asoc.primary_destination) {
4965 					if (stcb->asoc.alternate) {
4966 						/*
4967 						 * release the alternate,
4968 						 * primary is good
4969 						 */
4970 						sctp_free_remote_addr(stcb->asoc.alternate);
4971 						stcb->asoc.alternate = NULL;
4972 					}
4973 				}
4974 
4975 				if (net->dest_state & SCTP_ADDR_PF) {
4976 					net->dest_state &= ~SCTP_ADDR_PF;
4977 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4978 					    stcb->sctp_ep, stcb, net,
4979 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4980 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4981 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4982 					/* Done with this net */
4983 					net->net_ack = 0;
4984 				}
4985 				/* restore any doubled timers */
4986 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4987 				if (net->RTO < stcb->asoc.minrto) {
4988 					net->RTO = stcb->asoc.minrto;
4989 				}
4990 				if (net->RTO > stcb->asoc.maxrto) {
4991 					net->RTO = stcb->asoc.maxrto;
4992 				}
4993 			}
4994 		}
4995 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4996 	}
4997 
4998 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4999 		/* nothing left in-flight */
5000 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5001 			/* stop all timers */
5002 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5003 			    stcb, net,
5004 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5005 			net->flight_size = 0;
5006 			net->partial_bytes_acked = 0;
5007 		}
5008 		asoc->total_flight = 0;
5009 		asoc->total_flight_count = 0;
5010 	}
5011 
5012 	/**********************************/
5013 	/* Now what about shutdown issues */
5014 	/**********************************/
5015 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5016 		/* nothing left on sendqueue.. consider done */
5017 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5018 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5019 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5020 		}
5021 		asoc->peers_rwnd = a_rwnd;
5022 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5023 			/* SWS sender side engages */
5024 			asoc->peers_rwnd = 0;
5025 		}
5026 		/* clean up */
5027 		if ((asoc->stream_queue_cnt == 1) &&
5028 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5029 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5030 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5031 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5032 		}
5033 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5034 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5035 		    (asoc->stream_queue_cnt == 1) &&
5036 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5037 			struct mbuf *op_err;
5038 
5039 			*abort_now = 1;
5040 			/* XXX */
5041 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5042 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5043 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5044 			return;
5045 		}
5046 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5047 		    (asoc->stream_queue_cnt == 0)) {
5048 			struct sctp_nets *netp;
5049 
5050 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5051 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5052 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5053 			}
5054 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5055 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5056 			sctp_stop_timers_for_shutdown(stcb);
5057 			if (asoc->alternate) {
5058 				netp = asoc->alternate;
5059 			} else {
5060 				netp = asoc->primary_destination;
5061 			}
5062 			sctp_send_shutdown(stcb, netp);
5063 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5064 			    stcb->sctp_ep, stcb, netp);
5065 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5066 			    stcb->sctp_ep, stcb, netp);
5067 			return;
5068 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5069 		    (asoc->stream_queue_cnt == 0)) {
5070 			struct sctp_nets *netp;
5071 
5072 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5073 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5074 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5075 			sctp_stop_timers_for_shutdown(stcb);
5076 			if (asoc->alternate) {
5077 				netp = asoc->alternate;
5078 			} else {
5079 				netp = asoc->primary_destination;
5080 			}
5081 			sctp_send_shutdown_ack(stcb, netp);
5082 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5083 			    stcb->sctp_ep, stcb, netp);
5084 			return;
5085 		}
5086 	}
5087 	/*
5088 	 * Now here we are going to recycle net_ack for a different use...
5089 	 * HEADS UP.
5090 	 */
5091 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5092 		net->net_ack = 0;
5093 	}
5094 
5095 	/*
5096 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5097 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5098 	 * automatically ensure that.
5099 	 */
5100 	if ((asoc->sctp_cmt_on_off > 0) &&
5101 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5102 	    (cmt_dac_flag == 0)) {
5103 		this_sack_lowest_newack = cum_ack;
5104 	}
5105 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5106 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5107 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5108 	}
5109 	/* JRS - Use the congestion control given in the CC module */
5110 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5111 
5112 	/* Now are we exiting loss recovery ? */
5113 	if (will_exit_fast_recovery) {
5114 		/* Ok, we must exit fast recovery */
5115 		asoc->fast_retran_loss_recovery = 0;
5116 	}
5117 	if ((asoc->sat_t3_loss_recovery) &&
5118 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5119 		/* end satellite t3 loss recovery */
5120 		asoc->sat_t3_loss_recovery = 0;
5121 	}
5122 	/*
5123 	 * CMT Fast recovery
5124 	 */
5125 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5126 		if (net->will_exit_fast_recovery) {
5127 			/* Ok, we must exit fast recovery */
5128 			net->fast_retran_loss_recovery = 0;
5129 		}
5130 	}
5131 
5132 	/* Adjust and set the new rwnd value */
5133 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5134 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5135 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5136 	}
5137 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5138 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5139 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5140 		/* SWS sender side engages */
5141 		asoc->peers_rwnd = 0;
5142 	}
5143 	if (asoc->peers_rwnd > old_rwnd) {
5144 		win_probe_recovery = 1;
5145 	}
5146 
5147 	/*
5148 	 * Now we must setup so we have a timer up for anyone with
5149 	 * outstanding data.
5150 	 */
5151 	done_once = 0;
5152 again:
5153 	j = 0;
5154 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5155 		if (win_probe_recovery && (net->window_probe)) {
5156 			win_probe_recovered = 1;
5157 			/*-
5158 			 * Find first chunk that was used with
5159 			 * window probe and clear the event. Put
5160 			 * it back into the send queue as if has
5161 			 * not been sent.
5162 			 */
5163 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5164 				if (tp1->window_probe) {
5165 					sctp_window_probe_recovery(stcb, asoc, tp1);
5166 					break;
5167 				}
5168 			}
5169 		}
5170 		if (net->flight_size) {
5171 			j++;
5172 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5173 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5174 				    stcb->sctp_ep, stcb, net);
5175 			}
5176 			if (net->window_probe) {
5177 				net->window_probe = 0;
5178 			}
5179 		} else {
5180 			if (net->window_probe) {
5181 				/*
5182 				 * In window probes we must assure a timer
5183 				 * is still running there
5184 				 */
5185 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5186 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5187 					    stcb->sctp_ep, stcb, net);
5188 
5189 				}
5190 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5191 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5192 				    stcb, net,
5193 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5194 			}
5195 		}
5196 	}
5197 	if ((j == 0) &&
5198 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5199 	    (asoc->sent_queue_retran_cnt == 0) &&
5200 	    (win_probe_recovered == 0) &&
5201 	    (done_once == 0)) {
5202 		/*
5203 		 * huh, this should not happen unless all packets are
5204 		 * PR-SCTP and marked to skip of course.
5205 		 */
5206 		if (sctp_fs_audit(asoc)) {
5207 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5208 				net->flight_size = 0;
5209 			}
5210 			asoc->total_flight = 0;
5211 			asoc->total_flight_count = 0;
5212 			asoc->sent_queue_retran_cnt = 0;
5213 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5214 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5215 					sctp_flight_size_increase(tp1);
5216 					sctp_total_flight_increase(stcb, tp1);
5217 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5218 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5219 				}
5220 			}
5221 		}
5222 		done_once = 1;
5223 		goto again;
5224 	}
5225 	/*********************************************/
5226 	/* Here we perform PR-SCTP procedures        */
5227 	/* (section 4.2)                             */
5228 	/*********************************************/
5229 	/* C1. update advancedPeerAckPoint */
5230 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5231 		asoc->advanced_peer_ack_point = cum_ack;
5232 	}
5233 	/* C2. try to further move advancedPeerAckPoint ahead */
5234 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5235 		struct sctp_tmit_chunk *lchk;
5236 		uint32_t old_adv_peer_ack_point;
5237 
5238 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5239 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5240 		/* C3. See if we need to send a Fwd-TSN */
5241 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5242 			/*
5243 			 * ISSUE with ECN, see FWD-TSN processing.
5244 			 */
5245 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5246 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5247 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5248 				    old_adv_peer_ack_point);
5249 			}
5250 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5251 				send_forward_tsn(stcb, asoc);
5252 			} else if (lchk) {
5253 				/* try to FR fwd-tsn's that get lost too */
5254 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5255 					send_forward_tsn(stcb, asoc);
5256 				}
5257 			}
5258 		}
5259 		if (lchk) {
5260 			/* Assure a timer is up */
5261 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5262 			    stcb->sctp_ep, stcb, lchk->whoTo);
5263 		}
5264 	}
5265 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5266 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5267 		    a_rwnd,
5268 		    stcb->asoc.peers_rwnd,
5269 		    stcb->asoc.total_flight,
5270 		    stcb->asoc.total_output_queue_size);
5271 	}
5272 }
5273 
5274 void
5275 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5276 {
5277 	/* Copy cum-ack */
5278 	uint32_t cum_ack, a_rwnd;
5279 
5280 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5281 	/* Arrange so a_rwnd does NOT change */
5282 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5283 
5284 	/* Now call the express sack handling */
5285 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5286 }
5287 
5288 static void
5289 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5290     struct sctp_stream_in *strmin)
5291 {
5292 	struct sctp_queued_to_read *control, *ncontrol;
5293 	struct sctp_association *asoc;
5294 	uint32_t mid;
5295 	int need_reasm_check = 0;
5296 
5297 	asoc = &stcb->asoc;
5298 	mid = strmin->last_mid_delivered;
5299 	/*
5300 	 * First deliver anything prior to and including the stream no that
5301 	 * came in.
5302 	 */
5303 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5304 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5305 			/* this is deliverable now */
5306 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5307 				if (control->on_strm_q) {
5308 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5309 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5310 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5311 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5312 #ifdef INVARIANTS
5313 					} else {
5314 						panic("strmin: %p ctl: %p unknown %d",
5315 						    strmin, control, control->on_strm_q);
5316 #endif
5317 					}
5318 					control->on_strm_q = 0;
5319 				}
5320 				/* subtract pending on streams */
5321 				if (asoc->size_on_all_streams >= control->length) {
5322 					asoc->size_on_all_streams -= control->length;
5323 				} else {
5324 #ifdef INVARIANTS
5325 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5326 #else
5327 					asoc->size_on_all_streams = 0;
5328 #endif
5329 				}
5330 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5331 				/* deliver it to at least the delivery-q */
5332 				if (stcb->sctp_socket) {
5333 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5334 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5335 					    control,
5336 					    &stcb->sctp_socket->so_rcv,
5337 					    1, SCTP_READ_LOCK_HELD,
5338 					    SCTP_SO_NOT_LOCKED);
5339 				}
5340 			} else {
5341 				/* Its a fragmented message */
5342 				if (control->first_frag_seen) {
5343 					/*
5344 					 * Make it so this is next to
5345 					 * deliver, we restore later
5346 					 */
5347 					strmin->last_mid_delivered = control->mid - 1;
5348 					need_reasm_check = 1;
5349 					break;
5350 				}
5351 			}
5352 		} else {
5353 			/* no more delivery now. */
5354 			break;
5355 		}
5356 	}
5357 	if (need_reasm_check) {
5358 		int ret;
5359 
5360 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5361 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5362 			/* Restore the next to deliver unless we are ahead */
5363 			strmin->last_mid_delivered = mid;
5364 		}
5365 		if (ret == 0) {
5366 			/* Left the front Partial one on */
5367 			return;
5368 		}
5369 		need_reasm_check = 0;
5370 	}
5371 	/*
5372 	 * now we must deliver things in queue the normal way  if any are
5373 	 * now ready.
5374 	 */
5375 	mid = strmin->last_mid_delivered + 1;
5376 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5377 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5378 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5379 				/* this is deliverable now */
5380 				if (control->on_strm_q) {
5381 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5382 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5383 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5384 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5385 #ifdef INVARIANTS
5386 					} else {
5387 						panic("strmin: %p ctl: %p unknown %d",
5388 						    strmin, control, control->on_strm_q);
5389 #endif
5390 					}
5391 					control->on_strm_q = 0;
5392 				}
5393 				/* subtract pending on streams */
5394 				if (asoc->size_on_all_streams >= control->length) {
5395 					asoc->size_on_all_streams -= control->length;
5396 				} else {
5397 #ifdef INVARIANTS
5398 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5399 #else
5400 					asoc->size_on_all_streams = 0;
5401 #endif
5402 				}
5403 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5404 				/* deliver it to at least the delivery-q */
5405 				strmin->last_mid_delivered = control->mid;
5406 				if (stcb->sctp_socket) {
5407 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5408 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5409 					    control,
5410 					    &stcb->sctp_socket->so_rcv, 1,
5411 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5412 
5413 				}
5414 				mid = strmin->last_mid_delivered + 1;
5415 			} else {
5416 				/* Its a fragmented message */
5417 				if (control->first_frag_seen) {
5418 					/*
5419 					 * Make it so this is next to
5420 					 * deliver
5421 					 */
5422 					strmin->last_mid_delivered = control->mid - 1;
5423 					need_reasm_check = 1;
5424 					break;
5425 				}
5426 			}
5427 		} else {
5428 			break;
5429 		}
5430 	}
5431 	if (need_reasm_check) {
5432 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5433 	}
5434 }
5435 
5436 
5437 
5438 static void
5439 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5440     struct sctp_association *asoc,
5441     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5442 {
5443 	struct sctp_queued_to_read *control;
5444 	struct sctp_stream_in *strm;
5445 	struct sctp_tmit_chunk *chk, *nchk;
5446 	int cnt_removed = 0;
5447 
5448 	/*
5449 	 * For now large messages held on the stream reasm that are complete
5450 	 * will be tossed too. We could in theory do more work to spin
5451 	 * through and stop after dumping one msg aka seeing the start of a
5452 	 * new msg at the head, and call the delivery function... to see if
5453 	 * it can be delivered... But for now we just dump everything on the
5454 	 * queue.
5455 	 */
5456 	strm = &asoc->strmin[stream];
5457 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5458 	if (control == NULL) {
5459 		/* Not found */
5460 		return;
5461 	}
5462 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5463 		return;
5464 	}
5465 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5466 		/* Purge hanging chunks */
5467 		if (!asoc->idata_supported && (ordered == 0)) {
5468 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5469 				break;
5470 			}
5471 		}
5472 		cnt_removed++;
5473 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5474 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5475 			asoc->size_on_reasm_queue -= chk->send_size;
5476 		} else {
5477 #ifdef INVARIANTS
5478 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5479 #else
5480 			asoc->size_on_reasm_queue = 0;
5481 #endif
5482 		}
5483 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5484 		if (chk->data) {
5485 			sctp_m_freem(chk->data);
5486 			chk->data = NULL;
5487 		}
5488 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5489 	}
5490 	if (!TAILQ_EMPTY(&control->reasm)) {
5491 		/* This has to be old data, unordered */
5492 		if (control->data) {
5493 			sctp_m_freem(control->data);
5494 			control->data = NULL;
5495 		}
5496 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5497 		chk = TAILQ_FIRST(&control->reasm);
5498 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5499 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5500 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5501 			    chk, SCTP_READ_LOCK_HELD);
5502 		}
5503 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5504 		return;
5505 	}
5506 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5507 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5508 		if (asoc->size_on_all_streams >= control->length) {
5509 			asoc->size_on_all_streams -= control->length;
5510 		} else {
5511 #ifdef INVARIANTS
5512 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5513 #else
5514 			asoc->size_on_all_streams = 0;
5515 #endif
5516 		}
5517 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5518 		control->on_strm_q = 0;
5519 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5520 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5521 		control->on_strm_q = 0;
5522 #ifdef INVARIANTS
5523 	} else if (control->on_strm_q) {
5524 		panic("strm: %p ctl: %p unknown %d",
5525 		    strm, control, control->on_strm_q);
5526 #endif
5527 	}
5528 	control->on_strm_q = 0;
5529 	if (control->on_read_q == 0) {
5530 		sctp_free_remote_addr(control->whoFrom);
5531 		if (control->data) {
5532 			sctp_m_freem(control->data);
5533 			control->data = NULL;
5534 		}
5535 		sctp_free_a_readq(stcb, control);
5536 	}
5537 }
5538 
5539 void
5540 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5541     struct sctp_forward_tsn_chunk *fwd,
5542     int *abort_flag, struct mbuf *m, int offset)
5543 {
5544 	/* The pr-sctp fwd tsn */
5545 	/*
5546 	 * here we will perform all the data receiver side steps for
5547 	 * processing FwdTSN, as required in by pr-sctp draft:
5548 	 *
5549 	 * Assume we get FwdTSN(x):
5550 	 *
5551 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5552 	 * + others we have 3) examine and update re-ordering queue on
5553 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5554 	 * report where we are.
5555 	 */
5556 	struct sctp_association *asoc;
5557 	uint32_t new_cum_tsn, gap;
5558 	unsigned int i, fwd_sz, m_size;
5559 	uint32_t str_seq;
5560 	struct sctp_stream_in *strm;
5561 	struct sctp_queued_to_read *control, *sv;
5562 
5563 	asoc = &stcb->asoc;
5564 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5565 		SCTPDBG(SCTP_DEBUG_INDATA1,
5566 		    "Bad size too small/big fwd-tsn\n");
5567 		return;
5568 	}
5569 	m_size = (stcb->asoc.mapping_array_size << 3);
5570 	/*************************************************************/
5571 	/* 1. Here we update local cumTSN and shift the bitmap array */
5572 	/*************************************************************/
5573 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5574 
5575 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5576 		/* Already got there ... */
5577 		return;
5578 	}
5579 	/*
5580 	 * now we know the new TSN is more advanced, let's find the actual
5581 	 * gap
5582 	 */
5583 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5584 	asoc->cumulative_tsn = new_cum_tsn;
5585 	if (gap >= m_size) {
5586 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5587 			struct mbuf *op_err;
5588 			char msg[SCTP_DIAG_INFO_LEN];
5589 
5590 			/*
5591 			 * out of range (of single byte chunks in the rwnd I
5592 			 * give out). This must be an attacker.
5593 			 */
5594 			*abort_flag = 1;
5595 			snprintf(msg, sizeof(msg),
5596 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5597 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5598 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5599 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5600 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5601 			return;
5602 		}
5603 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5604 
5605 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5606 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5607 		asoc->highest_tsn_inside_map = new_cum_tsn;
5608 
5609 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5610 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5611 
5612 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5613 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5614 		}
5615 	} else {
5616 		SCTP_TCB_LOCK_ASSERT(stcb);
5617 		for (i = 0; i <= gap; i++) {
5618 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5619 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5620 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5621 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5622 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5623 				}
5624 			}
5625 		}
5626 	}
5627 	/*************************************************************/
5628 	/* 2. Clear up re-assembly queue                             */
5629 	/*************************************************************/
5630 
5631 	/* This is now done as part of clearing up the stream/seq */
5632 	if (asoc->idata_supported == 0) {
5633 		uint16_t sid;
5634 
5635 		/* Flush all the un-ordered data based on cum-tsn */
5636 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5637 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5638 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5639 		}
5640 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5641 	}
5642 	/*******************************************************/
5643 	/* 3. Update the PR-stream re-ordering queues and fix  */
5644 	/* delivery issues as needed.                       */
5645 	/*******************************************************/
5646 	fwd_sz -= sizeof(*fwd);
5647 	if (m && fwd_sz) {
5648 		/* New method. */
5649 		unsigned int num_str;
5650 		uint32_t mid, cur_mid;
5651 		uint16_t sid;
5652 		uint16_t ordered, flags;
5653 		struct sctp_strseq *stseq, strseqbuf;
5654 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5655 
5656 		offset += sizeof(*fwd);
5657 
5658 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5659 		if (asoc->idata_supported) {
5660 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5661 		} else {
5662 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5663 		}
5664 		for (i = 0; i < num_str; i++) {
5665 			if (asoc->idata_supported) {
5666 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5667 				    sizeof(struct sctp_strseq_mid),
5668 				    (uint8_t *)&strseqbuf_m);
5669 				offset += sizeof(struct sctp_strseq_mid);
5670 				if (stseq_m == NULL) {
5671 					break;
5672 				}
5673 				sid = ntohs(stseq_m->sid);
5674 				mid = ntohl(stseq_m->mid);
5675 				flags = ntohs(stseq_m->flags);
5676 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5677 					ordered = 0;
5678 				} else {
5679 					ordered = 1;
5680 				}
5681 			} else {
5682 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5683 				    sizeof(struct sctp_strseq),
5684 				    (uint8_t *)&strseqbuf);
5685 				offset += sizeof(struct sctp_strseq);
5686 				if (stseq == NULL) {
5687 					break;
5688 				}
5689 				sid = ntohs(stseq->sid);
5690 				mid = (uint32_t)ntohs(stseq->ssn);
5691 				ordered = 1;
5692 			}
5693 			/* Convert */
5694 
5695 			/* now process */
5696 
5697 			/*
5698 			 * Ok we now look for the stream/seq on the read
5699 			 * queue where its not all delivered. If we find it
5700 			 * we transmute the read entry into a PDI_ABORTED.
5701 			 */
5702 			if (sid >= asoc->streamincnt) {
5703 				/* screwed up streams, stop!  */
5704 				break;
5705 			}
5706 			if ((asoc->str_of_pdapi == sid) &&
5707 			    (asoc->ssn_of_pdapi == mid)) {
5708 				/*
5709 				 * If this is the one we were partially
5710 				 * delivering now then we no longer are.
5711 				 * Note this will change with the reassembly
5712 				 * re-write.
5713 				 */
5714 				asoc->fragmented_delivery_inprogress = 0;
5715 			}
5716 			strm = &asoc->strmin[sid];
5717 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5718 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5719 			}
5720 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5721 				if ((control->sinfo_stream == sid) &&
5722 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5723 					str_seq = (sid << 16) | (0x0000ffff & mid);
5724 					control->pdapi_aborted = 1;
5725 					sv = stcb->asoc.control_pdapi;
5726 					control->end_added = 1;
5727 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5728 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5729 						if (asoc->size_on_all_streams >= control->length) {
5730 							asoc->size_on_all_streams -= control->length;
5731 						} else {
5732 #ifdef INVARIANTS
5733 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5734 #else
5735 							asoc->size_on_all_streams = 0;
5736 #endif
5737 						}
5738 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5739 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5740 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5741 #ifdef INVARIANTS
5742 					} else if (control->on_strm_q) {
5743 						panic("strm: %p ctl: %p unknown %d",
5744 						    strm, control, control->on_strm_q);
5745 #endif
5746 					}
5747 					control->on_strm_q = 0;
5748 					stcb->asoc.control_pdapi = control;
5749 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5750 					    stcb,
5751 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5752 					    (void *)&str_seq,
5753 					    SCTP_SO_NOT_LOCKED);
5754 					stcb->asoc.control_pdapi = sv;
5755 					break;
5756 				} else if ((control->sinfo_stream == sid) &&
5757 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5758 					/* We are past our victim SSN */
5759 					break;
5760 				}
5761 			}
5762 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5763 				/* Update the sequence number */
5764 				strm->last_mid_delivered = mid;
5765 			}
5766 			/* now kick the stream the new way */
5767 			/* sa_ignore NO_NULL_CHK */
5768 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5769 		}
5770 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5771 	}
5772 	/*
5773 	 * Now slide thing forward.
5774 	 */
5775 	sctp_slide_mapping_arrays(stcb);
5776 }
5777