/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send and will be sending it, so the SACK can be bundled).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and we still
	 * hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
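
/*
 * Illustrative summary (a sketch of the arithmetic above): the advertised
 * window is roughly
 *
 *   rwnd = sbspace(so_rcv)
 *          - (size_on_reasm_queue + cnt_on_reasm_queue * MSIZE)
 *          - (size_on_all_streams + cnt_on_all_streams * MSIZE)
 *          - my_rwnd_control_len
 *
 * with two special cases: the full max(socket limit, SCTP_MINIMAL_RWND) is
 * granted when nothing is buffered at all, and the result is forced to 1
 * when the control overhead would shrink it below my_rwnd_control_len
 * (SWS avoidance).
 */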

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
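
/*
 * For reference, the mbuf built above packs up to three cmsgs back-to-back
 * (each CMSG_SPACE()-aligned); with all features enabled the layout is
 * conceptually:
 *
 *   cmsghdr{IPPROTO_SCTP, SCTP_RCVINFO}  + struct sctp_rcvinfo
 *   cmsghdr{IPPROTO_SCTP, SCTP_NXTINFO}  + struct sctp_nxtinfo
 *   cmsghdr{IPPROTO_SCTP, SCTP_SNDRCV or
 *           SCTP_EXTRCV}                 + struct sctp_sndrcvinfo /
 *                                          struct sctp_extrcvinfo
 *
 * A receiver would walk these with CMSG_FIRSTHDR()/CMSG_NXTHDR() on the
 * msghdr after recvmsg().
 */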

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
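
/*
 * Worked example of the gap math above (a sketch): with
 * mapping_array_base_tsn = 100 and tsn = 103, SCTP_CALC_TSN_TO_GAP()
 * yields gap = 3. Marking TSN 103 non-revokable then clears bit 3 in
 * mapping_array (the renegable map) and sets it in nr_mapping_array, so a
 * later SACK cannot renege on that TSN; if 103 happened to be
 * highest_tsn_inside_map, the loop walks down to the next bit still set
 * in the renegable map.
 */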

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control can be here in old
				 * (pre I-DATA) style -- abort.
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?! Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
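
/*
 * Example of the ordered insert above: with queued MIDs {2, 5, 9}, a new
 * control with MID 7 lands before 9; MID 11 is appended at the tail; MID 5
 * is a duplicate and makes the caller abort the association.
 * SCTP_MID_GT()/SCTP_MID_EQ() handle the serial-number comparison (32-bit
 * MIDs with I-DATA support, 16-bit SSNs otherwise).
 */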

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has done any needed
			 * SB locking.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
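
/*
 * On return, control->tail_mbuf points at the last mbuf carrying data,
 * control->length is the sum of SCTP_BUF_LEN() over the chain, and any
 * zero-length mbufs have been freed in place.
 */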

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has done any needed
			 * SB locking.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
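
/*
 * Example of the collapse above (a sketch): if control has fsn_included =
 * 10 and its reasm queue holds FSNs {11, 12, 14}, fragments 11 and 12 are
 * merged into the control and 14 stays queued. If 12 carried the LAST
 * bit, the leftover fragments (here 14, the start of the next old-style
 * unordered message) are moved onto a freshly allocated control ('nc'),
 * which replaces the completed one on the unordered queue so reassembly
 * can continue.
 */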

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top completed its partial delivery, so
		 * we remove it. Note the pd_api flag was taken off when the
		 * chunk was merged on in sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
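
/*
 * In short, the checker above first drains the un-ordered queue
 * (completed controls, or a PD-API candidate past pd_point), then walks
 * the ordered queue delivering each control whose MID is exactly
 * last_mid_delivered + 1, stopping at the first gap or once a partial
 * delivery is in progress.
 */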

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
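
/*
 * Locking note: the INP read lock is taken inside this function only when
 * the control is already on the read queue (the PD-API case) and the
 * caller did not indicate via hold_rlock that it already holds it; all
 * other synchronization is the caller's responsibility.
 */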
1377 
1378 /*
1379  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1380  * queue, see if anthing can be delivered. If so pull it off (or as much as
1381  * we can. If we run out of space then we must dump what we can and set the
1382  * appropriate flag to say we queued what we could.
1383  */
1384 static void
1385 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1386     struct sctp_queued_to_read *control,
1387     struct sctp_tmit_chunk *chk,
1388     int created_control,
1389     int *abort_flag, uint32_t tsn)
1390 {
1391 	uint32_t next_fsn;
1392 	struct sctp_tmit_chunk *at, *nat;
1393 	struct sctp_stream_in *strm;
1394 	int do_wakeup, unordered;
1395 	uint32_t lenadded;
1396 
1397 	strm = &asoc->strmin[control->sinfo_stream];
1398 	/*
1399 	 * For old un-ordered data chunks.
1400 	 */
1401 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1402 		unordered = 1;
1403 	} else {
1404 		unordered = 0;
1405 	}
1406 	/* Must be added to the stream-in queue */
1407 	if (created_control) {
1408 		if (unordered == 0) {
1409 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1410 		}
1411 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1412 			/* Duplicate SSN? */
1413 			sctp_abort_in_reasm(stcb, control, chk,
1414 			    abort_flag,
1415 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1416 			sctp_clean_up_control(stcb, control);
1417 			return;
1418 		}
1419 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1420 			/*
1421 			 * Ok we created this control and now lets validate
1422 			 * that its legal i.e. there is a B bit set, if not
1423 			 * and we have up to the cum-ack then its invalid.
1424 			 */
1425 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1426 				sctp_abort_in_reasm(stcb, control, chk,
1427 				    abort_flag,
1428 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1429 				return;
1430 			}
1431 		}
1432 	}
1433 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1434 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1435 		return;
1436 	}
1437 	/*
1438 	 * Ok we must queue the chunk into the reasembly portion: o if its
1439 	 * the first it goes to the control mbuf. o if its not first but the
1440 	 * next in sequence it goes to the control, and each succeeding one
1441 	 * in order also goes. o if its not in order we place it on the list
1442 	 * in its place.
1443 	 */
1444 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1445 		/* Its the very first one. */
1446 		SCTPDBG(SCTP_DEBUG_XXX,
1447 		    "chunk is a first fsn: %u becomes fsn_included\n",
1448 		    chk->rec.data.fsn);
1449 		if (control->first_frag_seen) {
1450 			/*
1451 			 * Error on senders part, they either sent us two
1452 			 * data chunks with FIRST, or they sent two
1453 			 * un-ordered chunks that were fragmented at the
1454 			 * same time in the same stream.
1455 			 */
1456 			sctp_abort_in_reasm(stcb, control, chk,
1457 			    abort_flag,
1458 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1459 			return;
1460 		}
1461 		control->first_frag_seen = 1;
1462 		control->sinfo_ppid = chk->rec.data.ppid;
1463 		control->sinfo_tsn = chk->rec.data.tsn;
1464 		control->fsn_included = chk->rec.data.fsn;
1465 		control->data = chk->data;
1466 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1467 		chk->data = NULL;
1468 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1469 		sctp_setup_tail_pointer(control);
1470 		asoc->size_on_all_streams += control->length;
1471 	} else {
1472 		/* Place the chunk in our list */
1473 		int inserted = 0;
1474 
1475 		if (control->last_frag_seen == 0) {
1476 			/* Still willing to raise highest FSN seen */
1477 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1478 				SCTPDBG(SCTP_DEBUG_XXX,
1479 				    "We have a new top_fsn: %u\n",
1480 				    chk->rec.data.fsn);
1481 				control->top_fsn = chk->rec.data.fsn;
1482 			}
1483 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1484 				SCTPDBG(SCTP_DEBUG_XXX,
1485 				    "The last fsn is now in place fsn: %u\n",
1486 				    chk->rec.data.fsn);
1487 				control->last_frag_seen = 1;
1488 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1489 					SCTPDBG(SCTP_DEBUG_XXX,
1490 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1491 					    chk->rec.data.fsn,
1492 					    control->top_fsn);
1493 					sctp_abort_in_reasm(stcb, control, chk,
1494 					    abort_flag,
1495 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1496 					return;
1497 				}
1498 			}
1499 			if (asoc->idata_supported || control->first_frag_seen) {
1500 				/*
1501 				 * For IDATA we always check since we know
1502 				 * that the first fragment is 0. For old
1503 				 * DATA we have to receive the first before
1504 				 * we know the first FSN (which is the TSN).
1505 				 */
1506 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1507 					/*
1508 					 * We have already delivered up to
1509 					 * this so its a dup
1510 					 */
1511 					sctp_abort_in_reasm(stcb, control, chk,
1512 					    abort_flag,
1513 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1514 					return;
1515 				}
1516 			}
1517 		} else {
1518 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1519 				/* Second last? huh? */
1520 				SCTPDBG(SCTP_DEBUG_XXX,
1521 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1522 				    chk->rec.data.fsn, control->top_fsn);
1523 				sctp_abort_in_reasm(stcb, control,
1524 				    chk, abort_flag,
1525 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1526 				return;
1527 			}
1528 			if (asoc->idata_supported || control->first_frag_seen) {
1529 				/*
1530 				 * For IDATA we always check since we know
1531 				 * that the first fragment is 0. For old
1532 				 * DATA we have to receive the first before
1533 				 * we know the first FSN (which is the TSN).
1534 				 */
1535 
1536 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1537 					/*
1538 					 * We have already delivered up to
1539 					 * this so its a dup
1540 					 */
1541 					SCTPDBG(SCTP_DEBUG_XXX,
1542 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1543 					    chk->rec.data.fsn, control->fsn_included);
1544 					sctp_abort_in_reasm(stcb, control, chk,
1545 					    abort_flag,
1546 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1547 					return;
1548 				}
1549 			}
1550 			/*
1551 			 * validate not beyond top FSN if we have seen last
1552 			 * one
1553 			 */
1554 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1555 				SCTPDBG(SCTP_DEBUG_XXX,
1556 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1557 				    chk->rec.data.fsn,
1558 				    control->top_fsn);
1559 				sctp_abort_in_reasm(stcb, control, chk,
1560 				    abort_flag,
1561 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1562 				return;
1563 			}
1564 		}
1565 		/*
1566 		 * If we reach here, we need to place the new chunk in the
1567 		 * reassembly for this control.
1568 		 */
1569 		SCTPDBG(SCTP_DEBUG_XXX,
1570 		    "chunk is a not first fsn: %u needs to be inserted\n",
1571 		    chk->rec.data.fsn);
1572 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1573 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1574 				/*
1575 				 * This one in queue is bigger than the new
1576 				 * one, insert the new one before at.
1577 				 */
1578 				SCTPDBG(SCTP_DEBUG_XXX,
1579 				    "Insert it before fsn: %u\n",
1580 				    at->rec.data.fsn);
1581 				asoc->size_on_reasm_queue += chk->send_size;
1582 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1583 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1584 				inserted = 1;
1585 				break;
1586 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1587 				/*
1588 				 * Gak, He sent me a duplicate str seq
1589 				 * number
1590 				 */
1591 				/*
1592 				 * foo bar, I guess I will just free this
1593 				 * new guy, should we abort too? FIX ME
1594 				 * MAYBE? Or it COULD be that the SSN's have
1595 				 * wrapped. Maybe I should compare to TSN
1596 				 * somehow... sigh for now just blow away
1597 				 * the chunk!
1598 				 */
1599 				SCTPDBG(SCTP_DEBUG_XXX,
1600 				    "Duplicate to fsn: %u -- abort\n",
1601 				    at->rec.data.fsn);
1602 				sctp_abort_in_reasm(stcb, control,
1603 				    chk, abort_flag,
1604 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1605 				return;
1606 			}
1607 		}
1608 		if (inserted == 0) {
1609 			/* Goes on the end */
1610 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1611 			    chk->rec.data.fsn);
1612 			asoc->size_on_reasm_queue += chk->send_size;
1613 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1614 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1615 		}
1616 	}
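	/*
	 * A worked trace of the FSN-ordered insert above (hypothetical
	 * FSN values, not taken from a real capture): suppose
	 * control->reasm already holds fragments with FSNs {3, 7}.  A
	 * new chunk with FSN 5 walks the list, skips 3 (not greater
	 * than 5), finds 7 (greater) and is inserted before it, giving
	 * {3, 5, 7}.  A later chunk with FSN 9 matches nothing greater
	 * and falls through to the tail insert, giving {3, 5, 7, 9}.
	 * The reassembly list is therefore always sorted by FSN.
	 */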
1617 	/*
1618 	 * OK, let's see if we can pull any in-sequence fragments up into the
1619 	 * control structure, if it makes sense.
1620 	 */
1621 	do_wakeup = 0;
1622 	/*
1623 	 * If the first fragment has not been seen there is no sense in
1624 	 * looking.
1625 	 */
1626 	if (control->first_frag_seen) {
1627 		next_fsn = control->fsn_included + 1;
1628 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1629 			if (at->rec.data.fsn == next_fsn) {
1630 				/* We can add this one now to the control */
1631 				SCTPDBG(SCTP_DEBUG_XXX,
1632 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1633 				    control, at,
1634 				    at->rec.data.fsn,
1635 				    next_fsn, control->fsn_included);
1636 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1637 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1638 				if (control->on_read_q) {
1639 					do_wakeup = 1;
1640 				} else {
1641 					/*
1642 					 * We only add to
1643 					 * size-on-all-streams if it's not on
1644 					 * the read queue. The read queue flag
1645 					 * causes an sballoc, so it's
1646 					 * accounted for there.
1647 					 */
1648 					asoc->size_on_all_streams += lenadded;
1649 				}
1650 				next_fsn++;
1651 				if (control->end_added && control->pdapi_started) {
1652 					if (strm->pd_api_started) {
1653 						strm->pd_api_started = 0;
1654 						control->pdapi_started = 0;
1655 					}
1656 					if (control->on_read_q == 0) {
1657 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1658 						    control,
1659 						    &stcb->sctp_socket->so_rcv, control->end_added,
1660 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1661 					}
1662 					break;
1663 				}
1664 			} else {
1665 				break;
1666 			}
1667 		}
1668 	}
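	/*
	 * A worked trace of the pull-up loop above (hypothetical
	 * values): assume control->fsn_included == 2 and control->reasm
	 * holds {3, 4, 6}.  next_fsn starts at 3: fragment 3 matches
	 * and is moved into the control; next_fsn becomes 4 and
	 * fragment 4 is moved as well; next_fsn becomes 5, fragment 6
	 * does not match, so the loop stops with {6} left on the list.
	 * sctp_add_chk_to_control() is assumed here to advance
	 * control->fsn_included as each fragment is appended.
	 */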
1669 	if (do_wakeup) {
1670 		/* Need to wakeup the reader */
1671 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1672 	}
1673 }
1674 
1675 static struct sctp_queued_to_read *
1676 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1677 {
1678 	struct sctp_queued_to_read *control;
1679 
1680 	if (ordered) {
1681 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1682 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1683 				break;
1684 			}
1685 		}
1686 	} else {
1687 		if (idata_supported) {
1688 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1689 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1690 					break;
1691 				}
1692 			}
1693 		} else {
1694 			control = TAILQ_FIRST(&strm->uno_inqueue);
1695 		}
1696 	}
1697 	return (control);
1698 }
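/*
 * Lookup rules illustrated by sctp_find_reasm_entry() above: for ordered
 * data the stream's inqueue is searched by MID; for unordered data with
 * I-DATA support the uno_inqueue is searched by MID; for unordered old
 * DATA there is no MID on the wire, so the first entry of uno_inqueue is
 * returned (at most one such reassembly is expected to be in progress).
 */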
1699 
1700 static int
1701 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1702     struct mbuf **m, int offset, int chk_length,
1703     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1704     int *break_flag, int last_chunk, uint8_t chk_type)
1705 {
1706 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1707 	uint32_t tsn, fsn, gap, mid;
1708 	struct mbuf *dmbuf;
1709 	int the_len;
1710 	int need_reasm_check = 0;
1711 	uint16_t sid;
1712 	struct mbuf *op_err;
1713 	char msg[SCTP_DIAG_INFO_LEN];
1714 	struct sctp_queued_to_read *control, *ncontrol;
1715 	uint32_t ppid;
1716 	uint8_t chk_flags;
1717 	struct sctp_stream_reset_list *liste;
1718 	int ordered;
1719 	size_t clen;
1720 	int created_control = 0;
1721 
1722 	if (chk_type == SCTP_IDATA) {
1723 		struct sctp_idata_chunk *chunk, chunk_buf;
1724 
1725 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1726 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1727 		chk_flags = chunk->ch.chunk_flags;
1728 		clen = sizeof(struct sctp_idata_chunk);
1729 		tsn = ntohl(chunk->dp.tsn);
1730 		sid = ntohs(chunk->dp.sid);
1731 		mid = ntohl(chunk->dp.mid);
1732 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1733 			fsn = 0;
1734 			ppid = chunk->dp.ppid_fsn.ppid;
1735 		} else {
1736 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1737 			ppid = 0xffffffff;	/* Use as an invalid value. */
1738 		}
1739 	} else {
1740 		struct sctp_data_chunk *chunk, chunk_buf;
1741 
1742 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1743 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1744 		chk_flags = chunk->ch.chunk_flags;
1745 		clen = sizeof(struct sctp_data_chunk);
1746 		tsn = ntohl(chunk->dp.tsn);
1747 		sid = ntohs(chunk->dp.sid);
1748 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1749 		fsn = tsn;
1750 		ppid = chunk->dp.ppid;
1751 	}
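	/*
	 * The two wire formats parsed above, per RFC 4960 (DATA) and
	 * RFC 8260 (I-DATA):
	 *
	 *	DATA:   tsn(4) sid(2) ssn(2) ppid(4)
	 *	I-DATA: tsn(4) sid(2) res(2) mid(4) ppid-or-fsn(4)
	 *
	 * For I-DATA the final 32-bit field is a union: on the first
	 * fragment (B bit set) it carries the PPID and the FSN is
	 * implicitly 0; on all other fragments it carries the FSN and
	 * the PPID is only known from the first fragment (hence the
	 * 0xffffffff placeholder above).
	 */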
1752 	if ((size_t)chk_length == clen) {
1753 		/*
1754 		 * Need to send an abort, since we had an empty data chunk.
1755 		 */
1756 		op_err = sctp_generate_no_user_data_cause(tsn);
1757 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1758 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1759 		*abort_flag = 1;
1760 		return (0);
1761 	}
1762 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1763 		asoc->send_sack = 1;
1764 	}
1765 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1766 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1767 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1768 	}
1769 	if (stcb == NULL) {
1770 		return (0);
1771 	}
1772 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1773 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1774 		/* It is a duplicate */
1775 		SCTP_STAT_INCR(sctps_recvdupdata);
1776 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 			/* Record a dup for the next outbound sack */
1778 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 			asoc->numduptsns++;
1780 		}
1781 		asoc->send_sack = 1;
1782 		return (0);
1783 	}
1784 	/* Calculate the number of TSNs between the base and this TSN */
1785 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1786 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1787 		/* Can't hold the bit in the mapping at max array, toss it */
1788 		return (0);
1789 	}
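	/*
	 * SCTP_CALC_TSN_TO_GAP() effectively computes (tsn - base) in
	 * 32-bit serial-number arithmetic, so it is wrap-safe.  A
	 * hypothetical example:
	 *
	 *	base = 0xFFFFFFF0, tsn = 0x00000005
	 *	gap  = (0xFFFFFFFF - 0xFFFFFFF0) + 0x00000005 + 1 = 21
	 *
	 * which matches (tsn - base) mod 2^32.  The check above then
	 * rejects any TSN whose gap does not fit in the
	 * SCTP_MAPPING_ARRAY * 8 bits the mapping array can represent.
	 */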
1790 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1791 		SCTP_TCB_LOCK_ASSERT(stcb);
1792 		if (sctp_expand_mapping_array(asoc, gap)) {
1793 			/* Can't expand, drop it */
1794 			return (0);
1795 		}
1796 	}
1797 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 		*high_tsn = tsn;
1799 	}
1800 	/* See if we have received this one already */
1801 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1802 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1803 		SCTP_STAT_INCR(sctps_recvdupdata);
1804 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1805 			/* Record a dup for the next outbound sack */
1806 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 			asoc->numduptsns++;
1808 		}
1809 		asoc->send_sack = 1;
1810 		return (0);
1811 	}
1812 	/*
1813 	 * Now check the GONE flag; duplicates would have caused a SACK
1814 	 * to be sent up above.
1815 	 */
1816 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1817 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1818 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1819 		/*
1820 		 * The socket is gone; there is no longer a
1821 		 * receiver. Send the peer an ABORT!
1822 		 */
1823 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1824 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1825 		*abort_flag = 1;
1826 		return (0);
1827 	}
1828 	/*
1829 	 * Now before going further we see if there is room. If NOT then we
1830 	 * MAY let one through only IF this TSN is the one we are waiting
1831 	 * for on a partial delivery API.
1832 	 */
1833 
1834 	/* Is the stream valid? */
1835 	if (sid >= asoc->streamincnt) {
1836 		struct sctp_error_invalid_stream *cause;
1837 
1838 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1839 		    0, M_NOWAIT, 1, MT_DATA);
1840 		if (op_err != NULL) {
1841 			/* add some space up front so prepend will work well */
1842 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1843 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1844 			/*
1845 			 * Error causes are just parameters. This one has two
1846 			 * back-to-back phdrs: one with the error type and
1847 			 * size, the other with the stream id and a reserved field.
1848 			 */
1849 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1850 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1851 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1852 			cause->stream_id = htons(sid);
1853 			cause->reserved = htons(0);
1854 			sctp_queue_op_err(stcb, op_err);
1855 		}
1856 		SCTP_STAT_INCR(sctps_badsid);
1857 		SCTP_TCB_LOCK_ASSERT(stcb);
1858 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1859 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1860 			asoc->highest_tsn_inside_nr_map = tsn;
1861 		}
1862 		if (tsn == (asoc->cumulative_tsn + 1)) {
1863 			/* Update cum-ack */
1864 			asoc->cumulative_tsn = tsn;
1865 		}
1866 		return (0);
1867 	}
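	/*
	 * Wire image of the Invalid Stream Identifier cause queued
	 * above (RFC 4960, Section 3.3.10.1), 8 bytes total:
	 *
	 *	cause code = 1, cause length = 8,
	 *	stream identifier (2 bytes), reserved (2 bytes)
	 *
	 * Note that the TSN is still marked as received in the nr-map
	 * and may advance the cum-ack: the chunk is consumed, only its
	 * payload is discarded.
	 */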
1868 	/*
1869 	 * If its a fragmented message, lets see if we can find the control
1870 	 * on the reassembly queues.
1871 	 */
1872 	if ((chk_type == SCTP_IDATA) &&
1873 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874 	    (fsn == 0)) {
1875 		/*
1876 		 * The first *must* be fsn 0, and other (middle/end) pieces
1877 		 * can *not* be fsn 0. XXX: This can happen in case of a
1878 		 * wrap around. Ignore it for now.
1879 		 */
1880 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1881 		    mid, chk_flags);
1882 		goto err_out;
1883 	}
1884 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1885 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1886 	    chk_flags, control);
1887 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1888 		/* See if we can find the re-assembly entity */
1889 		if (control != NULL) {
1890 			/* We found something, does it belong? */
1891 			if (ordered && (mid != control->mid)) {
1892 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1893 		err_out:
1894 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1896 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 				*abort_flag = 1;
1898 				return (0);
1899 			}
1900 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1901 				/*
1902 				 * We can't have a switched order with an
1903 				 * unordered chunk
1904 				 */
1905 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1906 				    tsn);
1907 				goto err_out;
1908 			}
1909 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1910 				/*
1911 				 * We can't have a switched unordered with an
1912 				 * ordered chunk
1913 				 */
1914 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1915 				    tsn);
1916 				goto err_out;
1917 			}
1918 		}
1919 	} else {
1920 		/*
1921 		 * It's a complete segment. Let's validate we don't have a
1922 		 * re-assembly going on with the same Stream/Seq (for
1923 		 * ordered) or in the same Stream for unordered.
1924 		 */
1925 		if (control != NULL) {
1926 			if (ordered || asoc->idata_supported) {
1927 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1928 				    chk_flags, mid);
1929 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1930 				goto err_out;
1931 			} else {
1932 				if ((tsn == control->fsn_included + 1) &&
1933 				    (control->end_added == 0)) {
1934 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1935 					goto err_out;
1936 				} else {
1937 					control = NULL;
1938 				}
1939 			}
1940 		}
1941 	}
1942 	/* now do the tests */
1943 	if (((asoc->cnt_on_all_streams +
1944 	    asoc->cnt_on_reasm_queue +
1945 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1946 	    (((int)asoc->my_rwnd) <= 0)) {
1947 		/*
1948 		 * When we have NO room in the rwnd we check to make sure
1949 		 * the reader is doing its job...
1950 		 */
1951 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1952 			/* some to read, wake-up */
1953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1954 			struct socket *so;
1955 
1956 			so = SCTP_INP_SO(stcb->sctp_ep);
1957 			atomic_add_int(&stcb->asoc.refcnt, 1);
1958 			SCTP_TCB_UNLOCK(stcb);
1959 			SCTP_SOCKET_LOCK(so, 1);
1960 			SCTP_TCB_LOCK(stcb);
1961 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1962 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1963 				/* assoc was freed while we were unlocked */
1964 				SCTP_SOCKET_UNLOCK(so, 1);
1965 				return (0);
1966 			}
1967 #endif
1968 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1969 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1970 			SCTP_SOCKET_UNLOCK(so, 1);
1971 #endif
1972 		}
1973 		/* now is it in the mapping array of what we have accepted? */
1974 		if (chk_type == SCTP_DATA) {
1975 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1976 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1977 				/* Nope, not in the valid range; dump it */
1978 		dump_packet:
1979 				sctp_set_rwnd(stcb, asoc);
1980 				if ((asoc->cnt_on_all_streams +
1981 				    asoc->cnt_on_reasm_queue +
1982 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1983 					SCTP_STAT_INCR(sctps_datadropchklmt);
1984 				} else {
1985 					SCTP_STAT_INCR(sctps_datadroprwnd);
1986 				}
1987 				*break_flag = 1;
1988 				return (0);
1989 			}
1990 		} else {
1991 			if (control == NULL) {
1992 				goto dump_packet;
1993 			}
1994 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1995 				goto dump_packet;
1996 			}
1997 		}
1998 	}
1999 #ifdef SCTP_ASOCLOG_OF_TSNS
2000 	SCTP_TCB_LOCK_ASSERT(stcb);
2001 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2002 		asoc->tsn_in_at = 0;
2003 		asoc->tsn_in_wrapped = 1;
2004 	}
2005 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2006 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2007 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2008 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2009 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
2010 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2011 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2012 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2013 	asoc->tsn_in_at++;
2014 #endif
2015 	/*
2016 	 * Before we continue, let's validate that we are not being fooled by
2017 	 * an evil attacker. We can only have Nk chunks based on our TSN
2018 	 * spread allowed by the mapping array N * 8 bits, so there is no
2019 	 * way our stream sequence numbers could have wrapped. We of course
2020 	 * only validate the FIRST fragment so the bit must be set.
2021 	 */
2022 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2023 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2024 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2025 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2026 		/* The incoming sseq is behind where we last delivered? */
2027 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2028 		    mid, asoc->strmin[sid].last_mid_delivered);
2029 
2030 		if (asoc->idata_supported) {
2031 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2032 			    asoc->strmin[sid].last_mid_delivered,
2033 			    tsn,
2034 			    sid,
2035 			    mid);
2036 		} else {
2037 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2038 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2039 			    tsn,
2040 			    sid,
2041 			    (uint16_t)mid);
2042 		}
2043 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2044 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2045 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2046 		*abort_flag = 1;
2047 		return (0);
2048 	}
2049 	if (chk_type == SCTP_IDATA) {
2050 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2051 	} else {
2052 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2053 	}
2054 	if (last_chunk == 0) {
2055 		if (chk_type == SCTP_IDATA) {
2056 			dmbuf = SCTP_M_COPYM(*m,
2057 			    (offset + sizeof(struct sctp_idata_chunk)),
2058 			    the_len, M_NOWAIT);
2059 		} else {
2060 			dmbuf = SCTP_M_COPYM(*m,
2061 			    (offset + sizeof(struct sctp_data_chunk)),
2062 			    the_len, M_NOWAIT);
2063 		}
2064 #ifdef SCTP_MBUF_LOGGING
2065 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2066 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2067 		}
2068 #endif
2069 	} else {
2070 		/* We can steal the last chunk */
2071 		int l_len;
2072 
2073 		dmbuf = *m;
2074 		/* lop off the top part */
2075 		if (chk_type == SCTP_IDATA) {
2076 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2077 		} else {
2078 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2079 		}
2080 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2081 			l_len = SCTP_BUF_LEN(dmbuf);
2082 		} else {
2083 			/*
2084 			 * need to count up the size; hopefully we do
2085 			 * not hit this too often
2086 			 */
2087 			struct mbuf *lat;
2088 
2089 			l_len = 0;
2090 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2091 				l_len += SCTP_BUF_LEN(lat);
2092 			}
2093 		}
2094 		if (l_len > the_len) {
2095 			/* Trim the extra bytes off the end too */
2096 			m_adj(dmbuf, -(l_len - the_len));
2097 		}
2098 	}
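	/*
	 * m_adj() with a positive length trims bytes from the front of
	 * the chain, with a negative length from the tail.  A
	 * hypothetical trace of the trim above: a stolen chain of two
	 * mbufs of lengths 100 and 60 gives l_len = 160; with
	 * the_len = 150, m_adj(dmbuf, -(160 - 150)) drops the 10
	 * trailing bytes (e.g. chunk padding) so dmbuf holds exactly
	 * the payload.
	 */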
2099 	if (dmbuf == NULL) {
2100 		SCTP_STAT_INCR(sctps_nomem);
2101 		return (0);
2102 	}
2103 	/*
2104 	 * Now no matter what, we need a control, get one if we don't have
2105 	 * one (we may have gotten it above when we found the message was
2106 	 * fragmented).
2107 	 */
2108 	if (control == NULL) {
2109 		sctp_alloc_a_readq(stcb, control);
2110 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2111 		    ppid,
2112 		    sid,
2113 		    chk_flags,
2114 		    NULL, fsn, mid);
2115 		if (control == NULL) {
2116 			SCTP_STAT_INCR(sctps_nomem);
2117 			return (0);
2118 		}
2119 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2120 			struct mbuf *mm;
2121 
2122 			control->data = dmbuf;
2123 			control->tail_mbuf = NULL;
2124 			for (mm = control->data; mm; mm = mm->m_next) {
2125 				control->length += SCTP_BUF_LEN(mm);
2126 				if (SCTP_BUF_NEXT(mm) == NULL) {
2127 					control->tail_mbuf = mm;
2128 				}
2129 			}
2130 			control->end_added = 1;
2131 			control->last_frag_seen = 1;
2132 			control->first_frag_seen = 1;
2133 			control->fsn_included = fsn;
2134 			control->top_fsn = fsn;
2135 		}
2136 		created_control = 1;
2137 	}
2138 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2139 	    chk_flags, ordered, mid, control);
2140 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2141 	    TAILQ_EMPTY(&asoc->resetHead) &&
2142 	    ((ordered == 0) ||
2143 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2144 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2145 		/* Candidate for express delivery */
2146 		/*
2147 		 * It's not fragmented, no PD-API is up, nothing is in the
2148 		 * delivery queue, it's unordered OR ordered and the next to
2149 		 * deliver AND nothing else is stuck on the stream queue,
2150 		 * and there is room for it in the socket buffer. Let's just
2151 		 * stuff it up the buffer....
2152 		 */
2153 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2154 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2155 			asoc->highest_tsn_inside_nr_map = tsn;
2156 		}
2157 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2158 		    control, mid);
2159 
2160 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2161 		    control, &stcb->sctp_socket->so_rcv,
2162 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2163 
2164 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2165 			/* for ordered, bump what we delivered */
2166 			asoc->strmin[sid].last_mid_delivered++;
2167 		}
2168 		SCTP_STAT_INCR(sctps_recvexpress);
2169 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2170 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2171 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2172 		}
2173 		control = NULL;
2174 		goto finish_express_del;
2175 	}
2176 
2177 	/* Now will we need a chunk too? */
2178 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2179 		sctp_alloc_a_chunk(stcb, chk);
2180 		if (chk == NULL) {
2181 			/* No memory so we drop the chunk */
2182 			SCTP_STAT_INCR(sctps_nomem);
2183 			if (last_chunk == 0) {
2184 				/* we copied it, free the copy */
2185 				sctp_m_freem(dmbuf);
2186 			}
2187 			return (0);
2188 		}
2189 		chk->rec.data.tsn = tsn;
2190 		chk->no_fr_allowed = 0;
2191 		chk->rec.data.fsn = fsn;
2192 		chk->rec.data.mid = mid;
2193 		chk->rec.data.sid = sid;
2194 		chk->rec.data.ppid = ppid;
2195 		chk->rec.data.context = stcb->asoc.context;
2196 		chk->rec.data.doing_fast_retransmit = 0;
2197 		chk->rec.data.rcv_flags = chk_flags;
2198 		chk->asoc = asoc;
2199 		chk->send_size = the_len;
2200 		chk->whoTo = net;
2201 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (MID: %u)\n",
2202 		    chk,
2203 		    control, mid);
2204 		atomic_add_int(&net->ref_count, 1);
2205 		chk->data = dmbuf;
2206 	}
2207 	/* Set the appropriate TSN mark */
2208 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2209 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2210 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2211 			asoc->highest_tsn_inside_nr_map = tsn;
2212 		}
2213 	} else {
2214 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2215 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2216 			asoc->highest_tsn_inside_map = tsn;
2217 		}
2218 	}
2219 	/* Now is it complete (i.e. not fragmented)? */
2220 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2221 		/*
2222 		 * Special check for when streams are resetting. We could be
2223 		 * smarter about this and check the actual stream to see
2224 		 * if it is not being reset... that way we would not create a
2225 		 * HOLB when amongst streams being reset and those not being
2226 		 * reset.
2227 		 *
2228 		 */
2229 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2230 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2231 			/*
2232 			 * yep, it's past where we need to reset... go ahead
2233 			 * and queue it.
2234 			 */
2235 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2236 				/* first one on */
2237 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2238 			} else {
2239 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2240 				unsigned char inserted = 0;
2241 
2242 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2243 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2244 
2245 						continue;
2246 					} else {
2247 						/* found it */
2248 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2249 						inserted = 1;
2250 						break;
2251 					}
2252 				}
2253 				if (inserted == 0) {
2254 					/*
2255 					 * must be put at the end of the
2256 					 * pending_reply_queue; nothing in
2257 					 * the queue has a larger TSN.
2258 					 */
2259 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2260 				}
2261 			}
2262 			goto finish_express_del;
2263 		}
2264 		if (chk_flags & SCTP_DATA_UNORDERED) {
2265 			/* queue directly into socket buffer */
2266 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2267 			    control, mid);
2268 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2269 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2270 			    control,
2271 			    &stcb->sctp_socket->so_rcv, 1,
2272 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2273 
2274 		} else {
2275 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2276 			    mid);
2277 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2278 			if (*abort_flag) {
2279 				if (last_chunk) {
2280 					*m = NULL;
2281 				}
2282 				return (0);
2283 			}
2284 		}
2285 		goto finish_express_del;
2286 	}
2287 	/* If we reach here its a reassembly */
2288 	need_reasm_check = 1;
2289 	SCTPDBG(SCTP_DEBUG_XXX,
2290 	    "Queue data to stream for reasm control: %p MID: %u\n",
2291 	    control, mid);
2292 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2293 	if (*abort_flag) {
2294 		/*
2295 		 * the assoc is now gone and the chunk was put onto the reasm
2296 		 * queue, which has all been freed.
2297 		 */
2298 		if (last_chunk) {
2299 			*m = NULL;
2300 		}
2301 		return (0);
2302 	}
2303 finish_express_del:
2304 	/* Here we tidy up things */
2305 	if (tsn == (asoc->cumulative_tsn + 1)) {
2306 		/* Update cum-ack */
2307 		asoc->cumulative_tsn = tsn;
2308 	}
2309 	if (last_chunk) {
2310 		*m = NULL;
2311 	}
2312 	if (ordered) {
2313 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2314 	} else {
2315 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2316 	}
2317 	SCTP_STAT_INCR(sctps_recvdata);
2318 	/* Set it present please */
2319 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2320 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2321 	}
2322 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2323 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2324 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2325 	}
2326 	if (need_reasm_check) {
2327 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2328 		need_reasm_check = 0;
2329 	}
2330 	/* check the special flag for stream resets */
2331 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2332 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2333 		/*
2334 		 * we have finished working through the backlogged TSNs; now
2335 		 * it is time to reset streams. 1: call reset function. 2: free
2336 		 * pending_reply space. 3: distribute any chunks in the
2337 		 * pending_reply_queue.
2338 		 */
2339 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2340 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2341 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2342 		SCTP_FREE(liste, SCTP_M_STRESET);
2343 		/* sa_ignore FREED_MEMORY */
2344 		liste = TAILQ_FIRST(&asoc->resetHead);
2345 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2346 			/* All can be removed */
2347 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2348 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2349 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2350 				if (*abort_flag) {
2351 					return (0);
2352 				}
2353 				if (need_reasm_check) {
2354 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2355 					need_reasm_check = 0;
2356 				}
2357 			}
2358 		} else {
2359 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2360 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2361 					break;
2362 				}
2363 				/*
2364 				 * if control->sinfo_tsn is <= liste->tsn we
2365 				 * can process it, which is the NOT of
2366 				 * control->sinfo_tsn > liste->tsn
2367 				 */
2368 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2369 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2370 				if (*abort_flag) {
2371 					return (0);
2372 				}
2373 				if (need_reasm_check) {
2374 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2375 					need_reasm_check = 0;
2376 				}
2377 			}
2378 		}
2379 	}
2380 	return (1);
2381 }
2382 
2383 static const int8_t sctp_map_lookup_tab[256] = {
2384 	0, 1, 0, 2, 0, 1, 0, 3,
2385 	0, 1, 0, 2, 0, 1, 0, 4,
2386 	0, 1, 0, 2, 0, 1, 0, 3,
2387 	0, 1, 0, 2, 0, 1, 0, 5,
2388 	0, 1, 0, 2, 0, 1, 0, 3,
2389 	0, 1, 0, 2, 0, 1, 0, 4,
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 6,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 4,
2394 	0, 1, 0, 2, 0, 1, 0, 3,
2395 	0, 1, 0, 2, 0, 1, 0, 5,
2396 	0, 1, 0, 2, 0, 1, 0, 3,
2397 	0, 1, 0, 2, 0, 1, 0, 4,
2398 	0, 1, 0, 2, 0, 1, 0, 3,
2399 	0, 1, 0, 2, 0, 1, 0, 7,
2400 	0, 1, 0, 2, 0, 1, 0, 3,
2401 	0, 1, 0, 2, 0, 1, 0, 4,
2402 	0, 1, 0, 2, 0, 1, 0, 3,
2403 	0, 1, 0, 2, 0, 1, 0, 5,
2404 	0, 1, 0, 2, 0, 1, 0, 3,
2405 	0, 1, 0, 2, 0, 1, 0, 4,
2406 	0, 1, 0, 2, 0, 1, 0, 3,
2407 	0, 1, 0, 2, 0, 1, 0, 6,
2408 	0, 1, 0, 2, 0, 1, 0, 3,
2409 	0, 1, 0, 2, 0, 1, 0, 4,
2410 	0, 1, 0, 2, 0, 1, 0, 3,
2411 	0, 1, 0, 2, 0, 1, 0, 5,
2412 	0, 1, 0, 2, 0, 1, 0, 3,
2413 	0, 1, 0, 2, 0, 1, 0, 4,
2414 	0, 1, 0, 2, 0, 1, 0, 3,
2415 	0, 1, 0, 2, 0, 1, 0, 8
2416 };
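/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in v counting
 * up from bit 0, i.e. ffs(~v) - 1 for v != 0xff.  For example,
 * v = 0xdf = 11011111b has bits 0-4 set and bit 5 clear, and
 * sctp_map_lookup_tab[0xdf] == 5.  The slide code below uses this to count
 * how many in-order TSNs a partially filled map byte contributes.
 */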
2417 
2418 
2419 void
2420 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2421 {
2422 	/*
2423 	 * Now we also need to check the mapping array in a couple of ways.
2424 	 * 1) Did we move the cum-ack point?
2425 	 *
2426 	 * When you first glance at this you might think that all entries
2427 	 * that make up the position of the cum-ack would be in the
2428 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2429 	 * deliverable. That's true with one exception: when it's a fragmented
2430 	 * message we may not deliver the data until some threshold (or all
2431 	 * of it) is in place. So we must OR the nr_mapping_array and
2432 	 * mapping_array to get a true picture of the cum-ack.
2433 	 */
2434 	struct sctp_association *asoc;
2435 	int at;
2436 	uint8_t val;
2437 	int slide_from, slide_end, lgap, distance;
2438 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2439 
2440 	asoc = &stcb->asoc;
2441 
2442 	old_cumack = asoc->cumulative_tsn;
2443 	old_base = asoc->mapping_array_base_tsn;
2444 	old_highest = asoc->highest_tsn_inside_map;
2445 	/*
2446 	 * We could probably improve this a small bit by calculating the
2447 	 * offset of the current cum-ack as the starting point.
2448 	 */
2449 	at = 0;
2450 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2451 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2452 		if (val == 0xff) {
2453 			at += 8;
2454 		} else {
2455 			/* there is a 0 bit */
2456 			at += sctp_map_lookup_tab[val];
2457 			break;
2458 		}
2459 	}
2460 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2461 
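	/*
	 * A worked example of the computation above (hypothetical map
	 * contents): with ORed mapping bytes { 0xff, 0x3f, ... }, the
	 * loop adds 8 for the full first byte and
	 * sctp_map_lookup_tab[0x3f] == 6 for the second, so at == 14
	 * and cumulative_tsn = mapping_array_base_tsn + 13, the highest
	 * TSN below which there is no gap.
	 */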
2462 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2463 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2464 #ifdef INVARIANTS
2465 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2466 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2467 #else
2468 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2469 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2470 		sctp_print_mapping_array(asoc);
2471 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2472 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2473 		}
2474 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2475 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2476 #endif
2477 	}
2478 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2479 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2480 	} else {
2481 		highest_tsn = asoc->highest_tsn_inside_map;
2482 	}
2483 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2484 		/* The complete array was completed by a single FR */
2485 		/* highest becomes the cum-ack */
2486 		int clr;
2487 #ifdef INVARIANTS
2488 		unsigned int i;
2489 #endif
2490 
2491 		/* clear the array */
2492 		clr = ((at + 7) >> 3);
2493 		if (clr > asoc->mapping_array_size) {
2494 			clr = asoc->mapping_array_size;
2495 		}
2496 		memset(asoc->mapping_array, 0, clr);
2497 		memset(asoc->nr_mapping_array, 0, clr);
2498 #ifdef INVARIANTS
2499 		for (i = 0; i < asoc->mapping_array_size; i++) {
2500 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2501 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2502 				sctp_print_mapping_array(asoc);
2503 			}
2504 		}
2505 #endif
2506 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2507 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2508 	} else if (at >= 8) {
2509 		/* we can slide the mapping array down */
2510 		/* slide_from holds where we hit the first NON 0xff byte */
2511 
2512 		/*
2513 		 * now calculate the ceiling of the move using our highest
2514 		 * TSN value
2515 		 */
2516 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2517 		slide_end = (lgap >> 3);
2518 		if (slide_end < slide_from) {
2519 			sctp_print_mapping_array(asoc);
2520 #ifdef INVARIANTS
2521 			panic("impossible slide");
2522 #else
2523 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2524 			    lgap, slide_end, slide_from, at);
2525 			return;
2526 #endif
2527 		}
2528 		if (slide_end > asoc->mapping_array_size) {
2529 #ifdef INVARIANTS
2530 			panic("would overrun buffer");
2531 #else
2532 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2533 			    asoc->mapping_array_size, slide_end);
2534 			slide_end = asoc->mapping_array_size;
2535 #endif
2536 		}
2537 		distance = (slide_end - slide_from) + 1;
2538 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2539 			sctp_log_map(old_base, old_cumack, old_highest,
2540 			    SCTP_MAP_PREPARE_SLIDE);
2541 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2542 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2543 		}
2544 		if (distance + slide_from > asoc->mapping_array_size ||
2545 		    distance < 0) {
2546 			/*
2547 			 * Here we do NOT slide forward the array so that
2548 			 * hopefully when more data comes in to fill it up
2549 			 * we will be able to slide it forward. Really, I
2550 			 * don't think this should happen.
2551 			 */
2552 
2553 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2554 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2555 				    (uint32_t)asoc->mapping_array_size,
2556 				    SCTP_MAP_SLIDE_NONE);
2557 			}
2558 		} else {
2559 			int ii;
2560 
2561 			for (ii = 0; ii < distance; ii++) {
2562 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2563 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2564 
2565 			}
2566 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2567 				asoc->mapping_array[ii] = 0;
2568 				asoc->nr_mapping_array[ii] = 0;
2569 			}
2570 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2571 				asoc->highest_tsn_inside_map += (slide_from << 3);
2572 			}
2573 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2574 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2575 			}
2576 			asoc->mapping_array_base_tsn += (slide_from << 3);
2577 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2578 				sctp_log_map(asoc->mapping_array_base_tsn,
2579 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2580 				    SCTP_MAP_SLIDE_RESULT);
2581 			}
2582 		}
2583 	}
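/*
 * A worked example of the slide above (hypothetical numbers): with
 * mapping_array_base_tsn = 1000, slide_from = 2 (bytes 0-1 are 0xff) and
 * slide_end = 4, distance is 3, so bytes 2-4 are copied down to 0-2, the
 * remainder is zeroed and the base TSN advances by slide_from * 8 to 1016.
 * The bits for TSNs 1000-1015 are dropped; they sit at or below the
 * cum-ack and are no longer needed.
 */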
2584 }
2585 
2586 void
2587 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2588 {
2589 	struct sctp_association *asoc;
2590 	uint32_t highest_tsn;
2591 	int is_a_gap;
2592 
2593 	sctp_slide_mapping_arrays(stcb);
2594 	asoc = &stcb->asoc;
2595 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2596 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2597 	} else {
2598 		highest_tsn = asoc->highest_tsn_inside_map;
2599 	}
2600 	/* Is there a gap now? */
2601 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2602 
2603 	/*
2604 	 * Now we need to see if we need to queue a sack or just start the
2605 	 * timer (if allowed).
2606 	 */
2607 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2608 		/*
2609 		 * Ok, special case: in the SHUTDOWN-SENT case, we make
2610 		 * sure the SACK timer is off and instead send a SHUTDOWN
2611 		 * and a SACK.
2612 		 */
2613 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2614 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2615 			    stcb->sctp_ep, stcb, NULL,
2616 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2617 		}
2618 		sctp_send_shutdown(stcb,
2619 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2620 		if (is_a_gap) {
2621 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2622 		}
2623 	} else {
2624 		/*
2625 		 * CMT DAC algorithm: increase number of packets received
2626 		 * since last ack
2627 		 */
2628 		stcb->asoc.cmt_dac_pkts_rcvd++;
2629 
2630 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2631 							 * SACK */
2632 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2633 							 * longer is one */
2634 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2635 		    (is_a_gap) ||	/* is still a gap */
2636 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2637 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2638 		    ) {
2639 
2640 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2641 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2642 			    (stcb->asoc.send_sack == 0) &&
2643 			    (stcb->asoc.numduptsns == 0) &&
2644 			    (stcb->asoc.delayed_ack) &&
2645 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2646 
2647 				/*
2648 				 * CMT DAC algorithm: with CMT, delay acks
2649 				 * even in the face of reordering.
2650 				 *
2651 				 * Therefore, acks that do not have to be
2652 				 * sent because of the above reasons will
2653 				 * be delayed. That is, acks
2654 				 * that would have been sent due to gap
2655 				 * reports will be delayed with DAC. Start
2656 				 * the delayed ack timer.
2657 				 */
2658 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2659 				    stcb->sctp_ep, stcb, NULL);
2660 			} else {
2661 				/*
2662 				 * Ok we must build a SACK since the timer
2663 				 * is pending, we got our first packet OR
2664 				 * there are gaps or duplicates.
2665 				 */
2666 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2667 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2668 			}
2669 		} else {
2670 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2671 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2672 				    stcb->sctp_ep, stcb, NULL);
2673 			}
2674 		}
2675 	}
2676 }
2677 
2678 int
2679 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2680     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2681     struct sctp_nets *net, uint32_t *high_tsn)
2682 {
2683 	struct sctp_chunkhdr *ch, chunk_buf;
2684 	struct sctp_association *asoc;
2685 	int num_chunks = 0;	/* number of control chunks processed */
2686 	int stop_proc = 0;
2687 	int break_flag, last_chunk;
2688 	int abort_flag = 0, was_a_gap;
2689 	struct mbuf *m;
2690 	uint32_t highest_tsn;
2691 	uint16_t chk_length;
2692 
2693 	/* set the rwnd */
2694 	sctp_set_rwnd(stcb, &stcb->asoc);
2695 
2696 	m = *mm;
2697 	SCTP_TCB_LOCK_ASSERT(stcb);
2698 	asoc = &stcb->asoc;
2699 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2700 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2701 	} else {
2702 		highest_tsn = asoc->highest_tsn_inside_map;
2703 	}
2704 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2705 	/*
2706 	 * setup where we got the last DATA packet from for any SACK that
2707 	 * may need to go out. Don't bump the net. This is done ONLY when a
2708 	 * chunk is assigned.
2709 	 */
2710 	asoc->last_data_chunk_from = net;
2711 
2712 	/*-
2713 	 * Now before we proceed we must figure out if this is a wasted
2714 	 * cluster... i.e. it is a small packet sent in and yet the driver
2715 	 * underneath allocated a full cluster for it. If so we must copy it
2716 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2717 	 * with cluster starvation. Note for __Panda__ we don't do this
2718 	 * since it has clusters all the way down to 64 bytes.
2719 	 */
2720 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2721 		/* we only handle mbufs that are singletons, not chains */
2722 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2723 		if (m) {
2724 			/* ok lets see if we can copy the data up */
2725 			caddr_t *from, *to;
2726 
2727 			/* get the pointers and copy */
2728 			to = mtod(m, caddr_t *);
2729 			from = mtod((*mm), caddr_t *);
2730 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2731 			/* copy the length and free up the old */
2732 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2733 			sctp_m_freem(*mm);
2734 			/* success, back copy */
2735 			*mm = m;
2736 		} else {
2737 			/* We are in trouble in the mbuf world .. yikes */
2738 			m = *mm;
2739 		}
2740 	}
2741 	/* get pointer to the first chunk header */
2742 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2743 	    sizeof(struct sctp_chunkhdr),
2744 	    (uint8_t *)&chunk_buf);
2745 	if (ch == NULL) {
2746 		return (1);
2747 	}
2748 	/*
2749 	 * process all DATA chunks...
2750 	 */
2751 	*high_tsn = asoc->cumulative_tsn;
2752 	break_flag = 0;
2753 	asoc->data_pkts_seen++;
2754 	while (stop_proc == 0) {
2755 		/* validate chunk length */
2756 		chk_length = ntohs(ch->chunk_length);
2757 		if (length - *offset < chk_length) {
2758 			/* all done, mutilated chunk */
2759 			stop_proc = 1;
2760 			continue;
2761 		}
2762 		if ((asoc->idata_supported == 1) &&
2763 		    (ch->chunk_type == SCTP_DATA)) {
2764 			struct mbuf *op_err;
2765 			char msg[SCTP_DIAG_INFO_LEN];
2766 
2767 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2768 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2769 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2770 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2771 			return (2);
2772 		}
2773 		if ((asoc->idata_supported == 0) &&
2774 		    (ch->chunk_type == SCTP_IDATA)) {
2775 			struct mbuf *op_err;
2776 			char msg[SCTP_DIAG_INFO_LEN];
2777 
2778 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2779 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2780 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2781 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2782 			return (2);
2783 		}
2784 		if ((ch->chunk_type == SCTP_DATA) ||
2785 		    (ch->chunk_type == SCTP_IDATA)) {
2786 			uint16_t clen;
2787 
2788 			if (ch->chunk_type == SCTP_DATA) {
2789 				clen = sizeof(struct sctp_data_chunk);
2790 			} else {
2791 				clen = sizeof(struct sctp_idata_chunk);
2792 			}
2793 			if (chk_length < clen) {
2794 				/*
2795 				 * Need to send an abort since we had an
2796 				 * invalid data chunk.
2797 				 */
2798 				struct mbuf *op_err;
2799 				char msg[SCTP_DIAG_INFO_LEN];
2800 
2801 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2802 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2803 				    chk_length);
2804 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2805 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2806 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2807 				return (2);
2808 			}
2809 #ifdef SCTP_AUDITING_ENABLED
2810 			sctp_audit_log(0xB1, 0);
2811 #endif
2812 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2813 				last_chunk = 1;
2814 			} else {
2815 				last_chunk = 0;
2816 			}
2817 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2818 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2819 			    last_chunk, ch->chunk_type)) {
2820 				num_chunks++;
2821 			}
2822 			if (abort_flag)
2823 				return (2);
2824 
2825 			if (break_flag) {
2826 				/*
2827 				 * Set because of out of rwnd space and no
2828 				 * drop rep space left.
2829 				 */
2830 				stop_proc = 1;
2831 				continue;
2832 			}
2833 		} else {
2834 			/* not a data chunk in the data region */
2835 			switch (ch->chunk_type) {
2836 			case SCTP_INITIATION:
2837 			case SCTP_INITIATION_ACK:
2838 			case SCTP_SELECTIVE_ACK:
2839 			case SCTP_NR_SELECTIVE_ACK:
2840 			case SCTP_HEARTBEAT_REQUEST:
2841 			case SCTP_HEARTBEAT_ACK:
2842 			case SCTP_ABORT_ASSOCIATION:
2843 			case SCTP_SHUTDOWN:
2844 			case SCTP_SHUTDOWN_ACK:
2845 			case SCTP_OPERATION_ERROR:
2846 			case SCTP_COOKIE_ECHO:
2847 			case SCTP_COOKIE_ACK:
2848 			case SCTP_ECN_ECHO:
2849 			case SCTP_ECN_CWR:
2850 			case SCTP_SHUTDOWN_COMPLETE:
2851 			case SCTP_AUTHENTICATION:
2852 			case SCTP_ASCONF_ACK:
2853 			case SCTP_PACKET_DROPPED:
2854 			case SCTP_STREAM_RESET:
2855 			case SCTP_FORWARD_CUM_TSN:
2856 			case SCTP_ASCONF:
2857 				{
2858 					/*
2859 					 * Now, what do we do with KNOWN
2860 					 * chunks that are NOT in the right
2861 					 * place?
2862 					 *
2863 					 * For now, we treat a KNOWN chunk in
2864 					 * the wrong place as a protocol
2865 					 * violation and abort. We may later
2866 					 * want sysctl control to instead
2867 					 * ignore or process them.
2868 					 */
2869 					struct mbuf *op_err;
2870 					char msg[SCTP_DIAG_INFO_LEN];
2871 
2872 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2873 					    ch->chunk_type);
2874 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2875 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2876 					return (2);
2877 				}
2878 			default:
2879 				/*
2880 				 * Unknown chunk type: use bit rules after
2881 				 * checking length
2882 				 */
2883 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2884 					/*
2885 					 * Need to send an abort since we
2886 					 * had an invalid chunk.
2887 					 */
2888 					struct mbuf *op_err;
2889 					char msg[SCTP_DIAG_INFO_LEN];
2890 
2891 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2892 					    chk_length);
2893 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2894 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2895 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2896 					return (2);
2897 				}
2898 				if (ch->chunk_type & 0x40) {
2899 					/* Add an error report to the queue */
2900 					struct mbuf *op_err;
2901 					struct sctp_gen_error_cause *cause;
2902 
2903 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2904 					    0, M_NOWAIT, 1, MT_DATA);
2905 					if (op_err != NULL) {
2906 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2907 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2908 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2909 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2910 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2911 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2912 							sctp_queue_op_err(stcb, op_err);
2913 						} else {
2914 							sctp_m_freem(op_err);
2915 						}
2916 					}
2917 				}
2918 				if ((ch->chunk_type & 0x80) == 0) {
2919 					/* discard the rest of this packet */
2920 					stop_proc = 1;
2921 				}	/* else skip this bad chunk and
2922 					 * continue... */
2923 				break;
2924 			}	/* switch of chunk type */
2925 		}
2926 		*offset += SCTP_SIZE32(chk_length);
2927 		if ((*offset >= length) || stop_proc) {
2928 			/* no more data left in the mbuf chain */
2929 			stop_proc = 1;
2930 			continue;
2931 		}
2932 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2933 		    sizeof(struct sctp_chunkhdr),
2934 		    (uint8_t *)&chunk_buf);
2935 		if (ch == NULL) {
2936 			*offset = length;
2937 			stop_proc = 1;
2938 			continue;
2939 		}
2940 	}
2941 	if (break_flag) {
2942 		/*
2943 		 * we need to report rwnd overrun drops.
2944 		 */
2945 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2946 	}
2947 	if (num_chunks) {
2948 		/*
2949 		 * We got data; update the time for auto-close and
2950 		 * give the peer credit for being alive.
2951 		 */
2952 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2953 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2954 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2955 			    stcb->asoc.overall_error_count,
2956 			    0,
2957 			    SCTP_FROM_SCTP_INDATA,
2958 			    __LINE__);
2959 		}
2960 		stcb->asoc.overall_error_count = 0;
2961 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2962 	}
2963 	/* now service all of the reassm queue if needed */
2964 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2965 		/* Assure that we ack right away */
2966 		stcb->asoc.send_sack = 1;
2967 	}
2968 	/* Start a sack timer or QUEUE a SACK for sending */
2969 	sctp_sack_check(stcb, was_a_gap);
2970 	return (0);
2971 }
2972 
2973 static int
2974 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2975     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2976     int *num_frs,
2977     uint32_t *biggest_newly_acked_tsn,
2978     uint32_t *this_sack_lowest_newack,
2979     int *rto_ok)
2980 {
2981 	struct sctp_tmit_chunk *tp1;
2982 	unsigned int theTSN;
2983 	int j, wake_him = 0, circled = 0;
2984 
2985 	/* Recover the tp1 we last saw */
2986 	tp1 = *p_tp1;
2987 	if (tp1 == NULL) {
2988 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2989 	}
2990 	for (j = frag_strt; j <= frag_end; j++) {
2991 		theTSN = j + last_tsn;
2992 		while (tp1) {
2993 			if (tp1->rec.data.doing_fast_retransmit)
2994 				(*num_frs) += 1;
2995 
2996 			/*-
2997 			 * CMT: CUCv2 algorithm. For each TSN being
2998 			 * processed from the sent queue, track the
2999 			 * next expected pseudo-cumack, or
3000 			 * rtx_pseudo_cumack, if required. Separate
3001 			 * cumack trackers for first transmissions,
3002 			 * and retransmissions.
3003 			 */
3004 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3005 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
3006 			    (tp1->snd_count == 1)) {
3007 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3008 				tp1->whoTo->find_pseudo_cumack = 0;
3009 			}
3010 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3011 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3012 			    (tp1->snd_count > 1)) {
3013 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3014 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3015 			}
3016 			if (tp1->rec.data.tsn == theTSN) {
3017 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3018 					/*-
3019 					 * must be held until
3020 					 * cum-ack passes
3021 					 */
3022 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3023 						/*-
3024 						 * If it is less than RESEND, it is
3025 						 * now no longer in flight.
3026 						 * Higher values may already be set
3027 						 * via previous Gap Ack Blocks...
3028 						 * i.e. ACKED or RESEND.
3029 						 */
3030 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3031 						    *biggest_newly_acked_tsn)) {
3032 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3033 						}
3034 						/*-
3035 						 * CMT: SFR algo (and HTNA) - set
3036 						 * saw_newack to 1 for dest being
3037 						 * newly acked. update
3038 						 * this_sack_highest_newack if
3039 						 * appropriate.
3040 						 */
3041 						if (tp1->rec.data.chunk_was_revoked == 0)
3042 							tp1->whoTo->saw_newack = 1;
3043 
3044 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3045 						    tp1->whoTo->this_sack_highest_newack)) {
3046 							tp1->whoTo->this_sack_highest_newack =
3047 							    tp1->rec.data.tsn;
3048 						}
3049 						/*-
3050 						 * CMT DAC algo: also update
3051 						 * this_sack_lowest_newack
3052 						 */
3053 						if (*this_sack_lowest_newack == 0) {
3054 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3055 								sctp_log_sack(*this_sack_lowest_newack,
3056 								    last_tsn,
3057 								    tp1->rec.data.tsn,
3058 								    0,
3059 								    0,
3060 								    SCTP_LOG_TSN_ACKED);
3061 							}
3062 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3063 						}
3064 						/*-
3065 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3066 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3067 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3068 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3069 						 * Separate pseudo_cumack trackers for first transmissions and
3070 						 * retransmissions.
3071 						 */
3072 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3073 							if (tp1->rec.data.chunk_was_revoked == 0) {
3074 								tp1->whoTo->new_pseudo_cumack = 1;
3075 							}
3076 							tp1->whoTo->find_pseudo_cumack = 1;
3077 						}
3078 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3079 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3080 						}
3081 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3082 							if (tp1->rec.data.chunk_was_revoked == 0) {
3083 								tp1->whoTo->new_pseudo_cumack = 1;
3084 							}
3085 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3086 						}
3087 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3088 							sctp_log_sack(*biggest_newly_acked_tsn,
3089 							    last_tsn,
3090 							    tp1->rec.data.tsn,
3091 							    frag_strt,
3092 							    frag_end,
3093 							    SCTP_LOG_TSN_ACKED);
3094 						}
3095 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3096 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3097 							    tp1->whoTo->flight_size,
3098 							    tp1->book_size,
3099 							    (uint32_t)(uintptr_t)tp1->whoTo,
3100 							    tp1->rec.data.tsn);
3101 						}
3102 						sctp_flight_size_decrease(tp1);
3103 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3104 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3105 							    tp1);
3106 						}
3107 						sctp_total_flight_decrease(stcb, tp1);
3108 
3109 						tp1->whoTo->net_ack += tp1->send_size;
3110 						if (tp1->snd_count < 2) {
3111 							/*-
3112 							 * True non-retransmitted chunk
3113 							 */
3114 							tp1->whoTo->net_ack2 += tp1->send_size;
3115 
3116 							/*-
3117 							 * update RTO too ?
3118 							 */
3119 							if (tp1->do_rtt) {
3120 								if (*rto_ok &&
3121 								    sctp_calculate_rto(stcb,
3122 								    &stcb->asoc,
3123 								    tp1->whoTo,
3124 								    &tp1->sent_rcv_time,
3125 								    SCTP_RTT_FROM_DATA)) {
3126 									*rto_ok = 0;
3127 								}
3128 								if (tp1->whoTo->rto_needed == 0) {
3129 									tp1->whoTo->rto_needed = 1;
3130 								}
3131 								tp1->do_rtt = 0;
3132 							}
3133 						}
3134 
3135 					}
3136 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3137 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3138 						    stcb->asoc.this_sack_highest_gap)) {
3139 							stcb->asoc.this_sack_highest_gap =
3140 							    tp1->rec.data.tsn;
3141 						}
3142 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3143 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3144 #ifdef SCTP_AUDITING_ENABLED
3145 							sctp_audit_log(0xB2,
3146 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3147 #endif
3148 						}
3149 					}
3150 					/*-
3151 					 * All chunks NOT UNSENT fall through here and are marked
3152 					 * (leave PR-SCTP ones that are to skip alone though)
3153 					 */
3154 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3155 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3156 						tp1->sent = SCTP_DATAGRAM_MARKED;
3157 					}
3158 					if (tp1->rec.data.chunk_was_revoked) {
3159 						/* deflate the cwnd */
3160 						tp1->whoTo->cwnd -= tp1->book_size;
3161 						tp1->rec.data.chunk_was_revoked = 0;
3162 					}
3163 					/* NR Sack code here */
3164 					if (nr_sacking &&
3165 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3166 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3167 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3168 #ifdef INVARIANTS
3169 						} else {
3170 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3171 #endif
3172 						}
3173 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3174 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3175 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3176 							stcb->asoc.trigger_reset = 1;
3177 						}
3178 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3179 						if (tp1->data) {
3180 							/*
3181 						 * sa_ignore
3182 						 * NO_NULL_CHK
3183 						 */
3184 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3185 							sctp_m_freem(tp1->data);
3186 							tp1->data = NULL;
3187 						}
3188 						wake_him++;
3189 					}
3190 				}
3191 				break;
3192 			}	/* if (tp1->tsn == theTSN) */
3193 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3194 				break;
3195 			}
3196 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3197 			if ((tp1 == NULL) && (circled == 0)) {
3198 				circled++;
3199 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3200 			}
3201 		}		/* end while (tp1) */
3202 		if (tp1 == NULL) {
3203 			circled = 0;
3204 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3205 		}
3206 		/* In case the fragments were not in order we must reset */
3207 	}			/* end for (j = fragStart */
3208 	*p_tp1 = tp1;
3209 	return (wake_him);	/* Return value only used for nr-sack */
3210 }
3211 
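/*
 * Illustrative sketch of the walk above (hypothetical values): with
 * last_tsn 0x1000 and a gap block [2,4], theTSN visits 0x1002..0x1004.
 * TSN comparisons such as SCTP_TSN_GT() use serial arithmetic (roughly,
 * a > b iff (uint32_t)(a - b) lies in (0, 2^31)), so the scan stays
 * correct across the 32-bit TSN wrap.
 */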
3212 
3213 static int
3214 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3215     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3216     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3217     int num_seg, int num_nr_seg, int *rto_ok)
3218 {
3219 	struct sctp_gap_ack_block *frag, block;
3220 	struct sctp_tmit_chunk *tp1;
3221 	int i;
3222 	int num_frs = 0;
3223 	int chunk_freed;
3224 	int non_revocable;
3225 	uint16_t frag_strt, frag_end, prev_frag_end;
3226 
3227 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3228 	prev_frag_end = 0;
3229 	chunk_freed = 0;
3230 
3231 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3232 		if (i == num_seg) {
3233 			prev_frag_end = 0;
3234 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 		}
3236 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3237 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3238 		*offset += sizeof(block);
3239 		if (frag == NULL) {
3240 			return (chunk_freed);
3241 		}
3242 		frag_strt = ntohs(frag->start);
3243 		frag_end = ntohs(frag->end);
3244 
3245 		if (frag_strt > frag_end) {
3246 			/* This gap report is malformed, skip it. */
3247 			continue;
3248 		}
3249 		if (frag_strt <= prev_frag_end) {
3250 			/* This gap report is not in order, so restart. */
3251 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3252 		}
3253 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3254 			*biggest_tsn_acked = last_tsn + frag_end;
3255 		}
3256 		if (i < num_seg) {
3257 			non_revocable = 0;
3258 		} else {
3259 			non_revocable = 1;
3260 		}
3261 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3262 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3263 		    this_sack_lowest_newack, rto_ok)) {
3264 			chunk_freed = 1;
3265 		}
3266 		prev_frag_end = frag_end;
3267 	}
3268 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3269 		if (num_frs)
3270 			sctp_log_fr(*biggest_tsn_acked,
3271 			    *biggest_newly_acked_tsn,
3272 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3273 	}
3274 	return (chunk_freed);
3275 }
3276 
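/*
 * Example of a full SACK walk (hypothetical numbers): with cum-ack 1000
 * and gap blocks [2,4] and [7,7], sctp_handle_segments() acks TSNs
 * 1002-1004 and 1007. A block with start > end is skipped as malformed;
 * a block whose start does not lie past the previous end forces a rescan
 * of the sent queue from the front, since the blocks arrived out of order.
 */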
3277 static void
3278 sctp_check_for_revoked(struct sctp_tcb *stcb,
3279     struct sctp_association *asoc, uint32_t cumack,
3280     uint32_t biggest_tsn_acked)
3281 {
3282 	struct sctp_tmit_chunk *tp1;
3283 
3284 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3285 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3286 			/*
3287 			 * ok this guy is either ACK or MARKED. If it is
3288 			 * ACKED it has been previously acked but not this
3289 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3290 			 * again.
3291 			 */
3292 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3293 				break;
3294 			}
3295 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3296 				/* it has been revoked */
3297 				tp1->sent = SCTP_DATAGRAM_SENT;
3298 				tp1->rec.data.chunk_was_revoked = 1;
3299 				/*
3300 				 * We must add this stuff back in to assure
3301 				 * timers and such get started.
3302 				 */
3303 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3304 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3305 					    tp1->whoTo->flight_size,
3306 					    tp1->book_size,
3307 					    (uint32_t)(uintptr_t)tp1->whoTo,
3308 					    tp1->rec.data.tsn);
3309 				}
3310 				sctp_flight_size_increase(tp1);
3311 				sctp_total_flight_increase(stcb, tp1);
3312 				/*
3313 				 * We inflate the cwnd to compensate for our
3314 				 * artificial inflation of the flight_size.
3315 				 */
3316 				tp1->whoTo->cwnd += tp1->book_size;
3317 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3318 					sctp_log_sack(asoc->last_acked_seq,
3319 					    cumack,
3320 					    tp1->rec.data.tsn,
3321 					    0,
3322 					    0,
3323 					    SCTP_LOG_TSN_REVOKED);
3324 				}
3325 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3326 				/* it has been re-acked in this SACK */
3327 				tp1->sent = SCTP_DATAGRAM_ACKED;
3328 			}
3329 		}
3330 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3331 			break;
3332 	}
3333 }
3334 
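/*
 * Revocation sketch (hypothetical): TSN 1005 was gap-acked by an earlier
 * SACK (ACKED), but the current SACK acks TSNs above it without covering
 * it. The peer has "revoked" the earlier ack, so the chunk is put back to
 * SENT, re-enters the flight accounting, and the cwnd is inflated by
 * book_size to compensate for that artificial flight increase.
 */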
3335 
3336 static void
3337 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3338     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3339 {
3340 	struct sctp_tmit_chunk *tp1;
3341 	int strike_flag = 0;
3342 	struct timeval now;
3343 	int tot_retrans = 0;
3344 	uint32_t sending_seq;
3345 	struct sctp_nets *net;
3346 	int num_dests_sacked = 0;
3347 
3348 	/*
3349 	 * Select the sending_seq: this is either the next thing ready to be
3350 	 * sent but not yet transmitted, OR the next seq we will assign.
3351 	 */
3352 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3353 	if (tp1 == NULL) {
3354 		sending_seq = asoc->sending_seq;
3355 	} else {
3356 		sending_seq = tp1->rec.data.tsn;
3357 	}
3358 
3359 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3360 	if ((asoc->sctp_cmt_on_off > 0) &&
3361 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3362 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3363 			if (net->saw_newack)
3364 				num_dests_sacked++;
3365 		}
3366 	}
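	/*
	 * A SACK is "mixed" when it newly acks data on more than one
	 * destination; the DAC extra-strike cases below only fire when
	 * num_dests_sacked == 1, i.e. when the SACK is not mixed.
	 */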
3367 	if (stcb->asoc.prsctp_supported) {
3368 		(void)SCTP_GETTIME_TIMEVAL(&now);
3369 	}
3370 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3371 		strike_flag = 0;
3372 		if (tp1->no_fr_allowed) {
3373 			/* this one had a timeout or something */
3374 			continue;
3375 		}
3376 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3377 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3378 				sctp_log_fr(biggest_tsn_newly_acked,
3379 				    tp1->rec.data.tsn,
3380 				    tp1->sent,
3381 				    SCTP_FR_LOG_CHECK_STRIKE);
3382 		}
3383 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3384 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3385 			/* done */
3386 			break;
3387 		}
3388 		if (stcb->asoc.prsctp_supported) {
3389 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3390 				/* Is it expired? */
3391 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3392 					/* Yes so drop it */
3393 					if (tp1->data != NULL) {
3394 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3395 						    SCTP_SO_NOT_LOCKED);
3396 					}
3397 					continue;
3398 				}
3399 			}
3400 
3401 		}
3402 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3403 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3404 			/* we are beyond the tsn in the sack */
3405 			break;
3406 		}
3407 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3408 			/* either a RESEND, ACKED, or MARKED */
3409 			/* skip */
3410 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3411 				/* Continue striking FWD-TSN chunks */
3412 				tp1->rec.data.fwd_tsn_cnt++;
3413 			}
3414 			continue;
3415 		}
3416 		/*
3417 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3418 		 */
3419 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3420 			/*
3421 			 * No new acks were received for data sent to this
3422 			 * dest. Therefore, according to the SFR algo for
3423 			 * CMT, no data sent to this dest can be marked for
3424 			 * FR using this SACK.
3425 			 */
3426 			continue;
3427 		} else if (tp1->whoTo &&
3428 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3429 			    tp1->whoTo->this_sack_highest_newack) &&
3430 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3431 			/*
3432 			 * CMT: New acks were received for data sent to
3433 			 * this dest. But no new acks were seen for data
3434 			 * sent after tp1. Therefore, according to the SFR
3435 			 * algo for CMT, tp1 cannot be marked for FR using
3436 			 * this SACK. This step covers part of the DAC algo
3437 			 * and the HTNA algo as well.
3438 			 */
3439 			continue;
3440 		}
3441 		/*
3442 		 * Here we check to see if we have already done a FR
3443 		 * and if so we see if the biggest TSN we saw in the sack is
3444 		 * smaller than the recovery point. If so we don't strike
3445 		 * the tsn... otherwise we CAN strike the TSN.
3446 		 */
3447 		/*
3448 		 * @@@ JRI: Check for CMT if (accum_moved &&
3449 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3450 		 * 0)) {
3451 		 */
3452 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3453 			/*
3454 			 * Strike the TSN if in fast-recovery and cum-ack
3455 			 * moved.
3456 			 */
3457 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3458 				sctp_log_fr(biggest_tsn_newly_acked,
3459 				    tp1->rec.data.tsn,
3460 				    tp1->sent,
3461 				    SCTP_FR_LOG_STRIKE_CHUNK);
3462 			}
3463 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3464 				tp1->sent++;
3465 			}
3466 			if ((asoc->sctp_cmt_on_off > 0) &&
3467 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3468 				/*
3469 				 * CMT DAC algorithm: If SACK flag is set to
3470 				 * 0, then lowest_newack test will not pass
3471 				 * because it would have been set to the
3472 				 * cumack earlier. If tp1 is not already marked
3473 				 * for rtx, this is not a mixed SACK, and tp1
3474 				 * is not between two sacked TSNs, then mark
3475 				 * it by one more. NOTE that we are marking by one
3476 				 * additional time since the SACK DAC flag
3477 				 * indicates that two packets have been
3478 				 * received after this missing TSN.
3479 				 */
3480 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3481 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3482 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3483 						sctp_log_fr(16 + num_dests_sacked,
3484 						    tp1->rec.data.tsn,
3485 						    tp1->sent,
3486 						    SCTP_FR_LOG_STRIKE_CHUNK);
3487 					}
3488 					tp1->sent++;
3489 				}
3490 			}
3491 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3492 		    (asoc->sctp_cmt_on_off == 0)) {
3493 			/*
3494 			 * For those that have done a FR we must take
3495 			 * special consideration if we strike. I.e the
3496 			 * biggest_newly_acked must be higher than the
3497 			 * sending_seq at the time we did the FR.
3498 			 */
3499 			if (
3500 #ifdef SCTP_FR_TO_ALTERNATE
3501 			/*
3502 			 * If FR's go to new networks, then we must only do
3503 			 * this for singly homed asoc's. However if the FR's
3504 			 * go to the same network (Armando's work) then it's
3505 			 * ok to FR multiple times.
3506 			 */
3507 			    (asoc->numnets < 2)
3508 #else
3509 			    (1)
3510 #endif
3511 			    ) {
3512 
3513 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3514 				    tp1->rec.data.fast_retran_tsn)) {
3515 					/*
3516 					 * Strike the TSN, since this ack is
3517 					 * beyond where things were when we
3518 					 * did a FR.
3519 					 */
3520 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3521 						sctp_log_fr(biggest_tsn_newly_acked,
3522 						    tp1->rec.data.tsn,
3523 						    tp1->sent,
3524 						    SCTP_FR_LOG_STRIKE_CHUNK);
3525 					}
3526 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3527 						tp1->sent++;
3528 					}
3529 					strike_flag = 1;
3530 					if ((asoc->sctp_cmt_on_off > 0) &&
3531 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3532 						/*
3533 						 * CMT DAC algorithm: If
3534 						 * SACK flag is set to 0,
3535 						 * then lowest_newack test
3536 						 * will not pass because it
3537 						 * would have been set to
3538 						 * the cumack earlier. If
3539 						 * the cumack earlier. If
3540 						 * tp1 is not already marked
3541 						 * for rtx, this is not a
3542 						 * mixed SACK, and tp1 is
3543 						 * not between two sacked
3544 						 * TSNs, then mark it by one
3545 						 * more. NOTE that we mark by one
3546 						 * SACK DAC flag indicates
3547 						 * that two packets have
3548 						 * been received after this
3549 						 * missing TSN.
3550 						 */
3551 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3552 						    (num_dests_sacked == 1) &&
3553 						    SCTP_TSN_GT(this_sack_lowest_newack,
3554 						    tp1->rec.data.tsn)) {
3555 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3556 								sctp_log_fr(32 + num_dests_sacked,
3557 								    tp1->rec.data.tsn,
3558 								    tp1->sent,
3559 								    SCTP_FR_LOG_STRIKE_CHUNK);
3560 							}
3561 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3562 								tp1->sent++;
3563 							}
3564 						}
3565 					}
3566 				}
3567 			}
3568 			/*
3569 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3570 			 * algo covers HTNA.
3571 			 */
3572 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3573 		    biggest_tsn_newly_acked)) {
3574 			/*
3575 			 * We don't strike these: this is the HTNA
3576 			 * algorithm, i.e. we don't strike if our TSN is
3577 			 * larger than the Highest TSN Newly Acked.
3578 			 */
3579 			;
3580 		} else {
3581 			/* Strike the TSN */
3582 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3583 				sctp_log_fr(biggest_tsn_newly_acked,
3584 				    tp1->rec.data.tsn,
3585 				    tp1->sent,
3586 				    SCTP_FR_LOG_STRIKE_CHUNK);
3587 			}
3588 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3589 				tp1->sent++;
3590 			}
3591 			if ((asoc->sctp_cmt_on_off > 0) &&
3592 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3593 				/*
3594 				 * CMT DAC algorithm: If SACK flag is set to
3595 				 * 0, then lowest_newack test will not pass
3596 				 * because it would have been set to the
3597 				 * cumack earlier. If tp1 is not already marked
3598 				 * for rtx, this is not a mixed SACK, and tp1
3599 				 * is not between two sacked TSNs, then mark
3600 				 * it by one more. NOTE that we are marking by one
3601 				 * additional time since the SACK DAC flag
3602 				 * indicates that two packets have been
3603 				 * received after this missing TSN.
3604 				 */
3605 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3606 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3607 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3608 						sctp_log_fr(48 + num_dests_sacked,
3609 						    tp1->rec.data.tsn,
3610 						    tp1->sent,
3611 						    SCTP_FR_LOG_STRIKE_CHUNK);
3612 					}
3613 					tp1->sent++;
3614 				}
3615 			}
3616 		}
3617 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3618 			struct sctp_nets *alt;
3619 
3620 			/* fix counts and things */
3621 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3622 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3623 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3624 				    tp1->book_size,
3625 				    (uint32_t)(uintptr_t)tp1->whoTo,
3626 				    tp1->rec.data.tsn);
3627 			}
3628 			if (tp1->whoTo) {
3629 				tp1->whoTo->net_ack++;
3630 				sctp_flight_size_decrease(tp1);
3631 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3632 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3633 					    tp1);
3634 				}
3635 			}
3636 
3637 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3638 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3639 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3640 			}
3641 			/* add back to the rwnd */
3642 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3643 
3644 			/* remove from the total flight */
3645 			sctp_total_flight_decrease(stcb, tp1);
3646 
3647 			if ((stcb->asoc.prsctp_supported) &&
3648 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3649 				/*
3650 				 * Has it been retransmitted tv_sec times? -
3651 				 * we store the retran count there.
3652 				 */
3653 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3654 					/* Yes, so drop it */
3655 					if (tp1->data != NULL) {
3656 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3657 						    SCTP_SO_NOT_LOCKED);
3658 					}
3659 					/* Make sure to flag we had a FR */
3660 					if (tp1->whoTo != NULL) {
3661 						tp1->whoTo->net_ack++;
3662 					}
3663 					continue;
3664 				}
3665 			}
3666 			/*
3667 			 * SCTP_PRINTF("OK, we are now ready to FR this
3668 			 * guy\n");
3669 			 */
3670 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3671 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3672 				    0, SCTP_FR_MARKED);
3673 			}
3674 			if (strike_flag) {
3675 				/* This is a subsequent FR */
3676 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3677 			}
3678 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3679 			if (asoc->sctp_cmt_on_off > 0) {
3680 				/*
3681 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3682 				 * If CMT is being used, then pick dest with
3683 				 * largest ssthresh for any retransmission.
3684 				 */
3685 				tp1->no_fr_allowed = 1;
3686 				alt = tp1->whoTo;
3687 				/* sa_ignore NO_NULL_CHK */
3688 				if (asoc->sctp_cmt_pf > 0) {
3689 					/*
3690 					 * JRS 5/18/07 - If CMT PF is on,
3691 					 * use the PF version of
3692 					 * find_alt_net()
3693 					 */
3694 					alt = sctp_find_alternate_net(stcb, alt, 2);
3695 				} else {
3696 					/*
3697 					 * JRS 5/18/07 - If only CMT is on,
3698 					 * use the CMT version of
3699 					 * find_alt_net()
3700 					 */
3701 					/* sa_ignore NO_NULL_CHK */
3702 					alt = sctp_find_alternate_net(stcb, alt, 1);
3703 				}
3704 				if (alt == NULL) {
3705 					alt = tp1->whoTo;
3706 				}
3707 				/*
3708 				 * CUCv2: If a different dest is picked for
3709 				 * the retransmission, then new
3710 				 * (rtx-)pseudo_cumack needs to be tracked
3711 				 * for orig dest. Let CUCv2 track new (rtx-)
3712 				 * pseudo-cumack always.
3713 				 */
3714 				if (tp1->whoTo) {
3715 					tp1->whoTo->find_pseudo_cumack = 1;
3716 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3717 				}
3718 
3719 			} else {	/* CMT is OFF */
3720 
3721 #ifdef SCTP_FR_TO_ALTERNATE
3722 				/* Can we find an alternate? */
3723 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3724 #else
3725 				/*
3726 				 * default behavior is to NOT retransmit
3727 				 * FR's to an alternate. Armando Caro's
3728 				 * paper details why.
3729 				 */
3730 				alt = tp1->whoTo;
3731 #endif
3732 			}
3733 
3734 			tp1->rec.data.doing_fast_retransmit = 1;
3735 			tot_retrans++;
3736 			/* mark the sending seq for possible subsequent FR's */
3737 			/*
3738 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3739 			 * (uint32_t)tpi->rec.data.tsn);
3740 			 */
3741 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3742 				/*
3743 				 * If the send queue is empty then it's
3744 				 * the next sequence number that will be
3745 				 * assigned, so we subtract one from this to
3746 				 * get the one we last sent.
3747 				 */
3748 				tp1->rec.data.fast_retran_tsn = sending_seq;
3749 			} else {
3750 				/*
3751 				 * If there are chunks on the send queue
3752 				 * (unsent data that has made it from the
3753 				 * stream queues but not out the door), we
3754 				 * take the first one (which will have the
3755 				 * lowest TSN) and subtract one to get the
3756 				 * one we last sent.
3757 				 */
3758 				struct sctp_tmit_chunk *ttt;
3759 
3760 				ttt = TAILQ_FIRST(&asoc->send_queue);
3761 				tp1->rec.data.fast_retran_tsn =
3762 				    ttt->rec.data.tsn;
3763 			}
3764 
3765 			if (tp1->do_rtt) {
3766 				/*
3767 				 * this guy had an RTO calculation pending on
3768 				 * it, cancel it
3769 				 */
3770 				if ((tp1->whoTo != NULL) &&
3771 				    (tp1->whoTo->rto_needed == 0)) {
3772 					tp1->whoTo->rto_needed = 1;
3773 				}
3774 				tp1->do_rtt = 0;
3775 			}
3776 			if (alt != tp1->whoTo) {
3777 				/* yes, there is an alternate. */
3778 				sctp_free_remote_addr(tp1->whoTo);
3779 				/* sa_ignore FREED_MEMORY */
3780 				tp1->whoTo = alt;
3781 				atomic_add_int(&alt->ref_count, 1);
3782 			}
3783 		}
3784 	}
3785 }
3786 
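/*
 * Strike accounting sketch (hypothetical trace): a chunk below
 * SCTP_DATAGRAM_RESEND has tp1->sent bumped once per qualifying SACK (or
 * twice under CMT DAC for a non-mixed SACK). Once it reaches
 * SCTP_DATAGRAM_RESEND it is pulled from the flight, its bytes are added
 * back to the peer's rwnd, an alternate net may be chosen, and it waits
 * for fast retransmission.
 */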
3787 struct sctp_tmit_chunk *
3788 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3789     struct sctp_association *asoc)
3790 {
3791 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3792 	struct timeval now;
3793 	int now_filled = 0;
3794 
3795 	if (asoc->prsctp_supported == 0) {
3796 		return (NULL);
3797 	}
3798 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3799 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3800 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3801 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3802 			/* no chance to advance, out of here */
3803 			break;
3804 		}
3805 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3806 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3807 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3808 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3809 				    asoc->advanced_peer_ack_point,
3810 				    tp1->rec.data.tsn, 0, 0);
3811 			}
3812 		}
3813 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3814 			/*
3815 			 * We can't fwd-tsn past any that are reliable, aka
3816 			 * retransmitted until the asoc fails.
3817 			 */
3818 			break;
3819 		}
3820 		if (!now_filled) {
3821 			(void)SCTP_GETTIME_TIMEVAL(&now);
3822 			now_filled = 1;
3823 		}
3824 		/*
3825 		 * Now we have a chunk which is marked for another
3826 		 * retransmission to a PR-stream, but which may have run out
3827 		 * of its chances already OR has been marked to skip now. Can
3828 		 * we skip it if it's a resend?
3829 		 */
3830 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3831 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3832 			/*
3833 			 * Now is this one marked for resend and its time is
3834 			 * now up?
3835 			 */
3836 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3837 				/* Yes so drop it */
3838 				if (tp1->data) {
3839 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3840 					    1, SCTP_SO_NOT_LOCKED);
3841 				}
3842 			} else {
3843 				/*
3844 				 * No, we are done when we hit one for resend
3845 				 * whose time has not expired.
3846 				 */
3847 				break;
3848 			}
3849 		}
3850 		/*
3851 		 * Ok now if this chunk is marked to drop it we can clean up
3852 		 * the chunk, advance our peer ack point and we can check
3853 		 * the next chunk.
3854 		 */
3855 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3856 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3857 			/* the advanced PeerAckPoint goes forward */
3858 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3859 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3860 				a_adv = tp1;
3861 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3862 				/* No update but we do save the chk */
3863 				a_adv = tp1;
3864 			}
3865 		} else {
3866 			/*
3867 			 * If it is still in RESEND we can advance no
3868 			 * further
3869 			 */
3870 			break;
3871 		}
3872 	}
3873 	return (a_adv);
3874 }
3875 
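/*
 * Advancement example (hypothetical): with cum-ack 100, TSNs 101-103 at
 * the head of the sent queue marked SCTP_FORWARD_TSN_SKIP, and TSN 104
 * still reliable, the advanced peer ack point moves to 103 and that chunk
 * is returned so the caller can send a FORWARD-TSN; the reliable chunk at
 * 104 blocks any further advancement.
 */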
3876 static int
3877 sctp_fs_audit(struct sctp_association *asoc)
3878 {
3879 	struct sctp_tmit_chunk *chk;
3880 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3881 	int ret;
3882 #ifndef INVARIANTS
3883 	int entry_flight, entry_cnt;
3884 #endif
3885 
3886 	ret = 0;
3887 #ifndef INVARIANTS
3888 	entry_flight = asoc->total_flight;
3889 	entry_cnt = asoc->total_flight_count;
3890 #endif
3891 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3892 		return (0);
3893 
3894 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3895 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3896 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3897 			    chk->rec.data.tsn,
3898 			    chk->send_size,
3899 			    chk->snd_count);
3900 			inflight++;
3901 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3902 			resend++;
3903 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3904 			inbetween++;
3905 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3906 			above++;
3907 		} else {
3908 			acked++;
3909 		}
3910 	}
3911 
3912 	if ((inflight > 0) || (inbetween > 0)) {
3913 #ifdef INVARIANTS
3914 		panic("Flight size-express incorrect? \n");
3915 #else
3916 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3917 		    entry_flight, entry_cnt);
3918 
3919 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3920 		    inflight, inbetween, resend, above, acked);
3921 		ret = 1;
3922 #endif
3923 	}
3924 	return (ret);
3925 }
3926 
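/*
 * The audit above sanity-checks the flight bookkeeping at points where
 * the caller believes nothing should be in flight: any sent-queue chunk
 * still below SCTP_DATAGRAM_RESEND counts as "inflight", and anything
 * between RESEND and ACKED as "inbetween"; either one trips the check
 * (panicking under INVARIANTS).
 */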
3927 
3928 static void
3929 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3930     struct sctp_association *asoc,
3931     struct sctp_tmit_chunk *tp1)
3932 {
3933 	tp1->window_probe = 0;
3934 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3935 		/* TSNs skipped; we do NOT move back. */
3936 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3937 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3938 		    tp1->book_size,
3939 		    (uint32_t)(uintptr_t)tp1->whoTo,
3940 		    tp1->rec.data.tsn);
3941 		return;
3942 	}
3943 	/* First setup this by shrinking flight */
3944 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3945 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3946 		    tp1);
3947 	}
3948 	sctp_flight_size_decrease(tp1);
3949 	sctp_total_flight_decrease(stcb, tp1);
3950 	/* Now mark for resend */
3951 	tp1->sent = SCTP_DATAGRAM_RESEND;
3952 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3953 
3954 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3955 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3956 		    tp1->whoTo->flight_size,
3957 		    tp1->book_size,
3958 		    (uint32_t)(uintptr_t)tp1->whoTo,
3959 		    tp1->rec.data.tsn);
3960 	}
3961 }
3962 
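/*
 * Window probe recovery sketch (hypothetical): the peer's rwnd had closed
 * to 0, so a single chunk went out as a probe with window_probe set. When
 * a later SACK reopens the window without acking that chunk, it is pulled
 * back out of the flight and remarked SCTP_DATAGRAM_RESEND so it is sent
 * again through the normal retransmission path.
 */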
3963 void
3964 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3965     uint32_t rwnd, int *abort_now, int ecne_seen)
3966 {
3967 	struct sctp_nets *net;
3968 	struct sctp_association *asoc;
3969 	struct sctp_tmit_chunk *tp1, *tp2;
3970 	uint32_t old_rwnd;
3971 	int win_probe_recovery = 0;
3972 	int win_probe_recovered = 0;
3973 	int j, done_once = 0;
3974 	int rto_ok = 1;
3975 	uint32_t send_s;
3976 
3977 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3978 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3979 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3980 	}
3981 	SCTP_TCB_LOCK_ASSERT(stcb);
3982 #ifdef SCTP_ASOCLOG_OF_TSNS
3983 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3984 	stcb->asoc.cumack_log_at++;
3985 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3986 		stcb->asoc.cumack_log_at = 0;
3987 	}
3988 #endif
3989 	asoc = &stcb->asoc;
3990 	old_rwnd = asoc->peers_rwnd;
3991 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3992 		/* old ack */
3993 		return;
3994 	} else if (asoc->last_acked_seq == cumack) {
3995 		/* Window update sack */
3996 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3997 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3998 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3999 			/* SWS sender side engages */
4000 			asoc->peers_rwnd = 0;
4001 		}
4002 		if (asoc->peers_rwnd > old_rwnd) {
4003 			goto again;
4004 		}
4005 		return;
4006 	}
4007 
4008 	/* First setup for CC stuff */
4009 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4010 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4011 			/* Drag along the window_tsn for cwr's */
4012 			net->cwr_window_tsn = cumack;
4013 		}
4014 		net->prev_cwnd = net->cwnd;
4015 		net->net_ack = 0;
4016 		net->net_ack2 = 0;
4017 
4018 		/*
4019 		 * CMT: Reset CUC and Fast recovery algo variables before
4020 		 * SACK processing
4021 		 */
4022 		net->new_pseudo_cumack = 0;
4023 		net->will_exit_fast_recovery = 0;
4024 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4025 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4026 		}
4027 	}
4028 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4029 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4030 		    sctpchunk_listhead);
4031 		send_s = tp1->rec.data.tsn + 1;
4032 	} else {
4033 		send_s = asoc->sending_seq;
4034 	}
4035 	if (SCTP_TSN_GE(cumack, send_s)) {
4036 		struct mbuf *op_err;
4037 		char msg[SCTP_DIAG_INFO_LEN];
4038 
4039 		*abort_now = 1;
4040 		/* XXX */
4041 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4042 		    cumack, send_s);
4043 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4044 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4045 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4046 		return;
4047 	}
4048 	asoc->this_sack_highest_gap = cumack;
4049 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4050 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4051 		    stcb->asoc.overall_error_count,
4052 		    0,
4053 		    SCTP_FROM_SCTP_INDATA,
4054 		    __LINE__);
4055 	}
4056 	stcb->asoc.overall_error_count = 0;
4057 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4058 		/* process the new consecutive TSN first */
4059 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4060 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4061 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4062 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4063 				}
4064 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4065 					/*
4066 					 * If it is less than ACKED, it is
4067 					 * now no-longer in flight. Higher
4068 					 * values may occur during marking
4069 					 */
4070 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4071 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4072 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4073 							    tp1->whoTo->flight_size,
4074 							    tp1->book_size,
4075 							    (uint32_t)(uintptr_t)tp1->whoTo,
4076 							    tp1->rec.data.tsn);
4077 						}
4078 						sctp_flight_size_decrease(tp1);
4079 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4080 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4081 							    tp1);
4082 						}
4083 						/* sa_ignore NO_NULL_CHK */
4084 						sctp_total_flight_decrease(stcb, tp1);
4085 					}
4086 					tp1->whoTo->net_ack += tp1->send_size;
4087 					if (tp1->snd_count < 2) {
4088 						/*
4089 						 * True non-retransmitted
4090 						 * chunk
4091 						 */
4092 						tp1->whoTo->net_ack2 +=
4093 						    tp1->send_size;
4094 
4095 						/* update RTO too? */
4096 						if (tp1->do_rtt) {
4097 							if (rto_ok &&
4098 							    sctp_calculate_rto(stcb,
4099 							    &stcb->asoc,
4100 							    tp1->whoTo,
4101 							    &tp1->sent_rcv_time,
4102 							    SCTP_RTT_FROM_DATA)) {
4103 								rto_ok = 0;
4104 							}
4105 							if (tp1->whoTo->rto_needed == 0) {
4106 								tp1->whoTo->rto_needed = 1;
4107 							}
4108 							tp1->do_rtt = 0;
4109 						}
4110 					}
4111 					/*
4112 					 * CMT: CUCv2 algorithm. From the
4113 					 * cumack'd TSNs, for each TSN being
4114 					 * acked for the first time, set the
4115 					 * following variables for the
4116 					 * corresp destination.
4117 					 * new_pseudo_cumack will trigger a
4118 					 * cwnd update.
4119 					 * find_(rtx_)pseudo_cumack will
4120 					 * trigger search for the next
4121 					 * expected (rtx-)pseudo-cumack.
4122 					 */
4123 					tp1->whoTo->new_pseudo_cumack = 1;
4124 					tp1->whoTo->find_pseudo_cumack = 1;
4125 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4126 
4127 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4128 						/* sa_ignore NO_NULL_CHK */
4129 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4130 					}
4131 				}
4132 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4133 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4134 				}
4135 				if (tp1->rec.data.chunk_was_revoked) {
4136 					/* deflate the cwnd */
4137 					tp1->whoTo->cwnd -= tp1->book_size;
4138 					tp1->rec.data.chunk_was_revoked = 0;
4139 				}
4140 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4141 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4142 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4143 #ifdef INVARIANTS
4144 					} else {
4145 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4146 #endif
4147 					}
4148 				}
4149 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4150 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4151 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4152 					asoc->trigger_reset = 1;
4153 				}
4154 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4155 				if (tp1->data) {
4156 					/* sa_ignore NO_NULL_CHK */
4157 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4158 					sctp_m_freem(tp1->data);
4159 					tp1->data = NULL;
4160 				}
4161 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4162 					sctp_log_sack(asoc->last_acked_seq,
4163 					    cumack,
4164 					    tp1->rec.data.tsn,
4165 					    0,
4166 					    0,
4167 					    SCTP_LOG_FREE_SENT);
4168 				}
4169 				asoc->sent_queue_cnt--;
4170 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4171 			} else {
4172 				break;
4173 			}
4174 		}
4175 
4176 	}
4177 	/* sa_ignore NO_NULL_CHK */
4178 	if (stcb->sctp_socket) {
4179 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4180 		struct socket *so;
4181 
4182 #endif
4183 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4184 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4185 			/* sa_ignore NO_NULL_CHK */
4186 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4187 		}
4188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4189 		so = SCTP_INP_SO(stcb->sctp_ep);
4190 		atomic_add_int(&stcb->asoc.refcnt, 1);
4191 		SCTP_TCB_UNLOCK(stcb);
4192 		SCTP_SOCKET_LOCK(so, 1);
4193 		SCTP_TCB_LOCK(stcb);
4194 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4195 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4196 			/* assoc was freed while we were unlocked */
4197 			SCTP_SOCKET_UNLOCK(so, 1);
4198 			return;
4199 		}
4200 #endif
4201 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4202 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4203 		SCTP_SOCKET_UNLOCK(so, 1);
4204 #endif
4205 	} else {
4206 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4207 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4208 		}
4209 	}
4210 
4211 	/* JRS - Use the congestion control given in the CC module */
4212 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4213 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 			if (net->net_ack2 > 0) {
4215 				/*
4216 				 * Karn's rule applies to clearing error
4217 				 * count, this is optional.
4218 				 */
4219 				net->error_count = 0;
4220 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4221 					/* addr came good */
4222 					net->dest_state |= SCTP_ADDR_REACHABLE;
4223 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4224 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4225 				}
4226 				if (net == stcb->asoc.primary_destination) {
4227 					if (stcb->asoc.alternate) {
4228 						/*
4229 						 * release the alternate,
4230 						 * primary is good
4231 						 */
4232 						sctp_free_remote_addr(stcb->asoc.alternate);
4233 						stcb->asoc.alternate = NULL;
4234 					}
4235 				}
4236 				if (net->dest_state & SCTP_ADDR_PF) {
4237 					net->dest_state &= ~SCTP_ADDR_PF;
4238 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4239 					    stcb->sctp_ep, stcb, net,
4240 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4241 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4242 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4243 					/* Done with this net */
4244 					net->net_ack = 0;
4245 				}
4246 				/* restore any doubled timers */
4247 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4248 				if (net->RTO < stcb->asoc.minrto) {
4249 					net->RTO = stcb->asoc.minrto;
4250 				}
4251 				if (net->RTO > stcb->asoc.maxrto) {
4252 					net->RTO = stcb->asoc.maxrto;
4253 				}
4254 			}
4255 		}
4256 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4257 	}
4258 	asoc->last_acked_seq = cumack;
4259 
4260 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4261 		/* nothing left in-flight */
4262 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4263 			net->flight_size = 0;
4264 			net->partial_bytes_acked = 0;
4265 		}
4266 		asoc->total_flight = 0;
4267 		asoc->total_flight_count = 0;
4268 	}
4269 
4270 	/* RWND update */
4271 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4272 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4273 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4274 		/* SWS sender side engages */
4275 		asoc->peers_rwnd = 0;
4276 	}
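	/*
	 * Worked example with hypothetical numbers: given an advertised
	 * rwnd of 64000, 12000 bytes in flight over 10 chunks, and a
	 * per-chunk overhead sysctl of 256, the usable peer window is
	 * 64000 - (12000 + 10 * 256) = 49440. A result below the SWS
	 * sender threshold is clamped to 0 to avoid silly window syndrome.
	 */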
4277 	if (asoc->peers_rwnd > old_rwnd) {
4278 		win_probe_recovery = 1;
4279 	}
4280 	/* Now assure a timer where data is queued at */
4281 again:
4282 	j = 0;
4283 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284 		if (win_probe_recovery && (net->window_probe)) {
4285 			win_probe_recovered = 1;
4286 			/*
4287 			 * Find the first chunk that was used with a window
4288 			 * probe and clear its window probe state
4289 			 */
4290 			/* sa_ignore FREED_MEMORY */
4291 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4292 				if (tp1->window_probe) {
4293 					/* move back to data send queue */
4294 					sctp_window_probe_recovery(stcb, asoc, tp1);
4295 					break;
4296 				}
4297 			}
4298 		}
4299 		if (net->flight_size) {
4300 			j++;
4301 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4302 			if (net->window_probe) {
4303 				net->window_probe = 0;
4304 			}
4305 		} else {
4306 			if (net->window_probe) {
4307 				/*
4308 				 * In window probes we must assure a timer
4309 				 * is still running there
4310 				 */
4311 				net->window_probe = 0;
4312 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4313 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4314 				}
4315 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4316 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4317 				    stcb, net,
4318 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4319 			}
4320 		}
4321 	}
4322 	if ((j == 0) &&
4323 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4324 	    (asoc->sent_queue_retran_cnt == 0) &&
4325 	    (win_probe_recovered == 0) &&
4326 	    (done_once == 0)) {
4327 		/*
4328 		 * huh, this should not happen unless all packets are
4329 		 * PR-SCTP and marked to skip, of course.
4330 		 */
4331 		if (sctp_fs_audit(asoc)) {
4332 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4333 				net->flight_size = 0;
4334 			}
4335 			asoc->total_flight = 0;
4336 			asoc->total_flight_count = 0;
4337 			asoc->sent_queue_retran_cnt = 0;
4338 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4339 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4340 					sctp_flight_size_increase(tp1);
4341 					sctp_total_flight_increase(stcb, tp1);
4342 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4343 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4344 				}
4345 			}
4346 		}
4347 		done_once = 1;
4348 		goto again;
4349 	}
4350 	/**********************************/
4351 	/* Now what about shutdown issues */
4352 	/**********************************/
4353 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4354 		/* nothing left on sendqueue.. consider done */
4355 		/* clean up */
4356 		if ((asoc->stream_queue_cnt == 1) &&
4357 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4358 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4359 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4360 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4361 		}
4362 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4363 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4364 		    (asoc->stream_queue_cnt == 1) &&
4365 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4366 			struct mbuf *op_err;
4367 
4368 			*abort_now = 1;
4369 			/* XXX */
4370 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4371 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4372 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4373 			return;
4374 		}
4375 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4376 		    (asoc->stream_queue_cnt == 0)) {
4377 			struct sctp_nets *netp;
4378 
4379 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4380 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4381 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4382 			}
4383 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4384 			sctp_stop_timers_for_shutdown(stcb);
4385 			if (asoc->alternate) {
4386 				netp = asoc->alternate;
4387 			} else {
4388 				netp = asoc->primary_destination;
4389 			}
4390 			sctp_send_shutdown(stcb, netp);
4391 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4392 			    stcb->sctp_ep, stcb, netp);
4393 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4394 			    stcb->sctp_ep, stcb, netp);
4395 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4396 		    (asoc->stream_queue_cnt == 0)) {
4397 			struct sctp_nets *netp;
4398 
4399 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4400 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4401 			sctp_stop_timers_for_shutdown(stcb);
4402 			if (asoc->alternate) {
4403 				netp = asoc->alternate;
4404 			} else {
4405 				netp = asoc->primary_destination;
4406 			}
4407 			sctp_send_shutdown_ack(stcb, netp);
4408 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4409 			    stcb->sctp_ep, stcb, netp);
4410 		}
4411 	}
4412 	/*********************************************/
4413 	/* Here we perform PR-SCTP procedures        */
4414 	/* (section 4.2)                             */
4415 	/*********************************************/
4416 	/* C1. update advancedPeerAckPoint */
4417 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4418 		asoc->advanced_peer_ack_point = cumack;
4419 	}
4420 	/* PR-Sctp issues need to be addressed too */
4421 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4422 		struct sctp_tmit_chunk *lchk;
4423 		uint32_t old_adv_peer_ack_point;
4424 
4425 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4426 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4427 		/* C3. See if we need to send a Fwd-TSN */
4428 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4429 			/*
4430 			 * ISSUE with ECN, see FWD-TSN processing.
4431 			 */
4432 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4433 				send_forward_tsn(stcb, asoc);
4434 			} else if (lchk) {
4435 				/* try to FR fwd-tsn's that get lost too */
4436 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4437 					send_forward_tsn(stcb, asoc);
4438 				}
4439 			}
4440 		}
4441 		if (lchk) {
4442 			/* Assure a timer is up */
4443 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4444 			    stcb->sctp_ep, stcb, lchk->whoTo);
4445 		}
4446 	}
4447 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4448 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4449 		    rwnd,
4450 		    stcb->asoc.peers_rwnd,
4451 		    stcb->asoc.total_flight,
4452 		    stcb->asoc.total_output_queue_size);
4453 	}
4454 }
4455 
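/*
 * The express handler above is used for the common case of a plain
 * cumulative ack; sctp_handle_sack() below is the slow path (it bumps
 * sctps_slowpath_sack) and also digests gap-ack blocks, NR gap-ack
 * blocks and duplicate TSN reports.
 */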
4456 void
4457 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4458     struct sctp_tcb *stcb,
4459     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4460     int *abort_now, uint8_t flags,
4461     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4462 {
4463 	struct sctp_association *asoc;
4464 	struct sctp_tmit_chunk *tp1, *tp2;
4465 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4466 	uint16_t wake_him = 0;
4467 	uint32_t send_s = 0;
4468 	long j;
4469 	int accum_moved = 0;
4470 	int will_exit_fast_recovery = 0;
4471 	uint32_t a_rwnd, old_rwnd;
4472 	int win_probe_recovery = 0;
4473 	int win_probe_recovered = 0;
4474 	struct sctp_nets *net = NULL;
4475 	int done_once;
4476 	int rto_ok = 1;
4477 	uint8_t reneged_all = 0;
4478 	uint8_t cmt_dac_flag;
4479 
4480 	/*
4481 	 * we take any chance we can to service our queues since we cannot
4482 	 * get awoken when the socket is read from :<
4483 	 */
4484 	/*
4485 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4486 	 * old sack, if so discard. 2) If there is nothing left in the send
4487 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4488 	 * too, update any rwnd change and verify no timers are running,
4489 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4490 	 * moved; process these first and note that it moved. 4) Process any
4491 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4492 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4493 	 * sync up flightsizes and things, stop all timers and also check
4494 	 * for shutdown_pending state. If so then go ahead and send off the
4495 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4496 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4497 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4498 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4499 	 * if in shutdown_recv state.
4500 	 */
4501 	SCTP_TCB_LOCK_ASSERT(stcb);
4502 	/* CMT DAC algo */
4503 	this_sack_lowest_newack = 0;
4504 	SCTP_STAT_INCR(sctps_slowpath_sack);
4505 	last_tsn = cum_ack;
4506 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4507 #ifdef SCTP_ASOCLOG_OF_TSNS
4508 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4509 	stcb->asoc.cumack_log_at++;
4510 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4511 		stcb->asoc.cumack_log_at = 0;
4512 	}
4513 #endif
4514 	a_rwnd = rwnd;
4515 
4516 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4517 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4518 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4519 	}
4520 
4521 	old_rwnd = stcb->asoc.peers_rwnd;
4522 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4523 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4524 		    stcb->asoc.overall_error_count,
4525 		    0,
4526 		    SCTP_FROM_SCTP_INDATA,
4527 		    __LINE__);
4528 	}
4529 	stcb->asoc.overall_error_count = 0;
4530 	asoc = &stcb->asoc;
4531 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4532 		sctp_log_sack(asoc->last_acked_seq,
4533 		    cum_ack,
4534 		    0,
4535 		    num_seg,
4536 		    num_dup,
4537 		    SCTP_LOG_NEW_SACK);
4538 	}
4539 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4540 		uint16_t i;
4541 		uint32_t *dupdata, dblock;
4542 
4543 		for (i = 0; i < num_dup; i++) {
4544 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4545 			    sizeof(uint32_t), (uint8_t *)&dblock);
4546 			if (dupdata == NULL) {
4547 				break;
4548 			}
4549 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4550 		}
4551 	}
4552 	/* reality check */
4553 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4554 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4555 		    sctpchunk_listhead);
4556 		send_s = tp1->rec.data.tsn + 1;
4557 	} else {
4558 		tp1 = NULL;
4559 		send_s = asoc->sending_seq;
4560 	}
4561 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4562 		struct mbuf *op_err;
4563 		char msg[SCTP_DIAG_INFO_LEN];
4564 
4565 		/*
4566 		 * no way, we have not even sent this TSN out yet. Peer is
4567 		 * hopelessly messed up with us.
4568 		 */
4569 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4570 		    cum_ack, send_s);
4571 		if (tp1) {
4572 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4573 			    tp1->rec.data.tsn, (void *)tp1);
4574 		}
4575 hopeless_peer:
4576 		*abort_now = 1;
4577 		/* XXX */
4578 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4579 		    cum_ack, send_s);
4580 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4581 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4582 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4583 		return;
4584 	}
4585 	/**********************/
4586 	/* 1) check the range */
4587 	/**********************/
4588 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4589 		/* acking something behind */
4590 		return;
4591 	}
4592 
4593 	/* update the Rwnd of the peer */
4594 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4595 	    TAILQ_EMPTY(&asoc->send_queue) &&
4596 	    (asoc->stream_queue_cnt == 0)) {
4597 		/* nothing left on send/sent and strmq */
4598 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4599 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4600 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4601 		}
4602 		asoc->peers_rwnd = a_rwnd;
4603 		if (asoc->sent_queue_retran_cnt) {
4604 			asoc->sent_queue_retran_cnt = 0;
4605 		}
4606 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4607 			/* SWS sender side engages */
4608 			asoc->peers_rwnd = 0;
4609 		}
4610 		/* stop any timers */
4611 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4613 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4614 			net->partial_bytes_acked = 0;
4615 			net->flight_size = 0;
4616 		}
4617 		asoc->total_flight = 0;
4618 		asoc->total_flight_count = 0;
4619 		return;
4620 	}
4621 	/*
4622 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4623 	 * things. The total byte count acked is tracked in net_ack AND
4624 	 * net_ack2 is used to track the total bytes acked that are
4625 	 * unambiguous and were never retransmitted. We track these on a per
4626 	 * destination address basis.
4627 	 */
4628 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4629 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4630 			/* Drag along the window_tsn for cwr's */
4631 			net->cwr_window_tsn = cum_ack;
4632 		}
4633 		net->prev_cwnd = net->cwnd;
4634 		net->net_ack = 0;
4635 		net->net_ack2 = 0;
4636 
4637 		/*
4638 		 * CMT: Reset CUC and Fast recovery algo variables before
4639 		 * SACK processing
4640 		 */
4641 		net->new_pseudo_cumack = 0;
4642 		net->will_exit_fast_recovery = 0;
4643 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4644 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4645 		}
4646 
4647 		/*
4648 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4649 		 * to be greater than the cumack. Also reset saw_newack to 0
4650 		 * for all dests.
4651 		 */
4652 		net->saw_newack = 0;
4653 		net->this_sack_highest_newack = last_tsn;
4654 	}
4655 	/* process the new consecutive TSN first */
4656 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4657 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4658 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4659 				accum_moved = 1;
4660 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4661 					/*
4662 					 * If it is less than ACKED, it is
4663 					 * now no-longer in flight. Higher
4664 					 * values may occur during marking
4665 					 */
4666 					if ((tp1->whoTo->dest_state &
4667 					    SCTP_ADDR_UNCONFIRMED) &&
4668 					    (tp1->snd_count < 2)) {
4669 						/*
4670 						 * If there was no retran
4671 						 * and the address is
4672 						 * un-confirmed and we sent
4673 						 * there and are now
4674 						 * sacked.. it's confirmed,
4675 						 * mark it so.
4676 						 */
4677 						tp1->whoTo->dest_state &=
4678 						    ~SCTP_ADDR_UNCONFIRMED;
4679 					}
4680 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4681 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4682 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4683 							    tp1->whoTo->flight_size,
4684 							    tp1->book_size,
4685 							    (uint32_t)(uintptr_t)tp1->whoTo,
4686 							    tp1->rec.data.tsn);
4687 						}
4688 						sctp_flight_size_decrease(tp1);
4689 						sctp_total_flight_decrease(stcb, tp1);
4690 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4691 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4692 							    tp1);
4693 						}
4694 					}
4695 					tp1->whoTo->net_ack += tp1->send_size;
4696 
4697 					/* CMT SFR and DAC algos */
4698 					this_sack_lowest_newack = tp1->rec.data.tsn;
4699 					tp1->whoTo->saw_newack = 1;
4700 
4701 					if (tp1->snd_count < 2) {
4702 						/*
4703 						 * True non-retransmitted
4704 						 * chunk
4705 						 */
4706 						tp1->whoTo->net_ack2 +=
4707 						    tp1->send_size;
4708 
4709 						/* update RTO too? */
4710 						if (tp1->do_rtt) {
4711 							if (rto_ok &&
4712 							    sctp_calculate_rto(stcb,
4713 							    &stcb->asoc,
4714 							    tp1->whoTo,
4715 							    &tp1->sent_rcv_time,
4716 							    SCTP_RTT_FROM_DATA)) {
4717 								rto_ok = 0;
4718 							}
4719 							if (tp1->whoTo->rto_needed == 0) {
4720 								tp1->whoTo->rto_needed = 1;
4721 							}
4722 							tp1->do_rtt = 0;
4723 						}
4724 					}
4725 					/*
4726 					 * CMT: CUCv2 algorithm. From the
4727 					 * cumack'd TSNs, for each TSN being
4728 					 * acked for the first time, set the
4729 					 * following variables for the
4730 					 * corresp destination.
4731 					 * new_pseudo_cumack will trigger a
4732 					 * cwnd update.
4733 					 * find_(rtx_)pseudo_cumack will
4734 					 * trigger search for the next
4735 					 * expected (rtx-)pseudo-cumack.
4736 					 */
4737 					tp1->whoTo->new_pseudo_cumack = 1;
4738 					tp1->whoTo->find_pseudo_cumack = 1;
4739 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4740 
4741 
4742 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4743 						sctp_log_sack(asoc->last_acked_seq,
4744 						    cum_ack,
4745 						    tp1->rec.data.tsn,
4746 						    0,
4747 						    0,
4748 						    SCTP_LOG_TSN_ACKED);
4749 					}
4750 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4751 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4752 					}
4753 				}
4754 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4755 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4756 #ifdef SCTP_AUDITING_ENABLED
4757 					sctp_audit_log(0xB3,
4758 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4759 #endif
4760 				}
4761 				if (tp1->rec.data.chunk_was_revoked) {
4762 					/* deflate the cwnd */
4763 					tp1->whoTo->cwnd -= tp1->book_size;
4764 					tp1->rec.data.chunk_was_revoked = 0;
4765 				}
4766 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4767 					tp1->sent = SCTP_DATAGRAM_ACKED;
4768 				}
4769 			}
4770 		} else {
4771 			break;
4772 		}
4773 	}
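	/*
	 * Start both gap-ack trackers at the cumulative ack;
	 * sctp_handle_segments() below may advance them.
	 */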
4774 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4775 	/* always set this up to cum-ack */
4776 	asoc->this_sack_highest_gap = last_tsn;
4777 
4778 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4779 
4780 		/*
4781 		 * this_sack_highest_gap will increase while handling NEW
4782 		 * segments; this_sack_highest_newack will increase while
4783 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4784 		 * used for the CMT DAC algorithm; saw_newack will also change.
4785 		 */
4786 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4787 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4788 		    num_seg, num_nr_seg, &rto_ok)) {
4789 			wake_him++;
4790 		}
4791 		/*
4792 		 * validate the biggest_tsn_acked in the gap acks if strict
4793 		 * adherence is wanted.
4794 		 */
4795 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4796 			/*
4797 			 * The peer is either confused or we are under attack.
4798 			 * We must abort.
4799 			 */
4800 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4801 			    biggest_tsn_acked, send_s);
4802 			goto hopeless_peer;
4803 		}
4804 	}
4805 	/********************************************/
4806 	/* cancel ALL T3-send timers if accum moved */
4807 	/********************************************/
4808 	if (asoc->sctp_cmt_on_off > 0) {
4809 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4810 			if (net->new_pseudo_cumack)
4811 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4812 				    stcb, net,
4813 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4814 
4815 		}
4816 	} else {
4817 		if (accum_moved) {
4818 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4819 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4820 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4821 			}
4822 		}
4823 	}
4824 	/*********************************************/
4825 	/* drop the acked chunks from the sent queue */
4826 	/*********************************************/
4827 	asoc->last_acked_seq = cum_ack;
4828 
4829 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4830 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4831 			break;
4832 		}
4833 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4834 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4835 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4836 #ifdef INVARIANTS
4837 			} else {
4838 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4839 #endif
4840 			}
4841 		}
4842 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4843 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4844 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4845 			asoc->trigger_reset = 1;
4846 		}
4847 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4848 		if (PR_SCTP_ENABLED(tp1->flags)) {
4849 			if (asoc->pr_sctp_cnt != 0)
4850 				asoc->pr_sctp_cnt--;
4851 		}
4852 		asoc->sent_queue_cnt--;
4853 		if (tp1->data) {
4854 			/* sa_ignore NO_NULL_CHK */
4855 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4856 			sctp_m_freem(tp1->data);
4857 			tp1->data = NULL;
4858 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4859 				asoc->sent_queue_cnt_removeable--;
4860 			}
4861 		}
4862 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4863 			sctp_log_sack(asoc->last_acked_seq,
4864 			    cum_ack,
4865 			    tp1->rec.data.tsn,
4866 			    0,
4867 			    0,
4868 			    SCTP_LOG_FREE_SENT);
4869 		}
4870 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4871 		wake_him++;
4872 	}
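	/*
	 * Sanity check: with an empty sent queue nothing can still be
	 * in flight.
	 */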
4873 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4874 #ifdef INVARIANTS
4875 		panic("Warning flight size is positive and should be 0");
4876 #else
4877 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4878 		    asoc->total_flight);
4879 #endif
4880 		asoc->total_flight = 0;
4881 	}
4882 
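	/*
	 * Freeing acked chunks released send-buffer space, so wake any
	 * writer blocked on the socket send buffer.
	 */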
4883 	/* sa_ignore NO_NULL_CHK */
4884 	if ((wake_him) && (stcb->sctp_socket)) {
4885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4886 		struct socket *so;
4887 
4888 #endif
4889 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4890 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4891 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4892 		}
4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4894 		so = SCTP_INP_SO(stcb->sctp_ep);
4895 		atomic_add_int(&stcb->asoc.refcnt, 1);
4896 		SCTP_TCB_UNLOCK(stcb);
4897 		SCTP_SOCKET_LOCK(so, 1);
4898 		SCTP_TCB_LOCK(stcb);
4899 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4900 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4901 			/* assoc was freed while we were unlocked */
4902 			SCTP_SOCKET_UNLOCK(so, 1);
4903 			return;
4904 		}
4905 #endif
4906 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4908 		SCTP_SOCKET_UNLOCK(so, 1);
4909 #endif
4910 	} else {
4911 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4912 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4913 		}
4914 	}
4915 
4916 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4917 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4918 			/* Setup so we will exit RFC2582 fast recovery */
4919 			will_exit_fast_recovery = 1;
4920 		}
4921 	}
4922 	/*
4923 	 * Check for revoked fragments:
4924 	 *
4925 	 * If the previous SACK had no gap reports, nothing can have been
4926 	 * revoked. If it did, then either this SACK also has gap reports
4927 	 * (num_seg > 0), in which case sctp_check_for_revoked() determines
4928 	 * whether the peer revoked any of them, or it has none, meaning
4929 	 * the peer revoked all previously ACKED fragments.
4930 	 */
4931 
4932 	if (num_seg) {
4933 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4934 		asoc->saw_sack_with_frags = 1;
4935 	} else if (asoc->saw_sack_with_frags) {
4936 		int cnt_revoked = 0;
4937 
4938 		/* The peer revoked all datagrams marked or acked. */
4939 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4940 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4941 				tp1->sent = SCTP_DATAGRAM_SENT;
4942 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4943 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4944 					    tp1->whoTo->flight_size,
4945 					    tp1->book_size,
4946 					    (uint32_t)(uintptr_t)tp1->whoTo,
4947 					    tp1->rec.data.tsn);
4948 				}
4949 				sctp_flight_size_increase(tp1);
4950 				sctp_total_flight_increase(stcb, tp1);
4951 				tp1->rec.data.chunk_was_revoked = 1;
4952 				/*
4953 				 * To ensure that this increase in
4954 				 * flightsize, which is artificial, does not
4955 				 * throttle the sender, we also increase the
4956 				 * cwnd artificially.
4957 				 */
4958 				tp1->whoTo->cwnd += tp1->book_size;
4959 				cnt_revoked++;
4960 			}
4961 		}
4962 		if (cnt_revoked) {
4963 			reneged_all = 1;
4964 		}
4965 		asoc->saw_sack_with_frags = 0;
4966 	}
4967 	if (num_nr_seg > 0)
4968 		asoc->saw_sack_with_nr_frags = 1;
4969 	else
4970 		asoc->saw_sack_with_nr_frags = 0;
4971 
4972 	/* JRS - Use the congestion control given in the CC module */
4973 	if (ecne_seen == 0) {
4974 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4975 			if (net->net_ack2 > 0) {
4976 				/*
4977 				 * Karn's rule applies to clearing the
4978 				 * error count; this is optional.
4979 				 */
4980 				net->error_count = 0;
4981 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4982 					/* The address became reachable again. */
4983 					net->dest_state |= SCTP_ADDR_REACHABLE;
4984 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4985 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4986 				}
4987 
4988 				if (net == stcb->asoc.primary_destination) {
4989 					if (stcb->asoc.alternate) {
4990 						/*
4991 						 * release the alternate,
4992 						 * primary is good
4993 						 */
4994 						sctp_free_remote_addr(stcb->asoc.alternate);
4995 						stcb->asoc.alternate = NULL;
4996 					}
4997 				}
4998 
4999 				if (net->dest_state & SCTP_ADDR_PF) {
5000 					net->dest_state &= ~SCTP_ADDR_PF;
5001 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5002 					    stcb->sctp_ep, stcb, net,
5003 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5004 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5005 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5006 					/* Done with this net */
5007 					net->net_ack = 0;
5008 				}
5009 				/* restore any doubled timers */
5010 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5011 				if (net->RTO < stcb->asoc.minrto) {
5012 					net->RTO = stcb->asoc.minrto;
5013 				}
5014 				if (net->RTO > stcb->asoc.maxrto) {
5015 					net->RTO = stcb->asoc.maxrto;
5016 				}
5017 			}
5018 		}
5019 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5020 	}
5021 
5022 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5023 		/* nothing left in-flight */
5024 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5025 			/* stop all timers */
5026 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5027 			    stcb, net,
5028 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5029 			net->flight_size = 0;
5030 			net->partial_bytes_acked = 0;
5031 		}
5032 		asoc->total_flight = 0;
5033 		asoc->total_flight_count = 0;
5034 	}
5035 
5036 	/**********************************/
5037 	/* Now what about shutdown issues */
5038 	/**********************************/
5039 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5040 		/* Nothing left on the send queues; consider it done. */
5041 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5042 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5043 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5044 		}
5045 		asoc->peers_rwnd = a_rwnd;
5046 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5047 			/* SWS sender side engages */
5048 			asoc->peers_rwnd = 0;
5049 		}
5050 		/* Clean up; check whether shutdown can proceed. */
5051 		if ((asoc->stream_queue_cnt == 1) &&
5052 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5053 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5054 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5055 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5056 		}
5057 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5058 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5059 		    (asoc->stream_queue_cnt == 1) &&
5060 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5061 			struct mbuf *op_err;
5062 
5063 			*abort_now = 1;
5064 			/* XXX */
5065 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5066 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5067 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5068 			return;
5069 		}
5070 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5071 		    (asoc->stream_queue_cnt == 0)) {
5072 			struct sctp_nets *netp;
5073 
5074 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5075 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5076 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5077 			}
5078 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5079 			sctp_stop_timers_for_shutdown(stcb);
5080 			if (asoc->alternate) {
5081 				netp = asoc->alternate;
5082 			} else {
5083 				netp = asoc->primary_destination;
5084 			}
5085 			sctp_send_shutdown(stcb, netp);
5086 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5087 			    stcb->sctp_ep, stcb, netp);
5088 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5089 			    stcb->sctp_ep, stcb, netp);
5090 			return;
5091 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5092 		    (asoc->stream_queue_cnt == 0)) {
5093 			struct sctp_nets *netp;
5094 
5095 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5096 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5097 			sctp_stop_timers_for_shutdown(stcb);
5098 			if (asoc->alternate) {
5099 				netp = asoc->alternate;
5100 			} else {
5101 				netp = asoc->primary_destination;
5102 			}
5103 			sctp_send_shutdown_ack(stcb, netp);
5104 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5105 			    stcb->sctp_ep, stcb, netp);
5106 			return;
5107 		}
5108 	}
5109 	/*
5110 	 * Now here we are going to recycle net_ack for a different use...
5111 	 * HEADS UP.
5112 	 */
5113 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5114 		net->net_ack = 0;
5115 	}
5116 
5117 	/*
5118 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5119 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5120 	 * automatically ensure that.
5121 	 */
5122 	if ((asoc->sctp_cmt_on_off > 0) &&
5123 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5124 	    (cmt_dac_flag == 0)) {
5125 		this_sack_lowest_newack = cum_ack;
5126 	}
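	/*
	 * Strike the still-missing chunks that the gap reports passed
	 * over; enough strikes marks a chunk for fast retransmission.
	 */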
5127 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5128 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5129 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5130 	}
5131 	/* JRS - Use the congestion control given in the CC module */
5132 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5133 
5134 	/* Now are we exiting loss recovery ? */
5135 	if (will_exit_fast_recovery) {
5136 		/* Ok, we must exit fast recovery */
5137 		asoc->fast_retran_loss_recovery = 0;
5138 	}
5139 	if ((asoc->sat_t3_loss_recovery) &&
5140 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5141 		/* end satellite t3 loss recovery */
5142 		asoc->sat_t3_loss_recovery = 0;
5143 	}
5144 	/*
5145 	 * CMT Fast recovery
5146 	 */
5147 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5148 		if (net->will_exit_fast_recovery) {
5149 			/* Ok, we must exit fast recovery */
5150 			net->fast_retran_loss_recovery = 0;
5151 		}
5152 	}
5153 
5154 	/* Adjust and set the new rwnd value */
5155 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5156 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5157 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5158 	}
5159 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5160 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5161 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5162 		/* SWS sender side engages */
5163 		asoc->peers_rwnd = 0;
5164 	}
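	/*
	 * If the peer's window grew, a destination still holding a
	 * window probe can have that chunk recovered below.
	 */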
5165 	if (asoc->peers_rwnd > old_rwnd) {
5166 		win_probe_recovery = 1;
5167 	}
5168 
5169 	/*
5170 	 * Now we must setup so we have a timer up for anyone with
5171 	 * outstanding data.
5172 	 */
5173 	done_once = 0;
5174 again:
5175 	j = 0;
5176 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5177 		if (win_probe_recovery && (net->window_probe)) {
5178 			win_probe_recovered = 1;
5179 			/*-
5180 			 * Find the first chunk that was used for a
5181 			 * window probe and clear the event. Put it
5182 			 * back into the send queue as if it had
5183 			 * not been sent.
5184 			 */
5185 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5186 				if (tp1->window_probe) {
5187 					sctp_window_probe_recovery(stcb, asoc, tp1);
5188 					break;
5189 				}
5190 			}
5191 		}
5192 		if (net->flight_size) {
5193 			j++;
5194 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5195 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5196 				    stcb->sctp_ep, stcb, net);
5197 			}
5198 			if (net->window_probe) {
5199 				net->window_probe = 0;
5200 			}
5201 		} else {
5202 			if (net->window_probe) {
5203 				/*
5204 				 * For window probes we must ensure a
5205 				 * timer is still running on that destination.
5206 				 */
5207 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5208 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5209 					    stcb->sctp_ep, stcb, net);
5210 
5211 				}
5212 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5213 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5214 				    stcb, net,
5215 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5216 			}
5217 		}
5218 	}
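	/*
	 * Nothing is in flight although the sent queue is not empty:
	 * audit the flight-size accounting and, if it was inconsistent,
	 * rebuild it from the sent queue and re-run the timer pass once.
	 */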
5219 	if ((j == 0) &&
5220 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5221 	    (asoc->sent_queue_retran_cnt == 0) &&
5222 	    (win_probe_recovered == 0) &&
5223 	    (done_once == 0)) {
5224 		/*
5225 		 * This should not happen unless all packets are
5226 		 * PR-SCTP and marked to be skipped.
5227 		 */
5228 		if (sctp_fs_audit(asoc)) {
5229 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5230 				net->flight_size = 0;
5231 			}
5232 			asoc->total_flight = 0;
5233 			asoc->total_flight_count = 0;
5234 			asoc->sent_queue_retran_cnt = 0;
5235 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5236 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5237 					sctp_flight_size_increase(tp1);
5238 					sctp_total_flight_increase(stcb, tp1);
5239 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5240 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5241 				}
5242 			}
5243 		}
5244 		done_once = 1;
5245 		goto again;
5246 	}
5247 	/*********************************************/
5248 	/* Here we perform PR-SCTP procedures        */
5249 	/* (section 4.2)                             */
5250 	/*********************************************/
5251 	/* C1. update advancedPeerAckPoint */
5252 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5253 		asoc->advanced_peer_ack_point = cum_ack;
5254 	}
5255 	/* C2. try to further move advancedPeerAckPoint ahead */
5256 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5257 		struct sctp_tmit_chunk *lchk;
5258 		uint32_t old_adv_peer_ack_point;
5259 
5260 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5261 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5262 		/* C3. See if we need to send a Fwd-TSN */
5263 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5264 			/*
5265 			 * ISSUE with ECN, see FWD-TSN processing.
5266 			 */
5267 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5268 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5269 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5270 				    old_adv_peer_ack_point);
5271 			}
5272 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5273 				send_forward_tsn(stcb, asoc);
5274 			} else if (lchk) {
5275 				/* Also fast-retransmit FWD-TSNs that appear lost. */
5276 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5277 					send_forward_tsn(stcb, asoc);
5278 				}
5279 			}
5280 		}
5281 		if (lchk) {
5282 			/* Assure a timer is up */
5283 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5284 			    stcb->sctp_ep, stcb, lchk->whoTo);
5285 		}
5286 	}
5287 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5288 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5289 		    a_rwnd,
5290 		    stcb->asoc.peers_rwnd,
5291 		    stcb->asoc.total_flight,
5292 		    stcb->asoc.total_output_queue_size);
5293 	}
5294 }
5295 
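/*
 * A SHUTDOWN chunk carries a cumulative TSN ack; run it through the
 * express SACK handling with an a_rwnd chosen so that the peer's
 * advertised window does not change.
 */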
5296 void
5297 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5298 {
5299 	/* Copy cum-ack */
5300 	uint32_t cum_ack, a_rwnd;
5301 
5302 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5303 	/* Arrange so a_rwnd does NOT change */
5304 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5305 
5306 	/* Now call the express sack handling */
5307 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5308 }
5309 
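/*
 * After a FORWARD-TSN has advanced last_mid_delivered on a stream,
 * deliver anything on that stream's reorder queue that has become
 * deliverable.
 */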
5310 static void
5311 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5312     struct sctp_stream_in *strmin)
5313 {
5314 	struct sctp_queued_to_read *control, *ncontrol;
5315 	struct sctp_association *asoc;
5316 	uint32_t mid;
5317 	int need_reasm_check = 0;
5318 
5319 	asoc = &stcb->asoc;
5320 	mid = strmin->last_mid_delivered;
5321 	/*
5322 	 * First deliver anything prior to and including the message ID
5323 	 * that came in.
5324 	 */
5325 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5326 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5327 			/* this is deliverable now */
5328 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5329 				if (control->on_strm_q) {
5330 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5331 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5332 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5333 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5334 #ifdef INVARIANTS
5335 					} else {
5336 						panic("strmin: %p ctl: %p unknown %d",
5337 						    strmin, control, control->on_strm_q);
5338 #endif
5339 					}
5340 					control->on_strm_q = 0;
5341 				}
5342 				/* subtract pending on streams */
5343 				if (asoc->size_on_all_streams >= control->length) {
5344 					asoc->size_on_all_streams -= control->length;
5345 				} else {
5346 #ifdef INVARIANTS
5347 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5348 #else
5349 					asoc->size_on_all_streams = 0;
5350 #endif
5351 				}
5352 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5353 				/* deliver it to at least the delivery-q */
5354 				if (stcb->sctp_socket) {
5355 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5356 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5357 					    control,
5358 					    &stcb->sctp_socket->so_rcv,
5359 					    1, SCTP_READ_LOCK_HELD,
5360 					    SCTP_SO_NOT_LOCKED);
5361 				}
5362 			} else {
5363 				/* It's a fragmented message. */
5364 				if (control->first_frag_seen) {
5365 					/*
5366 					 * Make this the next one to
5367 					 * deliver; we restore it later.
5368 					 */
5369 					strmin->last_mid_delivered = control->mid - 1;
5370 					need_reasm_check = 1;
5371 					break;
5372 				}
5373 			}
5374 		} else {
5375 			/* no more delivery now. */
5376 			break;
5377 		}
5378 	}
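	/*
	 * A partially reassembled message was left at the head of the
	 * queue; see whether it can be delivered now.
	 */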
5379 	if (need_reasm_check) {
5380 		int ret;
5381 
5382 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5383 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5384 			/* Restore the next to deliver unless we are ahead */
5385 			strmin->last_mid_delivered = mid;
5386 		}
5387 		if (ret == 0) {
5388 			/* Left the partial message at the front. */
5389 			return;
5390 		}
5391 		need_reasm_check = 0;
5392 	}
5393 	/*
5394 	 * Now deliver the queued messages the normal way, if any are
5395 	 * now ready.
5396 	 */
5397 	mid = strmin->last_mid_delivered + 1;
5398 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5399 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5400 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5401 				/* this is deliverable now */
5402 				if (control->on_strm_q) {
5403 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5404 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5405 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5406 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5407 #ifdef INVARIANTS
5408 					} else {
5409 						panic("strmin: %p ctl: %p unknown %d",
5410 						    strmin, control, control->on_strm_q);
5411 #endif
5412 					}
5413 					control->on_strm_q = 0;
5414 				}
5415 				/* subtract pending on streams */
5416 				if (asoc->size_on_all_streams >= control->length) {
5417 					asoc->size_on_all_streams -= control->length;
5418 				} else {
5419 #ifdef INVARIANTS
5420 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5421 #else
5422 					asoc->size_on_all_streams = 0;
5423 #endif
5424 				}
5425 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5426 				/* deliver it to at least the delivery-q */
5427 				strmin->last_mid_delivered = control->mid;
5428 				if (stcb->sctp_socket) {
5429 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5430 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5431 					    control,
5432 					    &stcb->sctp_socket->so_rcv, 1,
5433 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5434 
5435 				}
5436 				mid = strmin->last_mid_delivered + 1;
5437 			} else {
5438 				/* It's a fragmented message. */
5439 				if (control->first_frag_seen) {
5440 					/*
5441 					 * Make this the next one to
5442 					 * deliver.
5443 					 */
5444 					strmin->last_mid_delivered = control->mid - 1;
5445 					need_reasm_check = 1;
5446 					break;
5447 				}
5448 			}
5449 		} else {
5450 			break;
5451 		}
5452 	}
5453 	if (need_reasm_check) {
5454 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5455 	}
5456 }
5457 
5460 static void
5461 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5462     struct sctp_association *asoc,
5463     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5464 {
5465 	struct sctp_queued_to_read *control;
5466 	struct sctp_stream_in *strm;
5467 	struct sctp_tmit_chunk *chk, *nchk;
5468 	int cnt_removed = 0;
5469 
5470 	/*
5471 	 * For now, large messages held on the stream reassembly queue
5472 	 * that are complete are tossed too. In theory we could do more
5473 	 * work: spin through, stop after dumping one message (i.e. on
5474 	 * seeing the start of a new message at the head) and call the
5475 	 * delivery function to see whether it can be delivered. But for
5476 	 * now we just dump everything on the queue.
5477 	 */
5478 	strm = &asoc->strmin[stream];
5479 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5480 	if (control == NULL) {
5481 		/* Not found */
5482 		return;
5483 	}
5484 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5485 		return;
5486 	}
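	/*
	 * Toss every fragment queued for this message; for old-style
	 * unordered data, fragments past the given cum-TSN are left
	 * in place.
	 */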
5487 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5488 		/* Purge hanging chunks */
5489 		if (!asoc->idata_supported && (ordered == 0)) {
5490 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5491 				break;
5492 			}
5493 		}
5494 		cnt_removed++;
5495 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5496 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5497 			asoc->size_on_reasm_queue -= chk->send_size;
5498 		} else {
5499 #ifdef INVARIANTS
5500 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5501 #else
5502 			asoc->size_on_reasm_queue = 0;
5503 #endif
5504 		}
5505 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5506 		if (chk->data) {
5507 			sctp_m_freem(chk->data);
5508 			chk->data = NULL;
5509 		}
5510 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5511 	}
5512 	if (!TAILQ_EMPTY(&control->reasm)) {
5513 		/* This has to be old data, unordered */
5514 		if (control->data) {
5515 			sctp_m_freem(control->data);
5516 			control->data = NULL;
5517 		}
5518 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5519 		chk = TAILQ_FIRST(&control->reasm);
5520 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5521 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5522 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5523 			    chk, SCTP_READ_LOCK_HELD);
5524 		}
5525 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5526 		return;
5527 	}
5528 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5529 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5530 		if (asoc->size_on_all_streams >= control->length) {
5531 			asoc->size_on_all_streams -= control->length;
5532 		} else {
5533 #ifdef INVARIANTS
5534 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5535 #else
5536 			asoc->size_on_all_streams = 0;
5537 #endif
5538 		}
5539 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5540 		control->on_strm_q = 0;
5541 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5542 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5543 		control->on_strm_q = 0;
5544 #ifdef INVARIANTS
5545 	} else if (control->on_strm_q) {
5546 		panic("strm: %p ctl: %p unknown %d",
5547 		    strm, control, control->on_strm_q);
5548 #endif
5549 	}
5550 	control->on_strm_q = 0;
5551 	if (control->on_read_q == 0) {
5552 		sctp_free_remote_addr(control->whoFrom);
5553 		if (control->data) {
5554 			sctp_m_freem(control->data);
5555 			control->data = NULL;
5556 		}
5557 		sctp_free_a_readq(stcb, control);
5558 	}
5559 }
5560 
5561 void
5562 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5563     struct sctp_forward_tsn_chunk *fwd,
5564     int *abort_flag, struct mbuf *m, int offset)
5565 {
5566 	/* The pr-sctp fwd tsn */
5567 	/*
5568 	 * Here we perform all the data receiver side steps for processing
5569 	 * FwdTSN, as required by the PR-SCTP draft.
5570 	 *
5571 	 * Assume we get FwdTSN(x):
5572 	 * 1) update local cumTSN to x
5573 	 * 2) try to further advance cumTSN to x + others we have
5574 	 * 3) examine and update the re-ordering queue on pr-in-streams
5575 	 * 4) clean up the re-assembly queue
5576 	 * 5) send a SACK to report where we are
5577 	 */
5578 	struct sctp_association *asoc;
5579 	uint32_t new_cum_tsn, gap;
5580 	unsigned int i, fwd_sz, m_size;
5581 	uint32_t str_seq;
5582 	struct sctp_stream_in *strm;
5583 	struct sctp_queued_to_read *control, *sv;
5584 
5585 	asoc = &stcb->asoc;
5586 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5587 		SCTPDBG(SCTP_DEBUG_INDATA1,
5588 		    "Bad size, fwd-tsn chunk too small\n");
5589 		return;
5590 	}
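	/* Size of the TSN mapping array, in bits. */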
5591 	m_size = (stcb->asoc.mapping_array_size << 3);
5592 	/*************************************************************/
5593 	/* 1. Here we update local cumTSN and shift the bitmap array */
5594 	/*************************************************************/
5595 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5596 
5597 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5598 		/* Already got there ... */
5599 		return;
5600 	}
5601 	/*
5602 	 * Now we know the new TSN is more advanced; find the actual
5603 	 * gap.
5604 	 */
5605 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5606 	asoc->cumulative_tsn = new_cum_tsn;
5607 	if (gap >= m_size) {
5608 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5609 			struct mbuf *op_err;
5610 			char msg[SCTP_DIAG_INFO_LEN];
5611 
5612 			/*
5613 			 * Out of range, even counting single-byte chunks
5614 			 * in the rwnd we give out; this must be an attacker.
5615 			 */
5616 			*abort_flag = 1;
5617 			snprintf(msg, sizeof(msg),
5618 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5619 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5620 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5621 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5622 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5623 			return;
5624 		}
5625 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5626 
5627 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5628 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5629 		asoc->highest_tsn_inside_map = new_cum_tsn;
5630 
5631 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5632 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5633 
5634 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5635 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5636 		}
5637 	} else {
5638 		SCTP_TCB_LOCK_ASSERT(stcb);
5639 		for (i = 0; i <= gap; i++) {
5640 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5641 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5642 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5643 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5644 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5645 				}
5646 			}
5647 		}
5648 	}
5649 	/*************************************************************/
5650 	/* 2. Clean up the re-assembly queue                         */
5651 	/*************************************************************/
5652 
5653 	/* This is now done as part of clearing up the stream/seq */
5654 	if (asoc->idata_supported == 0) {
5655 		uint16_t sid;
5656 
5657 		/* Flush all the un-ordered data based on cum-tsn */
5658 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5659 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5660 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5661 		}
5662 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5663 	}
5664 	/*******************************************************/
5665 	/* 3. Update the PR-stream re-ordering queues and fix  */
5666 	/*    delivery issues as needed.                       */
5667 	/*******************************************************/
5668 	fwd_sz -= sizeof(*fwd);
5669 	if (m && fwd_sz) {
5670 		/* Walk the per-stream entries carried in the chunk. */
5671 		unsigned int num_str;
5672 		uint32_t mid, cur_mid;
5673 		uint16_t sid;
5674 		uint16_t ordered, flags;
5675 		struct sctp_strseq *stseq, strseqbuf;
5676 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5677 
5678 		offset += sizeof(*fwd);
5679 
5680 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5681 		if (asoc->idata_supported) {
5682 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5683 		} else {
5684 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5685 		}
5686 		for (i = 0; i < num_str; i++) {
5687 			if (asoc->idata_supported) {
5688 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5689 				    sizeof(struct sctp_strseq_mid),
5690 				    (uint8_t *)&strseqbuf_m);
5691 				offset += sizeof(struct sctp_strseq_mid);
5692 				if (stseq_m == NULL) {
5693 					break;
5694 				}
5695 				sid = ntohs(stseq_m->sid);
5696 				mid = ntohl(stseq_m->mid);
5697 				flags = ntohs(stseq_m->flags);
5698 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5699 					ordered = 0;
5700 				} else {
5701 					ordered = 1;
5702 				}
5703 			} else {
5704 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5705 				    sizeof(struct sctp_strseq),
5706 				    (uint8_t *)&strseqbuf);
5707 				offset += sizeof(struct sctp_strseq);
5708 				if (stseq == NULL) {
5709 					break;
5710 				}
5711 				sid = ntohs(stseq->sid);
5712 				mid = (uint32_t)ntohs(stseq->ssn);
5713 				ordered = 1;
5714 			}
5715 			/* Fields converted; now process this entry. */
5718 
5719 			/*
5720 			 * Now look for this stream/seq on the read queue
5721 			 * where it is not yet fully delivered. If we find
5722 			 * it, we transmute the read entry into a PDI_ABORTED.
5723 			 */
5724 			if (sid >= asoc->streamincnt) {
5725 				/* Invalid stream number; stop. */
5726 				break;
5727 			}
5728 			if ((asoc->str_of_pdapi == sid) &&
5729 			    (asoc->ssn_of_pdapi == mid)) {
5730 				/*
5731 				 * If this is the one we are currently
5732 				 * partially delivering, then we no longer
5733 				 * are. Note this will change with the
5734 				 * reassembly re-write.
5735 				 */
5736 				asoc->fragmented_delivery_inprogress = 0;
5737 			}
5738 			strm = &asoc->strmin[sid];
5739 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5740 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5741 			}
5742 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5743 				if ((control->sinfo_stream == sid) &&
5744 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5745 					str_seq = (sid << 16) | (0x0000ffff & mid);
5746 					control->pdapi_aborted = 1;
5747 					sv = stcb->asoc.control_pdapi;
5748 					control->end_added = 1;
5749 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5750 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5751 						if (asoc->size_on_all_streams >= control->length) {
5752 							asoc->size_on_all_streams -= control->length;
5753 						} else {
5754 #ifdef INVARIANTS
5755 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5756 #else
5757 							asoc->size_on_all_streams = 0;
5758 #endif
5759 						}
5760 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5761 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5762 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5763 #ifdef INVARIANTS
5764 					} else if (control->on_strm_q) {
5765 						panic("strm: %p ctl: %p unknown %d",
5766 						    strm, control, control->on_strm_q);
5767 #endif
5768 					}
5769 					control->on_strm_q = 0;
5770 					stcb->asoc.control_pdapi = control;
5771 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5772 					    stcb,
5773 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5774 					    (void *)&str_seq,
5775 					    SCTP_SO_NOT_LOCKED);
5776 					stcb->asoc.control_pdapi = sv;
5777 					break;
5778 				} else if ((control->sinfo_stream == sid) &&
5779 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5780 					/* We are past our victim SSN */
5781 					break;
5782 				}
5783 			}
5784 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5785 				/* Update the sequence number */
5786 				strm->last_mid_delivered = mid;
5787 			}
5788 			/* now kick the stream the new way */
5789 			/* sa_ignore NO_NULL_CHK */
5790 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5791 		}
5792 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5793 	}
5794 	/*
5795 	 * Now slide things forward.
5796 	 */
5797 	sctp_slide_mapping_arrays(stcb);
5798 }
5799