xref: /freebsd/sys/netinet/sctp_indata.c (revision 1165fc9a526630487a1feb63daef65c5aee1a583)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket.
	 * Since sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* What is the overhead of all these rwnd's? */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}

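/*
 * Illustrative example for the rwnd computation above (hypothetical
 * numbers, not taken from a trace): with 64000 bytes of space reported by
 * sctp_sbspace(), two chunks totalling 3000 bytes on the reassembly queue,
 * and one 1000-byte control still queued on a stream, the grant would be
 *
 *   calc = 64000
 *        - (3000 + 2 * MSIZE)    (reassembly data plus per-mbuf overhead)
 *        - (1000 + 1 * MSIZE)    (stream-queue data plus overhead)
 *        - my_rwnd_control_len   (control/cmsg overhead)
 *
 * clamped to 1 if the control overhead would shrink it below that.
 */
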
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}

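/*
 * Build the ancillary-data (cmsg) mbuf for a received message. Depending
 * on which socket features are enabled, this packs, in order, an
 * SCTP_RCVINFO, an SCTP_NXTINFO (only if the next message is already
 * known), and an SCTP_SNDRCV or SCTP_EXTRCV cmsg into a single mbuf.
 * Returns NULL if the user requested no ancillary data or no mbuf is
 * available.
 */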
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}

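/*
 * Mark a TSN as non-revokable: clear it from the ordinary mapping array
 * and set it in the nr_mapping_array, so it will be reported as a
 * non-renegable gap-ack. Also pulls highest_tsn_inside_map back down if
 * the TSN being moved was the current highest. A no-op unless
 * sctp_do_drain is enabled, since without draining we never renege.
 */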
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int in_r, in_nr;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/*
		 * This TSN is behind the cum ack and thus we don't need to
		 * worry about it being moved from one mapping array to the
		 * other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
	if (!in_nr) {
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	}
	if (in_r) {
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
		if (tsn == asoc->highest_tsn_inside_map) {
			/* We must back down to see what the new highest is. */
			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
					asoc->highest_tsn_inside_map = i;
					break;
				}
			}
			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
			}
		}
	}
}

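/*
 * Insert a control into the stream's ordered or unordered in-queue,
 * keeping the queue sorted by MID. Returns 0 on success and -1 if the
 * MID is a duplicate (or, pre-I-DATA, if a second unordered entry shows
 * up), in which case the caller aborts the association.
 */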
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one message can be queued here in
				 * old style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate message ID
				 * number? Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

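/*
 * Abort helper for reassembly failures: format a diagnostic string
 * identifying the call site (opspot) and the offending TSN/SID/FSN/MID,
 * free the chunk, and abort the association with a protocol-violation
 * error cause.
 */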
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

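/*
 * Recompute a control's length and tail pointer by walking its mbuf
 * chain, freeing any zero-length mbufs along the way. If the control is
 * already on the read queue, the socket buffer is charged for each mbuf,
 * so the caller is expected to hold the needed socket-buffer locks.
 */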
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has taken any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

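/*
 * Append an mbuf chain at the control's tail pointer, again pruning
 * zero-length mbufs, updating control->length, and accumulating the
 * number of bytes actually added in *added.
 */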
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has taken any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

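/*
 * Clone the bookkeeping fields of an existing control into a freshly
 * allocated one (nc). Used when a completed message leaves fragments of
 * a following message behind on the reassembly queue. Takes a new
 * reference on the source's net (whoFrom).
 */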
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

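/*
 * Reset a control so reassembly can restart at the given TSN, pulling it
 * back off the endpoint's read queue if it had already been queued there.
 */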
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there; hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSNs go to MID 0. So we have to do the old style watching
	 * to see if we have it all. If we return 1, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok, let's add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok, we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

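/*
 * Place a fragment for old-style (pre I-DATA) unordered delivery, where
 * every fragment shares MID 0 and the FSN is simply the TSN. A FIRST
 * fragment with a lower TSN than the FIRST already merged into the
 * control swaps places with it; everything else is inserted into the
 * control's reassembly list in TSN order.
 */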
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this one has a
			 * FIRST, it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok, this should not happen; if it does,
				 * we started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case,
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSNs higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in the queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered above
		 * that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok, we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

1359 /*
1360  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1361  * queue, see if anthing can be delivered. If so pull it off (or as much as
1362  * we can. If we run out of space then we must dump what we can and set the
1363  * appropriate flag to say we queued what we could.
1364  */
1365 static void
1366 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1367     struct sctp_queued_to_read *control,
1368     struct sctp_tmit_chunk *chk,
1369     int created_control,
1370     int *abort_flag, uint32_t tsn)
1371 {
1372 	uint32_t next_fsn;
1373 	struct sctp_tmit_chunk *at, *nat;
1374 	struct sctp_stream_in *strm;
1375 	int do_wakeup, unordered;
1376 	uint32_t lenadded;
1377 
1378 	strm = &asoc->strmin[control->sinfo_stream];
1379 	/*
1380 	 * For old un-ordered data chunks.
1381 	 */
1382 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1383 		unordered = 1;
1384 	} else {
1385 		unordered = 0;
1386 	}
1387 	/* Must be added to the stream-in queue */
1388 	if (created_control) {
1389 		if ((unordered == 0) || (asoc->idata_supported)) {
1390 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1391 		}
1392 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1393 			/* Duplicate SSN? */
1394 			sctp_abort_in_reasm(stcb, control, chk,
1395 			    abort_flag,
1396 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1397 			sctp_clean_up_control(stcb, control);
1398 			return;
1399 		}
1400 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1401 			/*
1402 			 * Ok we created this control and now lets validate
1403 			 * that its legal i.e. there is a B bit set, if not
1404 			 * and we have up to the cum-ack then its invalid.
1405 			 */
1406 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1407 				sctp_abort_in_reasm(stcb, control, chk,
1408 				    abort_flag,
1409 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1410 				return;
1411 			}
1412 		}
1413 	}
1414 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1415 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1416 		return;
1417 	}
1418 	/*
1419 	 * Ok we must queue the chunk into the reasembly portion: o if its
1420 	 * the first it goes to the control mbuf. o if its not first but the
1421 	 * next in sequence it goes to the control, and each succeeding one
1422 	 * in order also goes. o if its not in order we place it on the list
1423 	 * in its place.
1424 	 */
1425 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1426 		/* Its the very first one. */
1427 		SCTPDBG(SCTP_DEBUG_XXX,
1428 		    "chunk is a first fsn: %u becomes fsn_included\n",
1429 		    chk->rec.data.fsn);
1430 		if (control->first_frag_seen) {
1431 			/*
1432 			 * Error on senders part, they either sent us two
1433 			 * data chunks with FIRST, or they sent two
1434 			 * un-ordered chunks that were fragmented at the
1435 			 * same time in the same stream.
1436 			 */
1437 			sctp_abort_in_reasm(stcb, control, chk,
1438 			    abort_flag,
1439 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1440 			return;
1441 		}
1442 		control->first_frag_seen = 1;
1443 		control->sinfo_ppid = chk->rec.data.ppid;
1444 		control->sinfo_tsn = chk->rec.data.tsn;
1445 		control->fsn_included = chk->rec.data.fsn;
1446 		control->data = chk->data;
1447 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1448 		chk->data = NULL;
1449 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1450 		sctp_setup_tail_pointer(control);
1451 		asoc->size_on_all_streams += control->length;
1452 	} else {
1453 		/* Place the chunk in our list */
1454 		int inserted = 0;
1455 
1456 		if (control->last_frag_seen == 0) {
1457 			/* Still willing to raise highest FSN seen */
1458 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1459 				SCTPDBG(SCTP_DEBUG_XXX,
1460 				    "We have a new top_fsn: %u\n",
1461 				    chk->rec.data.fsn);
1462 				control->top_fsn = chk->rec.data.fsn;
1463 			}
1464 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1465 				SCTPDBG(SCTP_DEBUG_XXX,
1466 				    "The last fsn is now in place fsn: %u\n",
1467 				    chk->rec.data.fsn);
1468 				control->last_frag_seen = 1;
1469 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1470 					SCTPDBG(SCTP_DEBUG_XXX,
1471 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1472 					    chk->rec.data.fsn,
1473 					    control->top_fsn);
1474 					sctp_abort_in_reasm(stcb, control, chk,
1475 					    abort_flag,
1476 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1477 					return;
1478 				}
1479 			}
1480 			if (asoc->idata_supported || control->first_frag_seen) {
1481 				/*
1482 				 * For IDATA we always check since we know
1483 				 * that the first fragment is 0. For old
1484 				 * DATA we have to receive the first before
1485 				 * we know the first FSN (which is the TSN).
1486 				 */
1487 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1488 					/*
1489 					 * We have already delivered up to
1490 					 * this so its a dup
1491 					 */
1492 					sctp_abort_in_reasm(stcb, control, chk,
1493 					    abort_flag,
1494 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1495 					return;
1496 				}
1497 			}
1498 		} else {
1499 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1500 				/* Second last? huh? */
1501 				SCTPDBG(SCTP_DEBUG_XXX,
1502 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1503 				    chk->rec.data.fsn, control->top_fsn);
1504 				sctp_abort_in_reasm(stcb, control,
1505 				    chk, abort_flag,
1506 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1507 				return;
1508 			}
1509 			if (asoc->idata_supported || control->first_frag_seen) {
1510 				/*
1511 				 * For IDATA we always check since we know
1512 				 * that the first fragment is 0. For old
1513 				 * DATA we have to receive the first before
1514 				 * we know the first FSN (which is the TSN).
1515 				 */
1516 
1517 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1518 					/*
1519 					 * We have already delivered up to
1520 					 * this so its a dup
1521 					 */
1522 					SCTPDBG(SCTP_DEBUG_XXX,
1523 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1524 					    chk->rec.data.fsn, control->fsn_included);
1525 					sctp_abort_in_reasm(stcb, control, chk,
1526 					    abort_flag,
1527 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1528 					return;
1529 				}
1530 			}
1531 			/*
1532 			 * validate not beyond top FSN if we have seen last
1533 			 * one
1534 			 */
1535 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1536 				SCTPDBG(SCTP_DEBUG_XXX,
1537 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1538 				    chk->rec.data.fsn,
1539 				    control->top_fsn);
1540 				sctp_abort_in_reasm(stcb, control, chk,
1541 				    abort_flag,
1542 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1543 				return;
1544 			}
1545 		}
1546 		/*
1547 		 * If we reach here, we need to place the new chunk in the
1548 		 * reassembly for this control.
1549 		 */
1550 		SCTPDBG(SCTP_DEBUG_XXX,
1551 		    "chunk is a not first fsn: %u needs to be inserted\n",
1552 		    chk->rec.data.fsn);
1553 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1554 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1555 				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1556 					/* Last not at the end? huh? */
1557 					SCTPDBG(SCTP_DEBUG_XXX,
1558 					    "Last fragment not last in list: -- abort\n");
1559 					sctp_abort_in_reasm(stcb, control,
1560 					    chk, abort_flag,
1561 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1562 					return;
1563 				}
1564 				/*
1565 				 * This one in queue is bigger than the new
1566 				 * one, insert the new one before at.
1567 				 */
1568 				SCTPDBG(SCTP_DEBUG_XXX,
1569 				    "Insert it before fsn: %u\n",
1570 				    at->rec.data.fsn);
1571 				asoc->size_on_reasm_queue += chk->send_size;
1572 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1573 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1574 				inserted = 1;
1575 				break;
1576 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1577 				/*
1578 				 * The peer sent a duplicate fragment
1579 				 * sequence number. It could also be that
1580 				 * the FSNs have wrapped; comparing against
1581 				 * the TSN might disambiguate that, but for
1582 				 * now treat it as a protocol violation and
1583 				 * abort the association.
1584 				 */
1589 				SCTPDBG(SCTP_DEBUG_XXX,
1590 				    "Duplicate to fsn: %u -- abort\n",
1591 				    at->rec.data.fsn);
1592 				sctp_abort_in_reasm(stcb, control,
1593 				    chk, abort_flag,
1594 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1595 				return;
1596 			}
1597 		}
1598 		if (inserted == 0) {
1599 			/* Goes on the end */
1600 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1601 			    chk->rec.data.fsn);
1602 			asoc->size_on_reasm_queue += chk->send_size;
1603 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1604 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1605 		}
1606 	}
1607 	/*
1608 	 * Now let's see if we can pull any in-sequence fragments from the
1609 	 * reassembly queue into the control structure.
1610 	 */
1611 	do_wakeup = 0;
1612 	/*
1613 	 * If the first fragment has not been seen, there is no sense in
1614 	 * looking.
1615 	 */
1616 	if (control->first_frag_seen) {
1617 		next_fsn = control->fsn_included + 1;
1618 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1619 			if (at->rec.data.fsn == next_fsn) {
1620 				/* We can add this one now to the control */
1621 				SCTPDBG(SCTP_DEBUG_XXX,
1622 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1623 				    control, at,
1624 				    at->rec.data.fsn,
1625 				    next_fsn, control->fsn_included);
1626 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1627 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1628 				if (control->on_read_q) {
1629 					do_wakeup = 1;
1630 				} else {
1631 					/*
1632 					 * We only add to the
1633 					 * size-on-all-streams if it's not on
1634 					 * the read queue. The read queue flag
1635 					 * causes an sballoc, so it's
1636 					 * accounted for there.
1637 					 */
1638 					asoc->size_on_all_streams += lenadded;
1639 				}
1640 				next_fsn++;
1641 				if (control->end_added && control->pdapi_started) {
1642 					if (strm->pd_api_started) {
1643 						strm->pd_api_started = 0;
1644 						control->pdapi_started = 0;
1645 					}
1646 					if (control->on_read_q == 0) {
1647 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1648 						    control,
1649 						    &stcb->sctp_socket->so_rcv, control->end_added,
1650 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1651 					}
1652 					break;
1653 				}
1654 			} else {
1655 				break;
1656 			}
1657 		}
1658 	}
1659 	if (do_wakeup) {
1660 		/* Need to wakeup the reader */
1661 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1662 	}
1663 }
1664 
1665 static struct sctp_queued_to_read *
1666 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1667 {
1668 	struct sctp_queued_to_read *control;
1669 
1670 	if (ordered) {
1671 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1672 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1673 				break;
1674 			}
1675 		}
1676 	} else {
1677 		if (idata_supported) {
1678 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1679 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1680 					break;
1681 				}
1682 			}
1683 		} else {
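			/*
			 * Old DATA carries no MID for unordered messages, so
			 * fragments cannot be demultiplexed; the head of the
			 * queue is the one reassembly in progress.
			 */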
1684 			control = TAILQ_FIRST(&strm->uno_inqueue);
1685 		}
1686 	}
1687 	return (control);
1688 }
1689 
1690 static int
1691 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1692     struct mbuf **m, int offset, int chk_length,
1693     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1694     int *break_flag, int last_chunk, uint8_t chk_type)
1695 {
1696 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1697 	struct sctp_stream_in *strm;
1698 	uint32_t tsn, fsn, gap, mid;
1699 	struct mbuf *dmbuf;
1700 	int the_len;
1701 	int need_reasm_check = 0;
1702 	uint16_t sid;
1703 	struct mbuf *op_err;
1704 	char msg[SCTP_DIAG_INFO_LEN];
1705 	struct sctp_queued_to_read *control, *ncontrol;
1706 	uint32_t ppid;
1707 	uint8_t chk_flags;
1708 	struct sctp_stream_reset_list *liste;
1709 	int ordered;
1710 	size_t clen;
1711 	int created_control = 0;
1712 
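	/*
	 * Pull the fixed chunk header fields. DATA carries a 16-bit SSN and
	 * always a PPID; I-DATA carries a 32-bit MID and overlays the PPID
	 * (first fragment) with the FSN (all later fragments) in the same
	 * field. For DATA the TSN doubles as the FSN.
	 */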
1713 	if (chk_type == SCTP_IDATA) {
1714 		struct sctp_idata_chunk *chunk, chunk_buf;
1715 
1716 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1717 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1718 		chk_flags = chunk->ch.chunk_flags;
1719 		clen = sizeof(struct sctp_idata_chunk);
1720 		tsn = ntohl(chunk->dp.tsn);
1721 		sid = ntohs(chunk->dp.sid);
1722 		mid = ntohl(chunk->dp.mid);
1723 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1724 			fsn = 0;
1725 			ppid = chunk->dp.ppid_fsn.ppid;
1726 		} else {
1727 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1728 			ppid = 0xffffffff;	/* Use as an invalid value. */
1729 		}
1730 	} else {
1731 		struct sctp_data_chunk *chunk, chunk_buf;
1732 
1733 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1734 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1735 		chk_flags = chunk->ch.chunk_flags;
1736 		clen = sizeof(struct sctp_data_chunk);
1737 		tsn = ntohl(chunk->dp.tsn);
1738 		sid = ntohs(chunk->dp.sid);
1739 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1740 		fsn = tsn;
1741 		ppid = chunk->dp.ppid;
1742 	}
1743 	if ((size_t)chk_length == clen) {
1744 		/*
1745 		 * Need to send an abort since we had an empty data chunk.
1746 		 */
1747 		op_err = sctp_generate_no_user_data_cause(tsn);
1748 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1749 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1750 		*abort_flag = 1;
1751 		return (0);
1752 	}
1753 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1754 		asoc->send_sack = 1;
1755 	}
1756 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1757 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1758 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1759 	}
1760 	if (stcb == NULL) {
1761 		return (0);
1762 	}
1763 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1764 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1765 		/* It is a duplicate */
1766 		SCTP_STAT_INCR(sctps_recvdupdata);
1767 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1768 			/* Record a dup for the next outbound sack */
1769 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1770 			asoc->numduptsns++;
1771 		}
1772 		asoc->send_sack = 1;
1773 		return (0);
1774 	}
1775 	/* Calculate the number of TSNs between the base and this TSN */
1776 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
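	/*
	 * A worked example, assuming SCTP_CALC_TSN_TO_GAP performs
	 * serial-number subtraction modulo 2^32: with a base of 0xfffffffe
	 * and tsn == 1, the gap comes out as 3, handling the 32-bit wrap.
	 */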
1777 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1778 		/* Can't hold the bit in the mapping array at its maximum size; toss it */
1779 		return (0);
1780 	}
1781 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1782 		SCTP_TCB_LOCK_ASSERT(stcb);
1783 		if (sctp_expand_mapping_array(asoc, gap)) {
1784 			/* Can't expand, drop it */
1785 			return (0);
1786 		}
1787 	}
1788 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1789 		*high_tsn = tsn;
1790 	}
1791 	/* See if we have received this one already */
1792 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1793 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1794 		SCTP_STAT_INCR(sctps_recvdupdata);
1795 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1796 			/* Record a dup for the next outbound sack */
1797 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1798 			asoc->numduptsns++;
1799 		}
1800 		asoc->send_sack = 1;
1801 		return (0);
1802 	}
1803 	/*
1804 	 * Check the GONE flag; duplicates would already have caused a SACK
1805 	 * to be sent above.
1806 	 */
1807 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1808 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1809 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1810 		/*
1811 		 * The socket is gone, so there is no longer a
1812 		 * receiver. Send the peer an ABORT!
1813 		 */
1814 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1815 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1816 		*abort_flag = 1;
1817 		return (0);
1818 	}
1819 	/*
1820 	 * Now before going further we see if there is room. If NOT then we
1821 	 * MAY let one through only IF this TSN is the one we are waiting
1822 	 * for on a partial delivery API.
1823 	 */
1824 
1825 	/* Is the stream valid? */
1826 	if (sid >= asoc->streamincnt) {
1827 		struct sctp_error_invalid_stream *cause;
1828 
1829 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1830 		    0, M_NOWAIT, 1, MT_DATA);
1831 		if (op_err != NULL) {
1832 			/* add some space up front so prepend will work well */
1833 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1834 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1835 			/*
1836 			 * Error causes are just parameters; this one has two
1837 			 * back-to-back parameter headers: the error type and
1838 			 * size, then the stream id and a reserved field.
1839 			 */
1840 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1841 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1842 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1843 			cause->stream_id = htons(sid);
1844 			cause->reserved = htons(0);
1845 			sctp_queue_op_err(stcb, op_err);
1846 		}
1847 		SCTP_STAT_INCR(sctps_badsid);
1848 		SCTP_TCB_LOCK_ASSERT(stcb);
1849 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1850 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1851 			asoc->highest_tsn_inside_nr_map = tsn;
1852 		}
1853 		if (tsn == (asoc->cumulative_tsn + 1)) {
1854 			/* Update cum-ack */
1855 			asoc->cumulative_tsn = tsn;
1856 		}
1857 		return (0);
1858 	}
1859 	/*
1860 	 * If it's a fragmented message, let's see if we can find the control
1861 	 * on the reassembly queues.
1862 	 */
1863 	if ((chk_type == SCTP_IDATA) &&
1864 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1865 	    (fsn == 0)) {
1866 		/*
1867 		 * The first fragment *must* have FSN 0, and other (middle/
1868 		 * end) pieces can *not* have FSN 0. XXX: This can happen in
1869 		 * case of a wrap-around; ignore it for now.
1870 		 */
1871 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1872 		goto err_out;
1873 	}
1874 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1875 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1876 	    chk_flags, control);
1877 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1878 		/* See if we can find the re-assembly entity */
1879 		if (control != NULL) {
1880 			/* We found something, does it belong? */
1881 			if (ordered && (mid != control->mid)) {
1882 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1883 		err_out:
1884 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1885 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1886 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1887 				*abort_flag = 1;
1888 				return (0);
1889 			}
1890 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1891 				/*
1892 				 * We can't have a switched order with an
1893 				 * unordered chunk
1894 				 */
1895 				SCTP_SNPRINTF(msg, sizeof(msg),
1896 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1897 				    tsn);
1898 				goto err_out;
1899 			}
1900 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1901 				/*
1902 				 * We can't have a switched unordered with an
1903 				 * ordered chunk
1904 				 */
1905 				SCTP_SNPRINTF(msg, sizeof(msg),
1906 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1907 				    tsn);
1908 				goto err_out;
1909 			}
1910 		}
1911 	} else {
1912 		/*
1913 		 * It's a complete segment. Let's validate we don't have a
1914 		 * re-assembly going on with the same Stream/Seq (for
1915 		 * ordered) or in the same Stream for unordered.
1916 		 */
1917 		if (control != NULL) {
1918 			if (ordered || asoc->idata_supported) {
1919 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1920 				    chk_flags, mid);
1921 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1922 				goto err_out;
1923 			} else {
1924 				if ((tsn == control->fsn_included + 1) &&
1925 				    (control->end_added == 0)) {
1926 					SCTP_SNPRINTF(msg, sizeof(msg),
1927 					    "Illegal message sequence, missing end for MID: %8.8x",
1928 					    control->fsn_included);
1929 					goto err_out;
1930 				} else {
1931 					control = NULL;
1932 				}
1933 			}
1934 		}
1935 	}
1936 	/* now do the tests */
1937 	if (((asoc->cnt_on_all_streams +
1938 	    asoc->cnt_on_reasm_queue +
1939 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1940 	    (((int)asoc->my_rwnd) <= 0)) {
1941 		/*
1942 		 * When we have NO room in the rwnd we check to make sure
1943 		 * the reader is doing its job...
1944 		 */
1945 		if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
1946 			/* some to read, wake-up */
1947 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1948 		}
1949 		/* now is it in the mapping array of what we have accepted? */
1950 		if (chk_type == SCTP_DATA) {
1951 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1952 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1953 				/* Not in the valid range; dump it */
1954 		dump_packet:
1955 				sctp_set_rwnd(stcb, asoc);
1956 				if ((asoc->cnt_on_all_streams +
1957 				    asoc->cnt_on_reasm_queue +
1958 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1959 					SCTP_STAT_INCR(sctps_datadropchklmt);
1960 				} else {
1961 					SCTP_STAT_INCR(sctps_datadroprwnd);
1962 				}
1963 				*break_flag = 1;
1964 				return (0);
1965 			}
1966 		} else {
1967 			if (control == NULL) {
1968 				goto dump_packet;
1969 			}
1970 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1971 				goto dump_packet;
1972 			}
1973 		}
1974 	}
1975 #ifdef SCTP_ASOCLOG_OF_TSNS
1976 	SCTP_TCB_LOCK_ASSERT(stcb);
1977 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1978 		asoc->tsn_in_at = 0;
1979 		asoc->tsn_in_wrapped = 1;
1980 	}
1981 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1982 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1983 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1984 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1985 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1986 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1987 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1988 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1989 	asoc->tsn_in_at++;
1990 #endif
1991 	/*
1992 	 * Before we continue, let's validate that we are not being fooled
1993 	 * by an evil attacker. Our TSN spread is limited to the N * 8 bits
1994 	 * of the mapping array, so there is no way our stream sequence
1995 	 * numbers could have wrapped. Of course we only validate the FIRST
1996 	 * fragment, so the bit must be set.
1997 	 */
1998 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1999 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2000 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2001 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2002 		/* The incoming sseq is behind where we last delivered? */
2003 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2004 		    mid, asoc->strmin[sid].last_mid_delivered);
2005 
2006 		if (asoc->idata_supported) {
2007 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2008 			    asoc->strmin[sid].last_mid_delivered,
2009 			    tsn,
2010 			    sid,
2011 			    mid);
2012 		} else {
2013 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2014 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2015 			    tsn,
2016 			    sid,
2017 			    (uint16_t)mid);
2018 		}
2019 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2020 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2021 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2022 		*abort_flag = 1;
2023 		return (0);
2024 	}
2025 	if (chk_type == SCTP_IDATA) {
2026 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2027 	} else {
2028 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2029 	}
2030 	if (last_chunk == 0) {
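		/*
		 * Not the last chunk in the packet; copy the payload out so
		 * the original mbuf chain can continue to be parsed.
		 */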
2031 		if (chk_type == SCTP_IDATA) {
2032 			dmbuf = SCTP_M_COPYM(*m,
2033 			    (offset + sizeof(struct sctp_idata_chunk)),
2034 			    the_len, M_NOWAIT);
2035 		} else {
2036 			dmbuf = SCTP_M_COPYM(*m,
2037 			    (offset + sizeof(struct sctp_data_chunk)),
2038 			    the_len, M_NOWAIT);
2039 		}
2040 #ifdef SCTP_MBUF_LOGGING
2041 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2042 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2043 		}
2044 #endif
2045 	} else {
2046 		/* We can steal the last chunk */
2047 		int l_len;
2048 
2049 		dmbuf = *m;
2050 		/* lop off the top part */
2051 		if (chk_type == SCTP_IDATA) {
2052 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2053 		} else {
2054 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2055 		}
2056 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2057 			l_len = SCTP_BUF_LEN(dmbuf);
2058 		} else {
2059 			/*
2060 			 * Need to count up the size; hopefully we do not
2061 			 * hit this too often :-0
2062 			 */
2063 			struct mbuf *lat;
2064 
2065 			l_len = 0;
2066 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2067 				l_len += SCTP_BUF_LEN(lat);
2068 			}
2069 		}
2070 		if (l_len > the_len) {
2071 			/* Trim the rounding bytes off the end too */
2072 			m_adj(dmbuf, -(l_len - the_len));
2073 		}
2074 	}
2075 	if (dmbuf == NULL) {
2076 		SCTP_STAT_INCR(sctps_nomem);
2077 		return (0);
2078 	}
2079 	/*
2080 	 * Now no matter what, we need a control; get one if we don't have
2081 	 * one (we may have gotten it above when we found the message was
2082 	 * fragmented).
2083 	 */
2084 	if (control == NULL) {
2085 		sctp_alloc_a_readq(stcb, control);
2086 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2087 		    ppid,
2088 		    sid,
2089 		    chk_flags,
2090 		    NULL, fsn, mid);
2091 		if (control == NULL) {
2092 			SCTP_STAT_INCR(sctps_nomem);
2093 			return (0);
2094 		}
2095 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2096 			struct mbuf *mm;
2097 
2098 			control->data = dmbuf;
2099 			control->tail_mbuf = NULL;
2100 			for (mm = control->data; mm; mm = mm->m_next) {
2101 				control->length += SCTP_BUF_LEN(mm);
2102 				if (SCTP_BUF_NEXT(mm) == NULL) {
2103 					control->tail_mbuf = mm;
2104 				}
2105 			}
2106 			control->end_added = 1;
2107 			control->last_frag_seen = 1;
2108 			control->first_frag_seen = 1;
2109 			control->fsn_included = fsn;
2110 			control->top_fsn = fsn;
2111 		}
2112 		created_control = 1;
2113 	}
2114 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2115 	    chk_flags, ordered, mid, control);
2116 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2117 	    TAILQ_EMPTY(&asoc->resetHead) &&
2118 	    ((ordered == 0) ||
2119 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2120 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2121 		/* Candidate for express delivery */
2122 		/*
2123 		 * It's not fragmented, no PD-API is up, nothing is in the
2124 		 * delivery queue, it's unordered OR ordered and the next to
2125 		 * deliver AND nothing else is stuck on the stream queue,
2126 		 * and there is room for it in the socket buffer. Let's just
2127 		 * stuff it up the buffer....
2128 		 */
2129 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2130 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2131 			asoc->highest_tsn_inside_nr_map = tsn;
2132 		}
2133 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2134 		    control, mid);
2135 
2136 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2137 		    control, &stcb->sctp_socket->so_rcv,
2138 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2139 
2140 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2141 			/* for ordered, bump what we delivered */
2142 			asoc->strmin[sid].last_mid_delivered++;
2143 		}
2144 		SCTP_STAT_INCR(sctps_recvexpress);
2145 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2146 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2147 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2148 		}
2149 		control = NULL;
2150 		goto finish_express_del;
2151 	}
2152 
2153 	/* Now will we need a chunk too? */
2154 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2155 		sctp_alloc_a_chunk(stcb, chk);
2156 		if (chk == NULL) {
2157 			/* No memory so we drop the chunk */
2158 			SCTP_STAT_INCR(sctps_nomem);
2159 			if (last_chunk == 0) {
2160 				/* we copied it, free the copy */
2161 				sctp_m_freem(dmbuf);
2162 			}
2163 			return (0);
2164 		}
2165 		chk->rec.data.tsn = tsn;
2166 		chk->no_fr_allowed = 0;
2167 		chk->rec.data.fsn = fsn;
2168 		chk->rec.data.mid = mid;
2169 		chk->rec.data.sid = sid;
2170 		chk->rec.data.ppid = ppid;
2171 		chk->rec.data.context = stcb->asoc.context;
2172 		chk->rec.data.doing_fast_retransmit = 0;
2173 		chk->rec.data.rcv_flags = chk_flags;
2174 		chk->asoc = asoc;
2175 		chk->send_size = the_len;
2176 		chk->whoTo = net;
2177 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2178 		    chk,
2179 		    control, mid);
2180 		atomic_add_int(&net->ref_count, 1);
2181 		chk->data = dmbuf;
2182 	}
2183 	/* Set the appropriate TSN mark */
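	/*
	 * When draining is disabled the TSN is non-renegable and goes into
	 * the nr map; otherwise it stays in the renegable map, since it
	 * could still be dropped again under memory pressure.
	 */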
2184 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2185 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2186 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2187 			asoc->highest_tsn_inside_nr_map = tsn;
2188 		}
2189 	} else {
2190 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2191 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2192 			asoc->highest_tsn_inside_map = tsn;
2193 		}
2194 	}
2195 	/* Now is it complete (i.e. not fragmented)? */
2196 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2197 		/*
2198 		 * Special check for when streams are resetting. We could be
2199 		 * smarter about this and check the actual stream to see if
2200 		 * it is not being reset; that way we would not create
2201 		 * head-of-line blocking between streams being reset and
2202 		 * those not being reset.
2203 		 *
2204 		 */
2205 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2206 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2207 			/*
2208 			 * Yep, it's past where we need to reset... go ahead
2209 			 * and queue it.
2210 			 */
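			/*
			 * pending_reply_queue is kept sorted by TSN so the
			 * backlog can be replayed in order once the reset
			 * completes.
			 */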
2211 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2212 				/* first one on */
2213 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2214 			} else {
2215 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2216 				unsigned char inserted = 0;
2217 
2218 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2219 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2220 						continue;
2221 					} else {
2222 						/* found it */
2223 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2224 						inserted = 1;
2225 						break;
2226 					}
2227 				}
2228 				if (inserted == 0) {
2229 					/*
2230 					 * Nothing in the queue has a larger
2231 					 * TSN, so this one goes at the end.
2233 					 */
2234 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2235 				}
2236 			}
2237 			goto finish_express_del;
2238 		}
2239 		if (chk_flags & SCTP_DATA_UNORDERED) {
2240 			/* queue directly into socket buffer */
2241 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2242 			    control, mid);
2243 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2244 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2245 			    control,
2246 			    &stcb->sctp_socket->so_rcv, 1,
2247 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2248 
2249 		} else {
2250 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2251 			    mid);
2252 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2253 			if (*abort_flag) {
2254 				if (last_chunk) {
2255 					*m = NULL;
2256 				}
2257 				return (0);
2258 			}
2259 		}
2260 		goto finish_express_del;
2261 	}
2262 	/* If we reach here it's a reassembly */
2263 	need_reasm_check = 1;
2264 	SCTPDBG(SCTP_DEBUG_XXX,
2265 	    "Queue data to stream for reasm control: %p MID: %u\n",
2266 	    control, mid);
2267 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2268 	if (*abort_flag) {
2269 		/*
2270 		 * the assoc is now gone and chk was put onto the reasm
2271 		 * queue, which has all been freed.
2272 		 */
2273 		if (last_chunk) {
2274 			*m = NULL;
2275 		}
2276 		return (0);
2277 	}
2278 finish_express_del:
2279 	/* Here we tidy up things */
2280 	if (tsn == (asoc->cumulative_tsn + 1)) {
2281 		/* Update cum-ack */
2282 		asoc->cumulative_tsn = tsn;
2283 	}
2284 	if (last_chunk) {
2285 		*m = NULL;
2286 	}
2287 	if (ordered) {
2288 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2289 	} else {
2290 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2291 	}
2292 	SCTP_STAT_INCR(sctps_recvdata);
2293 	/* Set it present please */
2294 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2295 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2296 	}
2297 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2298 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2299 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2300 	}
2301 	if (need_reasm_check) {
2302 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2303 		need_reasm_check = 0;
2304 	}
2305 	/* check the special flag for stream resets */
2306 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2307 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2308 		/*
2309 		 * We have finished working through the backlogged TSNs; now
2310 		 * it is time to reset streams. 1: call reset function. 2: free
2311 		 * pending_reply space. 3: distribute any chunks in the
2312 		 * pending_reply_queue.
2313 		 */
2314 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2315 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2316 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2317 		SCTP_FREE(liste, SCTP_M_STRESET);
2318 		/* sa_ignore FREED_MEMORY */
2319 		liste = TAILQ_FIRST(&asoc->resetHead);
2320 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2321 			/* All can be removed */
2322 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2323 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2324 				strm = &asoc->strmin[control->sinfo_stream];
2325 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2326 				if (*abort_flag) {
2327 					return (0);
2328 				}
2329 				if (need_reasm_check) {
2330 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2331 					need_reasm_check = 0;
2332 				}
2333 			}
2334 		} else {
2335 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2336 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2337 					break;
2338 				}
2339 				/*
2340 				 * If control->sinfo_tsn <= liste->tsn we can
2341 				 * process it, which is the negation of the
2342 				 * break condition above.
2343 				 */
2344 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2345 				strm = &asoc->strmin[control->sinfo_stream];
2346 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2347 				if (*abort_flag) {
2348 					return (0);
2349 				}
2350 				if (need_reasm_check) {
2351 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2352 					need_reasm_check = 0;
2353 				}
2354 			}
2355 		}
2356 	}
2357 	return (1);
2358 }
2359 
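/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val
 * starting at the least significant bit, e.g. tab[0x07] == 3 and
 * tab[0xff] == 8. A minimal sketch of the computation the table
 * replaces (illustrative only):
 *
 *	int n = 0;
 *	while (n < 8 && (val & (1 << n)) != 0)
 *		n++;
 */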
2360 static const int8_t sctp_map_lookup_tab[256] = {
2361 	0, 1, 0, 2, 0, 1, 0, 3,
2362 	0, 1, 0, 2, 0, 1, 0, 4,
2363 	0, 1, 0, 2, 0, 1, 0, 3,
2364 	0, 1, 0, 2, 0, 1, 0, 5,
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 6,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 5,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 4,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 7,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 5,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 4,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 6,
2385 	0, 1, 0, 2, 0, 1, 0, 3,
2386 	0, 1, 0, 2, 0, 1, 0, 4,
2387 	0, 1, 0, 2, 0, 1, 0, 3,
2388 	0, 1, 0, 2, 0, 1, 0, 5,
2389 	0, 1, 0, 2, 0, 1, 0, 3,
2390 	0, 1, 0, 2, 0, 1, 0, 4,
2391 	0, 1, 0, 2, 0, 1, 0, 3,
2392 	0, 1, 0, 2, 0, 1, 0, 8
2393 };
2394 
2395 void
2396 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2397 {
2398 	/*
2399 	 * Now we also need to check the mapping array in a couple of ways.
2400 	 * 1) Did we move the cum-ack point?
2401 	 *
2402 	 * When you first glance at this you might think that all entries
2403 	 * that make up the position of the cum-ack would be in the
2404 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2405 	 * deliverable. That's true with one exception: when it's a fragmented
2406 	 * message we may not deliver the data until some threshold (or all
2407 	 * of it) is in place. So we must OR the nr_mapping_array and
2408 	 * mapping_array to get a true picture of the cum-ack.
2409 	 */
2410 	struct sctp_association *asoc;
2411 	int at;
2412 	uint8_t val;
2413 	int slide_from, slide_end, lgap, distance;
2414 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2415 
2416 	asoc = &stcb->asoc;
2417 
2418 	old_cumack = asoc->cumulative_tsn;
2419 	old_base = asoc->mapping_array_base_tsn;
2420 	old_highest = asoc->highest_tsn_inside_map;
2421 	/*
2422 	 * We could probably improve this a small bit by calculating the
2423 	 * offset of the current cum-ack as the starting point.
2424 	 */
2425 	at = 0;
2426 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2427 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2428 		if (val == 0xff) {
2429 			at += 8;
2430 		} else {
2431 			/* there is a 0 bit */
2432 			at += sctp_map_lookup_tab[val];
2433 			break;
2434 		}
2435 	}
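	/*
	 * 'at' now holds the length of the leading run of set bits across
	 * both maps combined, so the last in-sequence TSN is base + at - 1
	 * (at == 0 leaves the cum-ack just below the base).
	 */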
2436 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2437 
2438 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2439 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2440 #ifdef INVARIANTS
2441 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2442 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2443 #else
2444 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2445 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2446 		sctp_print_mapping_array(asoc);
2447 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2448 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2449 		}
2450 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2451 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2452 #endif
2453 	}
2454 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2455 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2456 	} else {
2457 		highest_tsn = asoc->highest_tsn_inside_map;
2458 	}
2459 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2460 		/* The whole array was completed by a single FR */
2461 		/* highest becomes the cum-ack */
2462 		int clr;
2463 #ifdef INVARIANTS
2464 		unsigned int i;
2465 #endif
2466 
2467 		/* clear the array */
2468 		clr = ((at + 7) >> 3);
2469 		if (clr > asoc->mapping_array_size) {
2470 			clr = asoc->mapping_array_size;
2471 		}
2472 		memset(asoc->mapping_array, 0, clr);
2473 		memset(asoc->nr_mapping_array, 0, clr);
2474 #ifdef INVARIANTS
2475 		for (i = 0; i < asoc->mapping_array_size; i++) {
2476 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2477 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2478 				sctp_print_mapping_array(asoc);
2479 			}
2480 		}
2481 #endif
2482 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2483 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2484 	} else if (at >= 8) {
2485 		/* we can slide the mapping array down */
2486 		/* slide_from holds where we hit the first NON 0xff byte */
2487 
2488 		/*
2489 		 * now calculate the ceiling of the move using our highest
2490 		 * TSN value
2491 		 */
2492 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2493 		slide_end = (lgap >> 3);
2494 		if (slide_end < slide_from) {
2495 			sctp_print_mapping_array(asoc);
2496 #ifdef INVARIANTS
2497 			panic("impossible slide");
2498 #else
2499 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2500 			    lgap, slide_end, slide_from, at);
2501 			return;
2502 #endif
2503 		}
2504 		if (slide_end > asoc->mapping_array_size) {
2505 #ifdef INVARIANTS
2506 			panic("would overrun buffer");
2507 #else
2508 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2509 			    asoc->mapping_array_size, slide_end);
2510 			slide_end = asoc->mapping_array_size;
2511 #endif
2512 		}
2513 		distance = (slide_end - slide_from) + 1;
2514 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2515 			sctp_log_map(old_base, old_cumack, old_highest,
2516 			    SCTP_MAP_PREPARE_SLIDE);
2517 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2518 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2519 		}
2520 		if (distance + slide_from > asoc->mapping_array_size ||
2521 		    distance < 0) {
2522 			/*
2523 			 * Here we do NOT slide the array forward, so that
2524 			 * hopefully when more data comes in to fill it up
2525 			 * we will be able to slide it forward. Really I
2526 			 * don't think this should happen :-0
2527 			 */
2528 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2529 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2530 				    (uint32_t)asoc->mapping_array_size,
2531 				    SCTP_MAP_SLIDE_NONE);
2532 			}
2533 		} else {
2534 			int ii;
2535 
2536 			for (ii = 0; ii < distance; ii++) {
2537 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2538 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2539 			}
2540 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2541 				asoc->mapping_array[ii] = 0;
2542 				asoc->nr_mapping_array[ii] = 0;
2543 			}
2544 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2545 				asoc->highest_tsn_inside_map += (slide_from << 3);
2546 			}
2547 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2548 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2549 			}
2550 			asoc->mapping_array_base_tsn += (slide_from << 3);
2551 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2552 				sctp_log_map(asoc->mapping_array_base_tsn,
2553 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2554 				    SCTP_MAP_SLIDE_RESULT);
2555 			}
2556 		}
2557 	}
2558 }
2559 
2560 void
2561 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2562 {
2563 	struct sctp_association *asoc;
2564 	uint32_t highest_tsn;
2565 	int is_a_gap;
2566 
2567 	sctp_slide_mapping_arrays(stcb);
2568 	asoc = &stcb->asoc;
2569 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2570 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2571 	} else {
2572 		highest_tsn = asoc->highest_tsn_inside_map;
2573 	}
2574 	/* Is there a gap now? */
2575 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2576 
2577 	/*
2578 	 * Now we need to see if we need to queue a sack or just start the
2579 	 * timer (if allowed).
2580 	 */
2581 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2582 		/*
2583 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2584 		 * sure the SACK timer is off and instead send a SHUTDOWN
2585 		 * and, if there is a gap, a SACK.
2586 		 */
2587 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2588 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2589 			    stcb->sctp_ep, stcb, NULL,
2590 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2591 		}
2592 		sctp_send_shutdown(stcb,
2593 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2594 		if (is_a_gap) {
2595 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2596 		}
2597 	} else {
2598 		/*
2599 		 * CMT DAC algorithm: increase number of packets received
2600 		 * since last ack
2601 		 */
2602 		stcb->asoc.cmt_dac_pkts_rcvd++;
2603 
2604 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2605 							 * SACK */
2606 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2607 							 * longer is one */
2608 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2609 		    (is_a_gap) ||	/* is still a gap */
2610 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2611 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) {	/* hit limit of pkts */
2612 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2613 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2614 			    (stcb->asoc.send_sack == 0) &&
2615 			    (stcb->asoc.numduptsns == 0) &&
2616 			    (stcb->asoc.delayed_ack) &&
2617 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2618 				/*
2619 				 * CMT DAC algorithm: with CMT, delay acks
2620 				 * even in the face of reordering.
2621 				 * Therefore, acks that do not have to be
2622 				 * sent for the above reasons will be
2623 				 * delayed. That is, acks that would have
2624 				 * been sent due to gap reports will be
2625 				 * delayed with DAC. Start the delayed ack
2626 				 * timer.
2627 				 */
2628 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2629 				    stcb->sctp_ep, stcb, NULL);
2630 			} else {
2631 				/*
2632 				 * Ok, we must build a SACK since the timer
2633 				 * is pending, we got our first packet, OR
2634 				 * there are gaps or duplicates.
2635 				 */
2636 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2637 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2638 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2639 			}
2640 		} else {
2641 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2642 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2643 				    stcb->sctp_ep, stcb, NULL);
2644 			}
2645 		}
2646 	}
2647 }
2648 
2649 int
2650 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2651     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2652     struct sctp_nets *net, uint32_t *high_tsn)
2653 {
2654 	struct sctp_chunkhdr *ch, chunk_buf;
2655 	struct sctp_association *asoc;
2656 	int num_chunks = 0;	/* number of control chunks processed */
2657 	int stop_proc = 0;
2658 	int break_flag, last_chunk;
2659 	int abort_flag = 0, was_a_gap;
2660 	struct mbuf *m;
2661 	uint32_t highest_tsn;
2662 	uint16_t chk_length;
2663 
2664 	/* set the rwnd */
2665 	sctp_set_rwnd(stcb, &stcb->asoc);
2666 
2667 	m = *mm;
2668 	SCTP_TCB_LOCK_ASSERT(stcb);
2669 	asoc = &stcb->asoc;
2670 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2671 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2672 	} else {
2673 		highest_tsn = asoc->highest_tsn_inside_map;
2674 	}
2675 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2676 	/*
2677 	 * Set up where we got the last DATA packet from for any SACK that
2678 	 * may need to go out. Don't bump the net. This is done ONLY when a
2679 	 * chunk is assigned.
2680 	 */
2681 	asoc->last_data_chunk_from = net;
2682 
2683 	/*-
2684 	 * Now before we proceed we must figure out if this is a wasted
2685 	 * cluster... i.e. it is a small packet sent in and yet the driver
2686 	 * underneath allocated a full cluster for it. If so we must copy it
2687 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2688 	 * with cluster starvation.
2689 	 */
2690 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2691 		/* we only handle mbufs that are singletons, not chains */
2692 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2693 		if (m) {
2694 			/* ok, let's see if we can copy the data up */
2695 			caddr_t *from, *to;
2696 
2697 			/* get the pointers and copy */
2698 			to = mtod(m, caddr_t *);
2699 			from = mtod((*mm), caddr_t *);
2700 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2701 			/* copy the length and free up the old */
2702 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2703 			sctp_m_freem(*mm);
2704 			/* success, back copy */
2705 			*mm = m;
2706 		} else {
2707 			/* We are in trouble in the mbuf world .. yikes */
2708 			m = *mm;
2709 		}
2710 	}
2711 	/* get pointer to the first chunk header */
2712 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2713 	    sizeof(struct sctp_chunkhdr),
2714 	    (uint8_t *)&chunk_buf);
2715 	if (ch == NULL) {
2716 		return (1);
2717 	}
2718 	/*
2719 	 * process all DATA chunks...
2720 	 */
2721 	*high_tsn = asoc->cumulative_tsn;
2722 	break_flag = 0;
2723 	asoc->data_pkts_seen++;
2724 	while (stop_proc == 0) {
2725 		/* validate chunk length */
2726 		chk_length = ntohs(ch->chunk_length);
2727 		if (length - *offset < chk_length) {
2728 			/* all done, mutilated chunk */
2729 			stop_proc = 1;
2730 			continue;
2731 		}
2732 		if ((asoc->idata_supported == 1) &&
2733 		    (ch->chunk_type == SCTP_DATA)) {
2734 			struct mbuf *op_err;
2735 			char msg[SCTP_DIAG_INFO_LEN];
2736 
2737 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2738 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2739 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2740 			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2741 			return (2);
2742 		}
2743 		if ((asoc->idata_supported == 0) &&
2744 		    (ch->chunk_type == SCTP_IDATA)) {
2745 			struct mbuf *op_err;
2746 			char msg[SCTP_DIAG_INFO_LEN];
2747 
2748 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2749 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2751 			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2752 			return (2);
2753 		}
2754 		if ((ch->chunk_type == SCTP_DATA) ||
2755 		    (ch->chunk_type == SCTP_IDATA)) {
2756 			uint16_t clen;
2757 
2758 			if (ch->chunk_type == SCTP_DATA) {
2759 				clen = sizeof(struct sctp_data_chunk);
2760 			} else {
2761 				clen = sizeof(struct sctp_idata_chunk);
2762 			}
2763 			if (chk_length < clen) {
2764 				/*
2765 				 * Need to send an abort since we had an
2766 				 * invalid data chunk.
2767 				 */
2768 				struct mbuf *op_err;
2769 				char msg[SCTP_DIAG_INFO_LEN];
2770 
2771 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2772 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2773 				    chk_length);
2774 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2775 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2776 				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2777 				return (2);
2778 			}
2779 #ifdef SCTP_AUDITING_ENABLED
2780 			sctp_audit_log(0xB1, 0);
2781 #endif
2782 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2783 				last_chunk = 1;
2784 			} else {
2785 				last_chunk = 0;
2786 			}
2787 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2788 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2789 			    last_chunk, ch->chunk_type)) {
2790 				num_chunks++;
2791 			}
2792 			if (abort_flag)
2793 				return (2);
2794 
2795 			if (break_flag) {
2796 				/*
2797 				 * Set because we are out of rwnd space and
2798 				 * have no drop-report space left.
2799 				 */
2800 				stop_proc = 1;
2801 				continue;
2802 			}
2803 		} else {
2804 			/* not a data chunk in the data region */
2805 			switch (ch->chunk_type) {
2806 			case SCTP_INITIATION:
2807 			case SCTP_INITIATION_ACK:
2808 			case SCTP_SELECTIVE_ACK:
2809 			case SCTP_NR_SELECTIVE_ACK:
2810 			case SCTP_HEARTBEAT_REQUEST:
2811 			case SCTP_HEARTBEAT_ACK:
2812 			case SCTP_ABORT_ASSOCIATION:
2813 			case SCTP_SHUTDOWN:
2814 			case SCTP_SHUTDOWN_ACK:
2815 			case SCTP_OPERATION_ERROR:
2816 			case SCTP_COOKIE_ECHO:
2817 			case SCTP_COOKIE_ACK:
2818 			case SCTP_ECN_ECHO:
2819 			case SCTP_ECN_CWR:
2820 			case SCTP_SHUTDOWN_COMPLETE:
2821 			case SCTP_AUTHENTICATION:
2822 			case SCTP_ASCONF_ACK:
2823 			case SCTP_PACKET_DROPPED:
2824 			case SCTP_STREAM_RESET:
2825 			case SCTP_FORWARD_CUM_TSN:
2826 			case SCTP_ASCONF:
2827 				{
2828 					/*
2829 					 * Now, what do we do with KNOWN
2830 					 * chunks that are NOT in the right
2831 					 * place?
2832 					 *
2833 					 * These are known chunk types, but
2834 					 * they are not allowed to follow
2835 					 * DATA in a packet, so treat it as
2836 					 * a protocol violation and abort
2837 					 * the association.
2838 					 */
2839 					struct mbuf *op_err;
2840 					char msg[SCTP_DIAG_INFO_LEN];
2841 
2842 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2843 					    ch->chunk_type);
2844 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2845 					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2846 					return (2);
2847 				}
2848 			default:
2849 				/*
2850 				 * Unknown chunk type: use bit rules after
2851 				 * checking length
2852 				 */
2853 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2854 					/*
2855 					 * Need to send an abort since we
2856 					 * had an invalid chunk.
2857 					 */
2858 					struct mbuf *op_err;
2859 					char msg[SCTP_DIAG_INFO_LEN];
2860 
2861 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2862 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2863 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2864 					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2865 					return (2);
2866 				}
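				/*
				 * The two high-order bits of the chunk type
				 * encode the handling: 0x40 set means report
				 * the unrecognized chunk in an ERROR; 0x80
				 * set means skip it and keep processing the
				 * packet, clear means stop.
				 */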
2867 				if (ch->chunk_type & 0x40) {
2868 					/* Add an error report to the queue */
2869 					struct mbuf *op_err;
2870 					struct sctp_gen_error_cause *cause;
2871 
2872 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2873 					    0, M_NOWAIT, 1, MT_DATA);
2874 					if (op_err != NULL) {
2875 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2876 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2877 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2878 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2879 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2880 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2881 							sctp_queue_op_err(stcb, op_err);
2882 						} else {
2883 							sctp_m_freem(op_err);
2884 						}
2885 					}
2886 				}
2887 				if ((ch->chunk_type & 0x80) == 0) {
2888 					/* discard the rest of this packet */
2889 					stop_proc = 1;
2890 				}	/* else skip this bad chunk and
2891 					 * continue... */
2892 				break;
2893 			}	/* switch of chunk type */
2894 		}
2895 		*offset += SCTP_SIZE32(chk_length);
2896 		if ((*offset >= length) || stop_proc) {
2897 			/* no more data left in the mbuf chain */
2898 			stop_proc = 1;
2899 			continue;
2900 		}
2901 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2902 		    sizeof(struct sctp_chunkhdr),
2903 		    (uint8_t *)&chunk_buf);
2904 		if (ch == NULL) {
2905 			*offset = length;
2906 			stop_proc = 1;
2907 			continue;
2908 		}
2909 	}
2910 	if (break_flag) {
2911 		/*
2912 		 * we need to report rwnd overrun drops.
2913 		 */
2914 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2915 	}
2916 	if (num_chunks) {
2917 		/*
2918 		 * Did we get data? If so, update the time for auto-close and
2919 		 * give the peer credit for being alive.
2920 		 */
2921 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2922 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2923 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2924 			    stcb->asoc.overall_error_count,
2925 			    0,
2926 			    SCTP_FROM_SCTP_INDATA,
2927 			    __LINE__);
2928 		}
2929 		stcb->asoc.overall_error_count = 0;
2930 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2931 	}
2932 	/* now service all of the reassm queue if needed */
2933 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2934 		/* Ensure that we ack right away */
2935 		stcb->asoc.send_sack = 1;
2936 	}
2937 	/* Start a sack timer or QUEUE a SACK for sending */
2938 	sctp_sack_check(stcb, was_a_gap);
2939 	return (0);
2940 }
2941 
2942 static int
2943 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2944     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2945     int *num_frs,
2946     uint32_t *biggest_newly_acked_tsn,
2947     uint32_t *this_sack_lowest_newack,
2948     int *rto_ok)
2949 {
2950 	struct sctp_tmit_chunk *tp1;
2951 	unsigned int theTSN;
2952 	int j, wake_him = 0, circled = 0;
2953 
2954 	/* Recover the tp1 we last saw */
2955 	tp1 = *p_tp1;
2956 	if (tp1 == NULL) {
2957 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2958 	}
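	/*
	 * Gap blocks may arrive out of order, so for each TSN the walk over
	 * the sent queue is allowed to wrap around once ('circled') before
	 * giving up.
	 */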
2959 	for (j = frag_strt; j <= frag_end; j++) {
2960 		theTSN = j + last_tsn;
2961 		while (tp1) {
2962 			if (tp1->rec.data.doing_fast_retransmit)
2963 				(*num_frs) += 1;
2964 
2965 			/*-
2966 			 * CMT: CUCv2 algorithm. For each TSN being
2967 			 * processed from the sent queue, track the
2968 			 * next expected pseudo-cumack, or
2969 			 * rtx_pseudo_cumack, if required. Separate
2970 			 * cumack trackers for first transmissions,
2971 			 * and retransmissions.
2972 			 */
2973 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2974 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2975 			    (tp1->snd_count == 1)) {
2976 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2977 				tp1->whoTo->find_pseudo_cumack = 0;
2978 			}
2979 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2981 			    (tp1->snd_count > 1)) {
2982 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2983 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2984 			}
2985 			if (tp1->rec.data.tsn == theTSN) {
2986 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2987 					/*-
2988 					 * must be held until
2989 					 * cum-ack passes
2990 					 */
2991 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2992 						/*-
2993 						 * If it is less than RESEND, it is
2994 						 * now no-longer in flight.
2995 						 * Higher values may already be set
2996 						 * via previous Gap Ack Blocks...
2997 						 * i.e. ACKED or RESEND.
2998 						 */
2999 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3000 						    *biggest_newly_acked_tsn)) {
3001 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3002 						}
3003 						/*-
3004 						 * CMT: SFR algo (and HTNA) - set
3005 						 * saw_newack to 1 for dest being
3006 						 * newly acked. update
3007 						 * this_sack_highest_newack if
3008 						 * appropriate.
3009 						 */
3010 						if (tp1->rec.data.chunk_was_revoked == 0)
3011 							tp1->whoTo->saw_newack = 1;
3012 
3013 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 						    tp1->whoTo->this_sack_highest_newack)) {
3015 							tp1->whoTo->this_sack_highest_newack =
3016 							    tp1->rec.data.tsn;
3017 						}
3018 						/*-
3019 						 * CMT DAC algo: also update
3020 						 * this_sack_lowest_newack
3021 						 */
3022 						if (*this_sack_lowest_newack == 0) {
3023 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3024 								sctp_log_sack(*this_sack_lowest_newack,
3025 								    last_tsn,
3026 								    tp1->rec.data.tsn,
3027 								    0,
3028 								    0,
3029 								    SCTP_LOG_TSN_ACKED);
3030 							}
3031 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3032 						}
3033 						/*-
3034 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3035 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3036 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3037 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3038 						 * Separate pseudo_cumack trackers for first transmissions and
3039 						 * retransmissions.
3040 						 */
3041 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3042 							if (tp1->rec.data.chunk_was_revoked == 0) {
3043 								tp1->whoTo->new_pseudo_cumack = 1;
3044 							}
3045 							tp1->whoTo->find_pseudo_cumack = 1;
3046 						}
3047 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3048 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3049 						}
3050 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3051 							if (tp1->rec.data.chunk_was_revoked == 0) {
3052 								tp1->whoTo->new_pseudo_cumack = 1;
3053 							}
3054 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3055 						}
3056 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3057 							sctp_log_sack(*biggest_newly_acked_tsn,
3058 							    last_tsn,
3059 							    tp1->rec.data.tsn,
3060 							    frag_strt,
3061 							    frag_end,
3062 							    SCTP_LOG_TSN_ACKED);
3063 						}
3064 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3065 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3066 							    tp1->whoTo->flight_size,
3067 							    tp1->book_size,
3068 							    (uint32_t)(uintptr_t)tp1->whoTo,
3069 							    tp1->rec.data.tsn);
3070 						}
3071 						sctp_flight_size_decrease(tp1);
3072 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3073 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3074 							    tp1);
3075 						}
3076 						sctp_total_flight_decrease(stcb, tp1);
3077 
3078 						tp1->whoTo->net_ack += tp1->send_size;
3079 						if (tp1->snd_count < 2) {
3080 							/*-
3081 							 * True non-retransmitted chunk
3082 							 */
3083 							tp1->whoTo->net_ack2 += tp1->send_size;
3084 
3085 							/*-
3086 							 * update RTO too? (first sample per SACK only)
3087 							 */
3088 							if (tp1->do_rtt) {
3089 								if (*rto_ok &&
3090 								    sctp_calculate_rto(stcb,
3091 								    &stcb->asoc,
3092 								    tp1->whoTo,
3093 								    &tp1->sent_rcv_time,
3094 								    SCTP_RTT_FROM_DATA)) {
3095 									*rto_ok = 0;
3096 								}
3097 								if (tp1->whoTo->rto_needed == 0) {
3098 									tp1->whoTo->rto_needed = 1;
3099 								}
3100 								tp1->do_rtt = 0;
3101 							}
3102 						}
3103 					}
3104 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3105 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3106 						    stcb->asoc.this_sack_highest_gap)) {
3107 							stcb->asoc.this_sack_highest_gap =
3108 							    tp1->rec.data.tsn;
3109 						}
3110 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3111 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3112 #ifdef SCTP_AUDITING_ENABLED
3113 							sctp_audit_log(0xB2,
3114 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3115 #endif
3116 						}
3117 					}
3118 					/*-
3119 					 * All chunks NOT UNSENT fall through here and are marked
3120 					 * (leave PR-SCTP ones that are to skip alone though)
3121 					 */
3122 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3123 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3124 						tp1->sent = SCTP_DATAGRAM_MARKED;
3125 					}
3126 					if (tp1->rec.data.chunk_was_revoked) {
3127 						/* deflate the cwnd */
3128 						tp1->whoTo->cwnd -= tp1->book_size;
3129 						tp1->rec.data.chunk_was_revoked = 0;
3130 					}
3131 					/* NR Sack code here */
3132 					if (nr_sacking &&
3133 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3134 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3135 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3136 #ifdef INVARIANTS
3137 						} else {
3138 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3139 #endif
3140 						}
3141 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3142 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3143 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3144 							stcb->asoc.trigger_reset = 1;
3145 						}
3146 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3147 						if (tp1->data) {
3148 							/*
3149 							 * sa_ignore
3150 							 * NO_NULL_CHK
3151 							 */
3152 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3153 							sctp_m_freem(tp1->data);
3154 							tp1->data = NULL;
3155 						}
3156 						wake_him++;
3157 					}
3158 				}
3159 				break;
3160 			}	/* if (tp1->tsn == theTSN) */
3161 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3162 				break;
3163 			}
3164 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3165 			if ((tp1 == NULL) && (circled == 0)) {
3166 				circled++;
3167 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3168 			}
3169 		}		/* end while (tp1) */
3170 		if (tp1 == NULL) {
3171 			circled = 0;
3172 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3173 		}
3174 		/* In case the fragments were not in order we must reset */
3175 	}			/* end for (j = fragStart */
3176 	*p_tp1 = tp1;
3177 	return (wake_him);	/* Return value only used for nr-sack */
3178 }
3179 
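/*
 * Walk the gap ack (and NR gap ack) blocks reported in a SACK. Each
 * block carries start and end offsets relative to the cumulative TSN
 * ack, so with a cum-ack of 1000 a block with start=3, end=5 reports
 * TSNs 1003 through 1005 as received (RFC 4960, Section 3.3.4). The
 * regular blocks (i < num_seg) are revocable; the NR (non-renegable)
 * blocks that follow are not.
 */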
3180 static int
3181 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3182     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3183     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3184     int num_seg, int num_nr_seg, int *rto_ok)
3185 {
3186 	struct sctp_gap_ack_block *frag, block;
3187 	struct sctp_tmit_chunk *tp1;
3188 	int i;
3189 	int num_frs = 0;
3190 	int chunk_freed;
3191 	int non_revocable;
3192 	uint16_t frag_strt, frag_end, prev_frag_end;
3193 
3194 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3195 	prev_frag_end = 0;
3196 	chunk_freed = 0;
3197 
3198 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3199 		if (i == num_seg) {
3200 			prev_frag_end = 0;
3201 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3202 		}
3203 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3204 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3205 		*offset += sizeof(block);
3206 		if (frag == NULL) {
3207 			return (chunk_freed);
3208 		}
3209 		frag_strt = ntohs(frag->start);
3210 		frag_end = ntohs(frag->end);
3211 
3212 		if (frag_strt > frag_end) {
3213 			/* This gap report is malformed, skip it. */
3214 			continue;
3215 		}
3216 		if (frag_strt <= prev_frag_end) {
3217 			/* This gap report is not in order, so restart. */
3218 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3219 		}
3220 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3221 			*biggest_tsn_acked = last_tsn + frag_end;
3222 		}
3223 		if (i < num_seg) {
3224 			non_revocable = 0;
3225 		} else {
3226 			non_revocable = 1;
3227 		}
3228 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3229 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3230 		    this_sack_lowest_newack, rto_ok)) {
3231 			chunk_freed = 1;
3232 		}
3233 		prev_frag_end = frag_end;
3234 	}
3235 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3236 		if (num_frs)
3237 			sctp_log_fr(*biggest_tsn_acked,
3238 			    *biggest_newly_acked_tsn,
3239 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3240 	}
3241 	return (chunk_freed);
3242 }
3243 
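/*
 * Check for revoked chunks: a chunk that was gap-acked earlier but is
 * no longer covered by this SACK has been revoked (the peer reneged),
 * so it goes back to SCTP_DATAGRAM_SENT and back onto the flight-size
 * books; a chunk that was re-acked this time moves from MARKED back
 * to ACKED.
 */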
3244 static void
3245 sctp_check_for_revoked(struct sctp_tcb *stcb,
3246     struct sctp_association *asoc, uint32_t cumack,
3247     uint32_t biggest_tsn_acked)
3248 {
3249 	struct sctp_tmit_chunk *tp1;
3250 
3251 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3252 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3253 			/*
3254 			 * ok this guy is either ACKED or MARKED. If it is
3255 			 * ACKED it has been previously acked but not this
3256 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3257 			 * again.
3258 			 */
3259 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3260 				break;
3261 			}
3262 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3263 				/* it has been revoked */
3264 				tp1->sent = SCTP_DATAGRAM_SENT;
3265 				tp1->rec.data.chunk_was_revoked = 1;
3266 				/*
3267 				 * We must add this stuff back in to assure
3268 				 * timers and such get started.
3269 				 */
3270 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3271 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3272 					    tp1->whoTo->flight_size,
3273 					    tp1->book_size,
3274 					    (uint32_t)(uintptr_t)tp1->whoTo,
3275 					    tp1->rec.data.tsn);
3276 				}
3277 				sctp_flight_size_increase(tp1);
3278 				sctp_total_flight_increase(stcb, tp1);
3279 				/*
3280 				 * We inflate the cwnd to compensate for our
3281 				 * artificial inflation of the flight_size.
3282 				 */
3283 				tp1->whoTo->cwnd += tp1->book_size;
3284 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3285 					sctp_log_sack(asoc->last_acked_seq,
3286 					    cumack,
3287 					    tp1->rec.data.tsn,
3288 					    0,
3289 					    0,
3290 					    SCTP_LOG_TSN_REVOKED);
3291 				}
3292 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3293 				/* it has been re-acked in this SACK */
3294 				tp1->sent = SCTP_DATAGRAM_ACKED;
3295 			}
3296 		}
3297 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3298 			break;
3299 	}
3300 }
3301 
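/*
 * Strike logic: every SACK that implies a chunk is missing bumps
 * tp1->sent by one, and the sent-state constants are spaced so that,
 * roughly speaking, three such strikes move a chunk from
 * SCTP_DATAGRAM_SENT to SCTP_DATAGRAM_RESEND, the usual
 * fast-retransmit threshold. The CMT SFR, DAC, and HTNA refinements
 * below only restrict when a strike may be counted.
 */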
3302 static void
3303 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3304     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3305 {
3306 	struct sctp_tmit_chunk *tp1;
3307 	int strike_flag = 0;
3308 	struct timeval now;
3309 	uint32_t sending_seq;
3310 	struct sctp_nets *net;
3311 	int num_dests_sacked = 0;
3312 
3313 	/*
3314 	 * Select the sending_seq: this is either the next thing ready to be
3315 	 * sent but not transmitted, OR the next seq we will assign.
3316 	 */
3317 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3318 	if (tp1 == NULL) {
3319 		sending_seq = asoc->sending_seq;
3320 	} else {
3321 		sending_seq = tp1->rec.data.tsn;
3322 	}
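	/*
	 * Either way, sending_seq now names the lowest TSN that has never
	 * been put on the wire: e.g. if the send queue holds unsent TSN 57
	 * it is 57, and if the queue is empty and 57 is the next TSN to be
	 * assigned it is likewise 57.
	 */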
3323 
3324 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3325 	if ((asoc->sctp_cmt_on_off > 0) &&
3326 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3327 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3328 			if (net->saw_newack)
3329 				num_dests_sacked++;
3330 		}
3331 	}
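	/*
	 * num_dests_sacked gates the DAC extra strikes below: the second
	 * strike is only taken when exactly one destination saw new acks,
	 * since a mixed SACK spanning several paths says nothing definite
	 * about any single path.
	 */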
3332 	if (stcb->asoc.prsctp_supported) {
3333 		(void)SCTP_GETTIME_TIMEVAL(&now);
3334 	}
3335 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3336 		strike_flag = 0;
3337 		if (tp1->no_fr_allowed) {
3338 			/* this one had a timeout or something */
3339 			continue;
3340 		}
3341 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3342 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3343 				sctp_log_fr(biggest_tsn_newly_acked,
3344 				    tp1->rec.data.tsn,
3345 				    tp1->sent,
3346 				    SCTP_FR_LOG_CHECK_STRIKE);
3347 		}
3348 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3349 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3350 			/* done */
3351 			break;
3352 		}
3353 		if (stcb->asoc.prsctp_supported) {
3354 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3355 				/* Is it expired? */
3356 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3357 					/* Yes so drop it */
3358 					if (tp1->data != NULL) {
3359 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3360 						    SCTP_SO_NOT_LOCKED);
3361 					}
3362 					continue;
3363 				}
3364 			}
3365 		}
3366 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3367 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3368 			/* we are beyond the tsn in the sack */
3369 			break;
3370 		}
3371 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3372 			/* either a RESEND, ACKED, or MARKED */
3373 			/* skip */
3374 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3375 				/* Continue strikin FWD-TSN chunks */
3376 				/* Continue striking FWD-TSN chunks */
3377 			}
3378 			continue;
3379 		}
3380 		/*
3381 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3382 		 */
3383 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3384 			/*
3385 			 * No new acks were received for data sent to this
3386 			 * dest. Therefore, according to the SFR algo for
3387 			 * CMT, no data sent to this dest can be marked for
3388 			 * FR using this SACK.
3389 			 */
3390 			continue;
3391 		} else if (tp1->whoTo &&
3392 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3393 			    tp1->whoTo->this_sack_highest_newack) &&
3394 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3395 			/*
3396 			 * CMT: New acks were received for data sent to this
3397 			 * dest. But no new acks were seen for data sent
3398 			 * after tp1. Therefore, according to the SFR algo
3399 			 * for CMT, tp1 cannot be marked for FR using this
3400 			 * SACK. This step covers part of the DAC algo and
3401 			 * the HTNA algo as well.
3402 			 */
3403 			continue;
3404 		}
3405 		/*
3406 		 * Here we check to see if we have already done a FR
3407 		 * and if so we see if the biggest TSN we saw in the sack is
3408 		 * smaller than the recovery point. If so we don't strike
3409 		 * the tsn... otherwise we CAN strike the TSN.
3410 		 */
3411 		/*
3412 		 * @@@ JRI: Check for CMT if (accum_moved &&
3413 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3414 		 * 0)) {
3415 		 */
3416 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3417 			/*
3418 			 * Strike the TSN if in fast-recovery and cum-ack
3419 			 * moved.
3420 			 */
3421 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3422 				sctp_log_fr(biggest_tsn_newly_acked,
3423 				    tp1->rec.data.tsn,
3424 				    tp1->sent,
3425 				    SCTP_FR_LOG_STRIKE_CHUNK);
3426 			}
3427 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3428 				tp1->sent++;
3429 			}
3430 			if ((asoc->sctp_cmt_on_off > 0) &&
3431 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3432 				/*
3433 				 * CMT DAC algorithm: If SACK flag is set to
3434 				 * 0, then lowest_newack test will not pass
3435 				 * because it would have been set to the
3436 				 * cumack earlier. If not already marked to be
3437 				 * rtx'd, if not a mixed sack, and if tp1 is
3438 				 * not between two sacked TSNs, then mark by
3439 				 * one more. NOTE that we are marking by one
3440 				 * additional time since the SACK DAC flag
3441 				 * indicates that two packets have been
3442 				 * received after this missing TSN.
3443 				 */
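				/*
				 * Worked example: with cum-ack 100, TSN 103
				 * missing, and a DAC SACK newly acking
				 * 104-105 on the only active destination,
				 * this_sack_lowest_newack (104) is greater
				 * than 103, so TSN 103 collects two strikes
				 * from this single SACK.
				 */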
3444 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3445 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3446 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 						sctp_log_fr(16 + num_dests_sacked,
3448 						    tp1->rec.data.tsn,
3449 						    tp1->sent,
3450 						    SCTP_FR_LOG_STRIKE_CHUNK);
3451 					}
3452 					tp1->sent++;
3453 				}
3454 			}
3455 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3456 		    (asoc->sctp_cmt_on_off == 0)) {
3457 			/*
3458 			 * For those that have done a FR we must take
3459 			 * special consideration if we strike, i.e. the
3460 			 * biggest_newly_acked must be higher than the
3461 			 * sending_seq at the time we did the FR.
3462 			 */
3463 			if (
3464 #ifdef SCTP_FR_TO_ALTERNATE
3465 			/*
3466 			 * If FR's go to new networks, then we must only do
3467 			 * this for singly homed asoc's. However if the FR's
3468 			 * go to the same network (Armando's work) then it's
3469 			 * ok to FR multiple times.
3470 			 */
3471 			    (asoc->numnets < 2)
3472 #else
3473 			    (1)
3474 #endif
3475 			    ) {
3476 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3477 				    tp1->rec.data.fast_retran_tsn)) {
3478 					/*
3479 					 * Strike the TSN, since this ack is
3480 					 * beyond where things were when we
3481 					 * did a FR.
3482 					 */
3483 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 						sctp_log_fr(biggest_tsn_newly_acked,
3485 						    tp1->rec.data.tsn,
3486 						    tp1->sent,
3487 						    SCTP_FR_LOG_STRIKE_CHUNK);
3488 					}
3489 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3490 						tp1->sent++;
3491 					}
3492 					strike_flag = 1;
3493 					if ((asoc->sctp_cmt_on_off > 0) &&
3494 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3495 						/*
3496 						 * CMT DAC algorithm: If
3497 						 * SACK flag is set to 0,
3498 						 * then lowest_newack test
3499 						 * will not pass because it
3500 						 * would have been set to
3501 						 * the cumack earlier. If
3502 						 * not already marked to be rtx'd,
3503 						 * if not a mixed sack, and
3504 						 * if tp1 is not between two
3505 						 * sacked TSNs, then mark by
3506 						 * one more. NOTE that we
3507 						 * are marking by one
3508 						 * additional time since the
3509 						 * SACK DAC flag indicates
3510 						 * that two packets have
3511 						 * been received after this
3512 						 * missing TSN.
3513 						 */
3514 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3515 						    (num_dests_sacked == 1) &&
3516 						    SCTP_TSN_GT(this_sack_lowest_newack,
3517 						    tp1->rec.data.tsn)) {
3518 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3519 								sctp_log_fr(32 + num_dests_sacked,
3520 								    tp1->rec.data.tsn,
3521 								    tp1->sent,
3522 								    SCTP_FR_LOG_STRIKE_CHUNK);
3523 							}
3524 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3525 								tp1->sent++;
3526 							}
3527 						}
3528 					}
3529 				}
3530 			}
3531 			/*
3532 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3533 			 * algo covers HTNA.
3534 			 */
3535 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3536 		    biggest_tsn_newly_acked)) {
3537 			/*
3538 			 * We don't strike these: this is the HTNA
3539 			 * algorithm, i.e. we don't strike if our TSN is
3540 			 * larger than the Highest TSN Newly Acked.
3541 			 */
3542 			;
3543 		} else {
3544 			/* Strike the TSN */
3545 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3546 				sctp_log_fr(biggest_tsn_newly_acked,
3547 				    tp1->rec.data.tsn,
3548 				    tp1->sent,
3549 				    SCTP_FR_LOG_STRIKE_CHUNK);
3550 			}
3551 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3552 				tp1->sent++;
3553 			}
3554 			if ((asoc->sctp_cmt_on_off > 0) &&
3555 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3556 				/*
3557 				 * CMT DAC algorithm: If SACK flag is set to
3558 				 * 0, then lowest_newack test will not pass
3559 				 * because it would have been set to the
3560 				 * cumack earlier. If not already marked to be
3561 				 * rtx'd, if not a mixed sack, and if tp1 is
3562 				 * not between two sacked TSNs, then mark by
3563 				 * one more. NOTE that we are marking by one
3564 				 * additional time since the SACK DAC flag
3565 				 * indicates that two packets have been
3566 				 * received after this missing TSN.
3567 				 */
3568 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3569 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3570 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3571 						sctp_log_fr(48 + num_dests_sacked,
3572 						    tp1->rec.data.tsn,
3573 						    tp1->sent,
3574 						    SCTP_FR_LOG_STRIKE_CHUNK);
3575 					}
3576 					tp1->sent++;
3577 				}
3578 			}
3579 		}
3580 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3581 			struct sctp_nets *alt;
3582 
3583 			/* fix counts and things */
3584 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3585 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3586 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3587 				    tp1->book_size,
3588 				    (uint32_t)(uintptr_t)tp1->whoTo,
3589 				    tp1->rec.data.tsn);
3590 			}
3591 			if (tp1->whoTo) {
3592 				tp1->whoTo->net_ack++;
3593 				sctp_flight_size_decrease(tp1);
3594 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3595 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3596 					    tp1);
3597 				}
3598 			}
3599 
3600 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3601 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3602 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3603 			}
3604 			/* add back to the rwnd */
3605 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3606 
3607 			/* remove from the total flight */
3608 			sctp_total_flight_decrease(stcb, tp1);
3609 
3610 			if ((stcb->asoc.prsctp_supported) &&
3611 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3612 				/*
3613 				 * Has it been retransmitted tv_sec times? -
3614 				 * we store the retran count there.
3615 				 */
3616 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3617 					/* Yes, so drop it */
3618 					if (tp1->data != NULL) {
3619 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3620 						    SCTP_SO_NOT_LOCKED);
3621 					}
3622 					/* Make sure to flag we had a FR */
3623 					if (tp1->whoTo != NULL) {
3624 						tp1->whoTo->net_ack++;
3625 					}
3626 					continue;
3627 				}
3628 			}
3629 			/*
3630 			 * SCTP_PRINTF("OK, we are now ready to FR this
3631 			 * guy\n");
3632 			 */
3633 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3634 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3635 				    0, SCTP_FR_MARKED);
3636 			}
3637 			if (strike_flag) {
3638 				/* This is a subsequent FR */
3639 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3640 			}
3641 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3642 			if (asoc->sctp_cmt_on_off > 0) {
3643 				/*
3644 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3645 				 * If CMT is being used, then pick dest with
3646 				 * largest ssthresh for any retransmission.
3647 				 */
3648 				tp1->no_fr_allowed = 1;
3649 				alt = tp1->whoTo;
3650 				/* sa_ignore NO_NULL_CHK */
3651 				if (asoc->sctp_cmt_pf > 0) {
3652 					/*
3653 					 * JRS 5/18/07 - If CMT PF is on,
3654 					 * use the PF version of
3655 					 * find_alt_net()
3656 					 */
3657 					alt = sctp_find_alternate_net(stcb, alt, 2);
3658 				} else {
3659 					/*
3660 					 * JRS 5/18/07 - If only CMT is on,
3661 					 * use the CMT version of
3662 					 * find_alt_net()
3663 					 */
3664 					/* sa_ignore NO_NULL_CHK */
3665 					alt = sctp_find_alternate_net(stcb, alt, 1);
3666 				}
3667 				if (alt == NULL) {
3668 					alt = tp1->whoTo;
3669 				}
3670 				/*
3671 				 * CUCv2: If a different dest is picked for
3672 				 * the retransmission, then new
3673 				 * (rtx-)pseudo_cumack needs to be tracked
3674 				 * for orig dest. Let CUCv2 track new (rtx-)
3675 				 * pseudo-cumack always.
3676 				 */
3677 				if (tp1->whoTo) {
3678 					tp1->whoTo->find_pseudo_cumack = 1;
3679 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3680 				}
3681 			} else {	/* CMT is OFF */
3682 #ifdef SCTP_FR_TO_ALTERNATE
3683 				/* Can we find an alternate? */
3684 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3685 #else
3686 				/*
3687 				 * default behavior is to NOT retransmit
3688 				 * FR's to an alternate. Armando Caro's
3689 				 * paper details why.
3690 				 */
3691 				alt = tp1->whoTo;
3692 #endif
3693 			}
3694 
3695 			tp1->rec.data.doing_fast_retransmit = 1;
3696 			/* mark the sending seq for possible subsequent FR's */
3697 			/*
3698 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3699 			 * (uint32_t)tp1->rec.data.tsn);
3700 			 */
3701 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3702 				/*
3703 				 * If the send queue is empty then it's
3704 				 * the next sequence number that will be
3705 				 * assigned so we subtract one from this to
3706 				 * get the one we last sent.
3707 				 */
3708 				tp1->rec.data.fast_retran_tsn = sending_seq;
3709 			} else {
3710 				/*
3711 				 * If there are chunks on the send queue
3712 				 * (unsent data that has made it from the
3713 				 * stream queues but not out the door), we
3714 				 * take the first one (which will have the
3715 				 * lowest TSN) and subtract one to get the
3716 				 * one we last sent.
3717 				 */
3718 				struct sctp_tmit_chunk *ttt;
3719 
3720 				ttt = TAILQ_FIRST(&asoc->send_queue);
3721 				tp1->rec.data.fast_retran_tsn =
3722 				    ttt->rec.data.tsn;
3723 			}
3724 
3725 			if (tp1->do_rtt) {
3726 				/*
3727 				 * this guy had an RTO calculation pending on
3728 				 * it, cancel it
3729 				 */
3730 				if ((tp1->whoTo != NULL) &&
3731 				    (tp1->whoTo->rto_needed == 0)) {
3732 					tp1->whoTo->rto_needed = 1;
3733 				}
3734 				tp1->do_rtt = 0;
3735 			}
3736 			if (alt != tp1->whoTo) {
3737 				/* yes, there is an alternate. */
3738 				sctp_free_remote_addr(tp1->whoTo);
3739 				/* sa_ignore FREED_MEMORY */
3740 				tp1->whoTo = alt;
3741 				atomic_add_int(&alt->ref_count, 1);
3742 			}
3743 		}
3744 	}
3745 }
3746 
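/*
 * PR-SCTP (RFC 3758): abandoned chunks sitting at the head of the sent
 * queue let the sender advance its Advanced.Peer.Ack.Point past them
 * and announce that with a FORWARD-TSN. For example, with cum-ack 10
 * and abandoned TSNs 11-13, the ack point moves to 13 even though the
 * peer never received those TSNs. Returns the last chunk the point
 * advanced over, or NULL when there is nothing to advance.
 */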
3747 struct sctp_tmit_chunk *
3748 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3749     struct sctp_association *asoc)
3750 {
3751 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3752 	struct timeval now;
3753 	int now_filled = 0;
3754 
3755 	if (asoc->prsctp_supported == 0) {
3756 		return (NULL);
3757 	}
3758 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3759 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3760 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3761 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3762 			/* no chance to advance, out of here */
3763 			break;
3764 		}
3765 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3766 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3767 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3768 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3769 				    asoc->advanced_peer_ack_point,
3770 				    tp1->rec.data.tsn, 0, 0);
3771 			}
3772 		}
3773 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3774 			/*
3775 			 * We can't fwd-tsn past any that are reliable, aka
3776 			 * retransmitted until the asoc fails.
3777 			 */
3778 			break;
3779 		}
3780 		if (!now_filled) {
3781 			(void)SCTP_GETTIME_TIMEVAL(&now);
3782 			now_filled = 1;
3783 		}
3784 		/*
3785 		 * Now we have a chunk which is marked for another
3786 		 * retransmission to a PR-stream but has already run out its
3787 		 * chances, or has been marked to skip now. Can we skip
3788 		 * it if it's a resend?
3789 		 */
3790 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3791 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3792 			/*
3793 			 * Now is this one marked for resend and its time is
3794 			 * now up?
3795 			 */
3796 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3797 				/* Yes so drop it */
3798 				if (tp1->data) {
3799 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3800 					    1, SCTP_SO_NOT_LOCKED);
3801 				}
3802 			} else {
3803 				/*
3804 				 * No, we are done when we hit one for resend
3805 				 * whose time has not expired.
3806 				 */
3807 				break;
3808 			}
3809 		}
3810 		/*
3811 		 * Ok, now if this chunk is marked to be dropped, we can clean up
3812 		 * the chunk, advance our peer ack point, and check
3813 		 * the next chunk.
3814 		 */
3815 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3816 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3817 			/* the advanced PeerAckPoint goes forward */
3818 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3819 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3820 				a_adv = tp1;
3821 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3822 				/* No update but we do save the chk */
3823 				a_adv = tp1;
3824 			}
3825 		} else {
3826 			/*
3827 			 * If it is still in RESEND we can advance no
3828 			 * further
3829 			 */
3830 			break;
3831 		}
3832 	}
3833 	return (a_adv);
3834 }
3835 
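/*
 * Cross-check the association's flight-size accounting against a fresh
 * walk of the sent queue. Under INVARIANTS any chunk still counted as
 * in flight here triggers a panic; otherwise the discrepancy is logged
 * and a non-zero return tells the caller to rebuild the counters.
 */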
3836 static int
3837 sctp_fs_audit(struct sctp_association *asoc)
3838 {
3839 	struct sctp_tmit_chunk *chk;
3840 	int inflight = 0, inbetween = 0;
3841 	int ret;
3842 #ifndef INVARIANTS
3843 	int resend = 0, acked = 0, above = 0;
3844 	int entry_flight, entry_cnt;
3845 #endif
3846 
3847 	ret = 0;
3848 #ifndef INVARIANTS
3849 	entry_flight = asoc->total_flight;
3850 	entry_cnt = asoc->total_flight_count;
3851 #endif
3852 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3853 		return (0);
3854 
3855 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3857 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3858 			    chk->rec.data.tsn,
3859 			    chk->send_size,
3860 			    chk->snd_count);
3861 			inflight++;
3862 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863 #ifndef INVARIANTS
3864 			resend++;
3865 #endif
3866 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3867 			inbetween++;
3868 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3869 #ifndef INVARIANTS
3870 			above++;
3871 #endif
3872 		} else {
3873 #ifndef INVARIANTS
3874 			acked++;
3875 #endif
3876 		}
3877 	}
3878 
3879 	if ((inflight > 0) || (inbetween > 0)) {
3880 #ifdef INVARIANTS
3881 		panic("Flight size-express incorrect? \n");
3882 #else
3883 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3884 		    entry_flight, entry_cnt);
3885 
3886 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3887 		    inflight, inbetween, resend, above, acked);
3888 		ret = 1;
3889 #endif
3890 	}
3891 	return (ret);
3892 }
3893 
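/*
 * Undo the bookkeeping for a chunk that was sent as a window probe:
 * unless it has already been acked or abandoned, take it off the
 * flight-size books and mark it for (re)transmission under the normal
 * rules now that the peer's window has reopened.
 */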
3894 static void
3895 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3896     struct sctp_association *asoc,
3897     struct sctp_tmit_chunk *tp1)
3898 {
3899 	tp1->window_probe = 0;
3900 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3901 		/* TSNs skipped; we do NOT move back. */
3902 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3903 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3904 		    tp1->book_size,
3905 		    (uint32_t)(uintptr_t)tp1->whoTo,
3906 		    tp1->rec.data.tsn);
3907 		return;
3908 	}
3909 	/* First setup this by shrinking flight */
3910 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3911 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3912 		    tp1);
3913 	}
3914 	sctp_flight_size_decrease(tp1);
3915 	sctp_total_flight_decrease(stcb, tp1);
3916 	/* Now mark for resend */
3917 	tp1->sent = SCTP_DATAGRAM_RESEND;
3918 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3919 
3920 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3921 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3922 		    tp1->whoTo->flight_size,
3923 		    tp1->book_size,
3924 		    (uint32_t)(uintptr_t)tp1->whoTo,
3925 		    tp1->rec.data.tsn);
3926 	}
3927 }
3928 
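/*
 * Fast-path SACK processing for the common case where the SACK only
 * moves the cumulative ack (no gap ack blocks to walk, no duplicate
 * TSNs reported), so no strike or revocation work is needed.
 */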
3929 void
3930 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3931     uint32_t rwnd, int *abort_now, int ecne_seen)
3932 {
3933 	struct sctp_nets *net;
3934 	struct sctp_association *asoc;
3935 	struct sctp_tmit_chunk *tp1, *tp2;
3936 	uint32_t old_rwnd;
3937 	int win_probe_recovery = 0;
3938 	int win_probe_recovered = 0;
3939 	int j, done_once = 0;
3940 	int rto_ok = 1;
3941 	uint32_t send_s;
3942 
3943 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3944 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3945 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3946 	}
3947 	SCTP_TCB_LOCK_ASSERT(stcb);
3948 #ifdef SCTP_ASOCLOG_OF_TSNS
3949 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3950 	stcb->asoc.cumack_log_at++;
3951 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3952 		stcb->asoc.cumack_log_at = 0;
3953 	}
3954 #endif
3955 	asoc = &stcb->asoc;
3956 	old_rwnd = asoc->peers_rwnd;
3957 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3958 		/* old ack */
3959 		return;
3960 	} else if (asoc->last_acked_seq == cumack) {
3961 		/* Window update sack */
3962 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3963 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3964 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3965 			/* SWS sender side engages */
3966 			asoc->peers_rwnd = 0;
3967 		}
3968 		if (asoc->peers_rwnd > old_rwnd) {
3969 			goto again;
3970 		}
3971 		return;
3972 	}
3973 
3974 	/* First setup for CC stuff */
3975 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3976 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3977 			/* Drag along the window_tsn for cwr's */
3978 			net->cwr_window_tsn = cumack;
3979 		}
3980 		net->prev_cwnd = net->cwnd;
3981 		net->net_ack = 0;
3982 		net->net_ack2 = 0;
3983 
3984 		/*
3985 		 * CMT: Reset CUC and Fast recovery algo variables before
3986 		 * SACK processing
3987 		 */
3988 		net->new_pseudo_cumack = 0;
3989 		net->will_exit_fast_recovery = 0;
3990 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3991 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3992 		}
3993 	}
3994 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3995 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3996 		    sctpchunk_listhead);
3997 		send_s = tp1->rec.data.tsn + 1;
3998 	} else {
3999 		send_s = asoc->sending_seq;
4000 	}
4001 	if (SCTP_TSN_GE(cumack, send_s)) {
4002 		struct mbuf *op_err;
4003 		char msg[SCTP_DIAG_INFO_LEN];
4004 
4005 		*abort_now = 1;
4006 		/* XXX */
4007 		SCTP_SNPRINTF(msg, sizeof(msg),
4008 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4009 		    cumack, send_s);
4010 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4011 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4012 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4013 		return;
4014 	}
4015 	asoc->this_sack_highest_gap = cumack;
4016 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4017 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4018 		    stcb->asoc.overall_error_count,
4019 		    0,
4020 		    SCTP_FROM_SCTP_INDATA,
4021 		    __LINE__);
4022 	}
4023 	stcb->asoc.overall_error_count = 0;
4024 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4025 		/* process the new consecutive TSN first */
4026 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4027 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4028 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4029 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4030 				}
4031 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4032 					/*
4033 					 * If it is less than ACKED, it is
4034 					 * now no-longer in flight. Higher
4035 					 * values may occur during marking
4036 					 */
4037 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4038 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4039 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4040 							    tp1->whoTo->flight_size,
4041 							    tp1->book_size,
4042 							    (uint32_t)(uintptr_t)tp1->whoTo,
4043 							    tp1->rec.data.tsn);
4044 						}
4045 						sctp_flight_size_decrease(tp1);
4046 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4047 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4048 							    tp1);
4049 						}
4050 						/* sa_ignore NO_NULL_CHK */
4051 						sctp_total_flight_decrease(stcb, tp1);
4052 					}
4053 					tp1->whoTo->net_ack += tp1->send_size;
4054 					if (tp1->snd_count < 2) {
4055 						/*
4056 						 * True non-retransmitted
4057 						 * chunk
4058 						 */
4059 						tp1->whoTo->net_ack2 +=
4060 						    tp1->send_size;
4061 
4062 						/* update RTO too? */
4063 						if (tp1->do_rtt) {
4064 							if (rto_ok &&
4065 							    sctp_calculate_rto(stcb,
4066 							    &stcb->asoc,
4067 							    tp1->whoTo,
4068 							    &tp1->sent_rcv_time,
4069 							    SCTP_RTT_FROM_DATA)) {
4070 								rto_ok = 0;
4071 							}
4072 							if (tp1->whoTo->rto_needed == 0) {
4073 								tp1->whoTo->rto_needed = 1;
4074 							}
4075 							tp1->do_rtt = 0;
4076 						}
4077 					}
4078 					/*
4079 					 * CMT: CUCv2 algorithm. From the
4080 					 * cumack'd TSNs, for each TSN being
4081 					 * acked for the first time, set the
4082 					 * following variables for the
4083 					 * corresp destination.
4084 					 * new_pseudo_cumack will trigger a
4085 					 * cwnd update.
4086 					 * find_(rtx_)pseudo_cumack will
4087 					 * trigger search for the next
4088 					 * expected (rtx-)pseudo-cumack.
4089 					 */
4090 					tp1->whoTo->new_pseudo_cumack = 1;
4091 					tp1->whoTo->find_pseudo_cumack = 1;
4092 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4093 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4094 						/* sa_ignore NO_NULL_CHK */
4095 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4096 					}
4097 				}
4098 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4099 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4100 				}
4101 				if (tp1->rec.data.chunk_was_revoked) {
4102 					/* deflate the cwnd */
4103 					tp1->whoTo->cwnd -= tp1->book_size;
4104 					tp1->rec.data.chunk_was_revoked = 0;
4105 				}
4106 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4107 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4108 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4109 #ifdef INVARIANTS
4110 					} else {
4111 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4112 #endif
4113 					}
4114 				}
4115 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4116 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4117 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4118 					asoc->trigger_reset = 1;
4119 				}
4120 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4121 				if (tp1->data) {
4122 					/* sa_ignore NO_NULL_CHK */
4123 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4124 					sctp_m_freem(tp1->data);
4125 					tp1->data = NULL;
4126 				}
4127 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4128 					sctp_log_sack(asoc->last_acked_seq,
4129 					    cumack,
4130 					    tp1->rec.data.tsn,
4131 					    0,
4132 					    0,
4133 					    SCTP_LOG_FREE_SENT);
4134 				}
4135 				asoc->sent_queue_cnt--;
4136 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4137 			} else {
4138 				break;
4139 			}
4140 		}
4141 	}
4142 	/* sa_ignore NO_NULL_CHK */
4143 	if (stcb->sctp_socket) {
4144 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4145 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4146 			/* sa_ignore NO_NULL_CHK */
4147 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4148 		}
4149 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4150 	} else {
4151 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4152 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4153 		}
4154 	}
4155 
4156 	/* JRS - Use the congestion control given in the CC module */
4157 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4158 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4159 			if (net->net_ack2 > 0) {
4160 				/*
4161 				 * Karn's rule applies to clearing error
4162 				 * count; this is optional.
4163 				 */
4164 				net->error_count = 0;
4165 				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4166 					/* addr came good */
4167 					net->dest_state |= SCTP_ADDR_REACHABLE;
4168 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4169 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4170 				}
4171 				if (net == stcb->asoc.primary_destination) {
4172 					if (stcb->asoc.alternate) {
4173 						/*
4174 						 * release the alternate,
4175 						 * primary is good
4176 						 */
4177 						sctp_free_remote_addr(stcb->asoc.alternate);
4178 						stcb->asoc.alternate = NULL;
4179 					}
4180 				}
4181 				if (net->dest_state & SCTP_ADDR_PF) {
4182 					net->dest_state &= ~SCTP_ADDR_PF;
4183 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4184 					    stcb->sctp_ep, stcb, net,
4185 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4186 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4187 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4188 					/* Done with this net */
4189 					net->net_ack = 0;
4190 				}
4191 				/* restore any doubled timers */
4192 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4193 				if (net->RTO < stcb->asoc.minrto) {
4194 					net->RTO = stcb->asoc.minrto;
4195 				}
4196 				if (net->RTO > stcb->asoc.maxrto) {
4197 					net->RTO = stcb->asoc.maxrto;
4198 				}
4199 			}
4200 		}
4201 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4202 	}
4203 	asoc->last_acked_seq = cumack;
4204 
4205 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4206 		/* nothing left in-flight */
4207 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4208 			net->flight_size = 0;
4209 			net->partial_bytes_acked = 0;
4210 		}
4211 		asoc->total_flight = 0;
4212 		asoc->total_flight_count = 0;
4213 	}
4214 
4215 	/* RWND update */
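	/*
	 * The usable peer rwnd is the advertised rwnd minus the bytes we
	 * still count in flight plus a per-chunk overhead allowance. For
	 * example, an advertised rwnd of 64000 with 3 chunks of 1200 bytes
	 * in flight and sctp_peer_chunk_oh set to 256 leaves
	 * 64000 - (3600 + 768) = 59632 bytes.
	 */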
4216 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4217 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4218 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4219 		/* SWS sender side engages */
4220 		asoc->peers_rwnd = 0;
4221 	}
4222 	if (asoc->peers_rwnd > old_rwnd) {
4223 		win_probe_recovery = 1;
4224 	}
4225 	/* Now assure a timer where data is queued at */
4226 again:
4227 	j = 0;
4228 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4229 		if (win_probe_recovery && (net->window_probe)) {
4230 			win_probe_recovered = 1;
4231 			/*
4232 			 * Find the first chunk that was used with the window
4233 			 * probe and clear its sent state.
4234 			 */
4235 			/* sa_ignore FREED_MEMORY */
4236 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4237 				if (tp1->window_probe) {
4238 					/* move back to data send queue */
4239 					sctp_window_probe_recovery(stcb, asoc, tp1);
4240 					break;
4241 				}
4242 			}
4243 		}
4244 		if (net->flight_size) {
4245 			j++;
4246 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4247 			if (net->window_probe) {
4248 				net->window_probe = 0;
4249 			}
4250 		} else {
4251 			if (net->window_probe) {
4252 				/*
4253 				 * In window probes we must assure a timer
4254 				 * is still running there
4255 				 */
4256 				net->window_probe = 0;
4257 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4258 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4259 				}
4260 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4261 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4262 				    stcb, net,
4263 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4264 			}
4265 		}
4266 	}
4267 	if ((j == 0) &&
4268 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4269 	    (asoc->sent_queue_retran_cnt == 0) &&
4270 	    (win_probe_recovered == 0) &&
4271 	    (done_once == 0)) {
4272 		/*
4273 		 * huh, this should not happen unless all packets are
4274 		 * PR-SCTP and marked to skip, of course.
4275 		 */
4276 		if (sctp_fs_audit(asoc)) {
4277 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4278 				net->flight_size = 0;
4279 			}
4280 			asoc->total_flight = 0;
4281 			asoc->total_flight_count = 0;
4282 			asoc->sent_queue_retran_cnt = 0;
4283 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4284 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4285 					sctp_flight_size_increase(tp1);
4286 					sctp_total_flight_increase(stcb, tp1);
4287 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4288 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4289 				}
4290 			}
4291 		}
4292 		done_once = 1;
4293 		goto again;
4294 	}
4295 	/**********************************/
4296 	/* Now what about shutdown issues */
4297 	/**********************************/
4298 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4299 		/* nothing left on sendqueue.. consider done */
4300 		/* clean up */
4301 		if ((asoc->stream_queue_cnt == 1) &&
4302 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4303 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4304 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4305 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4306 		}
4307 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4308 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4309 		    (asoc->stream_queue_cnt == 1) &&
4310 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4311 			struct mbuf *op_err;
4312 
4313 			*abort_now = 1;
4314 			/* XXX */
4315 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4316 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4317 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4318 			return;
4319 		}
4320 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4321 		    (asoc->stream_queue_cnt == 0)) {
4322 			struct sctp_nets *netp;
4323 
4324 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4325 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4326 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4327 			}
4328 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4329 			sctp_stop_timers_for_shutdown(stcb);
4330 			if (asoc->alternate) {
4331 				netp = asoc->alternate;
4332 			} else {
4333 				netp = asoc->primary_destination;
4334 			}
4335 			sctp_send_shutdown(stcb, netp);
4336 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4337 			    stcb->sctp_ep, stcb, netp);
4338 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4339 			    stcb->sctp_ep, stcb, NULL);
4340 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4341 		    (asoc->stream_queue_cnt == 0)) {
4342 			struct sctp_nets *netp;
4343 
4344 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4345 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4346 			sctp_stop_timers_for_shutdown(stcb);
4347 			if (asoc->alternate) {
4348 				netp = asoc->alternate;
4349 			} else {
4350 				netp = asoc->primary_destination;
4351 			}
4352 			sctp_send_shutdown_ack(stcb, netp);
4353 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4354 			    stcb->sctp_ep, stcb, netp);
4355 		}
4356 	}
4357 	/*********************************************/
4358 	/* Here we perform PR-SCTP procedures        */
4359 	/* (section 4.2)                             */
4360 	/*********************************************/
4361 	/* C1. update advancedPeerAckPoint */
4362 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4363 		asoc->advanced_peer_ack_point = cumack;
4364 	}
4365 	/* PR-Sctp issues need to be addressed too */
4366 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4367 		struct sctp_tmit_chunk *lchk;
4368 		uint32_t old_adv_peer_ack_point;
4369 
4370 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4371 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4372 		/* C3. See if we need to send a Fwd-TSN */
4373 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4374 			/*
4375 			 * ISSUE with ECN, see FWD-TSN processing.
4376 			 */
4377 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4378 				send_forward_tsn(stcb, asoc);
4379 			} else if (lchk) {
4380 				/* try to FR fwd-tsn's that get lost too */
4381 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4382 					send_forward_tsn(stcb, asoc);
4383 				}
4384 			}
4385 		}
4386 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4387 			if (lchk->whoTo != NULL) {
4388 				break;
4389 			}
4390 		}
4391 		if (lchk != NULL) {
4392 			/* Assure a timer is up */
4393 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4394 			    stcb->sctp_ep, stcb, lchk->whoTo);
4395 		}
4396 	}
4397 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4398 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4399 		    rwnd,
4400 		    stcb->asoc.peers_rwnd,
4401 		    stcb->asoc.total_flight,
4402 		    stcb->asoc.total_output_queue_size);
4403 	}
4404 }
4405 
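/*
 * The full, slow-path SACK handler (counted via sctps_slowpath_sack).
 * Unlike the express path above, it must also walk the gap ack and NR
 * gap ack blocks, check for revoked chunks, and run the strike /
 * fast-retransmit machinery.
 */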
4406 void
4407 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4408     struct sctp_tcb *stcb,
4409     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4410     int *abort_now, uint8_t flags,
4411     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4412 {
4413 	struct sctp_association *asoc;
4414 	struct sctp_tmit_chunk *tp1, *tp2;
4415 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4416 	uint16_t wake_him = 0;
4417 	uint32_t send_s = 0;
4418 	long j;
4419 	int accum_moved = 0;
4420 	int will_exit_fast_recovery = 0;
4421 	uint32_t a_rwnd, old_rwnd;
4422 	int win_probe_recovery = 0;
4423 	int win_probe_recovered = 0;
4424 	struct sctp_nets *net = NULL;
4425 	int done_once;
4426 	int rto_ok = 1;
4427 	uint8_t reneged_all = 0;
4428 	uint8_t cmt_dac_flag;
4429 
4430 	/*
4431 	 * we take any chance we can to service our queues since we cannot
4432 	 * get awoken when the socket is read from :<
4433 	 */
4434 	/*
4435 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4436 	 * old sack, if so discard. 2) If there is nothing left in the send
4437 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4438 	 * too, update any rwnd change and verify no timers are running.
4439 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4440 	 * moved process these first and note that it moved. 4) Process any
4441 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4442 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4443 	 * sync up flightsizes and things, stop all timers and also check
4444 	 * for shutdown_pending state. If so then go ahead and send off the
4445 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4446 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4447 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4448 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4449 	 * if in shutdown_recv state.
4450 	 */
4451 	SCTP_TCB_LOCK_ASSERT(stcb);
4452 	/* CMT DAC algo */
4453 	this_sack_lowest_newack = 0;
4454 	SCTP_STAT_INCR(sctps_slowpath_sack);
4455 	last_tsn = cum_ack;
4456 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4457 #ifdef SCTP_ASOCLOG_OF_TSNS
4458 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4459 	stcb->asoc.cumack_log_at++;
4460 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4461 		stcb->asoc.cumack_log_at = 0;
4462 	}
4463 #endif
4464 	a_rwnd = rwnd;
4465 
4466 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4467 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4468 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4469 	}
4470 
4471 	old_rwnd = stcb->asoc.peers_rwnd;
4472 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4473 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4474 		    stcb->asoc.overall_error_count,
4475 		    0,
4476 		    SCTP_FROM_SCTP_INDATA,
4477 		    __LINE__);
4478 	}
4479 	stcb->asoc.overall_error_count = 0;
4480 	asoc = &stcb->asoc;
4481 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4482 		sctp_log_sack(asoc->last_acked_seq,
4483 		    cum_ack,
4484 		    0,
4485 		    num_seg,
4486 		    num_dup,
4487 		    SCTP_LOG_NEW_SACK);
4488 	}
4489 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4490 		uint16_t i;
4491 		uint32_t *dupdata, dblock;
4492 
4493 		for (i = 0; i < num_dup; i++) {
4494 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4495 			    sizeof(uint32_t), (uint8_t *)&dblock);
4496 			if (dupdata == NULL) {
4497 				break;
4498 			}
4499 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4500 		}
4501 	}
4502 	/* reality check */
4503 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4504 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4505 		    sctpchunk_listhead);
4506 		send_s = tp1->rec.data.tsn + 1;
4507 	} else {
4508 		tp1 = NULL;
4509 		send_s = asoc->sending_seq;
4510 	}
4511 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4512 		struct mbuf *op_err;
4513 		char msg[SCTP_DIAG_INFO_LEN];
4514 
4515 		/*
4516 		 * no way, we have not even sent this TSN out yet. Peer is
4517 		 * hopelessly messed up with us.
4518 		 */
4519 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4520 		    cum_ack, send_s);
4521 		if (tp1) {
4522 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4523 			    tp1->rec.data.tsn, (void *)tp1);
4524 		}
4525 hopeless_peer:
4526 		*abort_now = 1;
4527 		/* XXX */
4528 		SCTP_SNPRINTF(msg, sizeof(msg),
4529 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4530 		    cum_ack, send_s);
4531 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4532 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4533 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4534 		return;
4535 	}
4536 	/**********************/
4537 	/* 1) check the range */
4538 	/**********************/
4539 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4540 		/* acking something behind */
4541 		return;
4542 	}
4543 
4544 	/* update the Rwnd of the peer */
4545 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4546 	    TAILQ_EMPTY(&asoc->send_queue) &&
4547 	    (asoc->stream_queue_cnt == 0)) {
4548 		/* nothing left on send/sent and strmq */
4549 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4550 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4551 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4552 		}
4553 		asoc->peers_rwnd = a_rwnd;
4554 		if (asoc->sent_queue_retran_cnt) {
4555 			asoc->sent_queue_retran_cnt = 0;
4556 		}
4557 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4558 			/* SWS sender side engages */
4559 			asoc->peers_rwnd = 0;
4560 		}
4561 		/* stop any timers */
4562 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4563 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4564 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4565 			net->partial_bytes_acked = 0;
4566 			net->flight_size = 0;
4567 		}
4568 		asoc->total_flight = 0;
4569 		asoc->total_flight_count = 0;
4570 		return;
4571 	}
4572 	/*
4573 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4574 	 * things. The total byte count acked is tracked in net_ack AND
4575 	 * net_ack2 is used to track the total bytes acked that are un-
4576 	 * ambiguous and were never retransmitted. We track these on a per
4577 	 * destination address basis.
4578 	 */
4579 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4580 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4581 			/* Drag along the window_tsn for cwr's */
4582 			net->cwr_window_tsn = cum_ack;
4583 		}
4584 		net->prev_cwnd = net->cwnd;
4585 		net->net_ack = 0;
4586 		net->net_ack2 = 0;
4587 
4588 		/*
4589 		 * CMT: Reset CUC and Fast recovery algo variables before
4590 		 * SACK processing
4591 		 */
4592 		net->new_pseudo_cumack = 0;
4593 		net->will_exit_fast_recovery = 0;
4594 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4595 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4596 		}
4597 
4598 		/*
4599 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4600 		 * to be greater than the cumack. Also reset saw_newack to 0
4601 		 * for all dests.
4602 		 */
4603 		net->saw_newack = 0;
4604 		net->this_sack_highest_newack = last_tsn;
4605 	}
4606 	/* process the new consecutive TSN first */
4607 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4608 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4609 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4610 				accum_moved = 1;
4611 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4612 					/*
4613 					 * If it is less than ACKED, it is
4614 					 * now no-longer in flight. Higher
4615 					 * values may occur during marking
4616 					 */
4617 					if ((tp1->whoTo->dest_state &
4618 					    SCTP_ADDR_UNCONFIRMED) &&
4619 					    (tp1->snd_count < 2)) {
4620 						/*
4621 						 * If there was no retran
4622 						 * and the address is
4623 						 * un-confirmed and we sent
4624 						 * there and are now
4625 						 * sacked.. it's confirmed,
4626 						 * mark it so.
4627 						 */
4628 						tp1->whoTo->dest_state &=
4629 						    ~SCTP_ADDR_UNCONFIRMED;
4630 					}
4631 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4632 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4633 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4634 							    tp1->whoTo->flight_size,
4635 							    tp1->book_size,
4636 							    (uint32_t)(uintptr_t)tp1->whoTo,
4637 							    tp1->rec.data.tsn);
4638 						}
4639 						sctp_flight_size_decrease(tp1);
4640 						sctp_total_flight_decrease(stcb, tp1);
4641 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4642 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4643 							    tp1);
4644 						}
4645 					}
4646 					tp1->whoTo->net_ack += tp1->send_size;
4647 
4648 					/* CMT SFR and DAC algos */
4649 					this_sack_lowest_newack = tp1->rec.data.tsn;
4650 					tp1->whoTo->saw_newack = 1;
4651 
4652 					if (tp1->snd_count < 2) {
4653 						/*
4654 						 * True non-retransmitted
4655 						 * chunk
4656 						 */
4657 						tp1->whoTo->net_ack2 +=
4658 						    tp1->send_size;
4659 
4660 						/* update RTO too? */
4661 						if (tp1->do_rtt) {
4662 							if (rto_ok &&
4663 							    sctp_calculate_rto(stcb,
4664 							    &stcb->asoc,
4665 							    tp1->whoTo,
4666 							    &tp1->sent_rcv_time,
4667 							    SCTP_RTT_FROM_DATA)) {
4668 								rto_ok = 0;
4669 							}
4670 							if (tp1->whoTo->rto_needed == 0) {
4671 								tp1->whoTo->rto_needed = 1;
4672 							}
4673 							tp1->do_rtt = 0;
4674 						}
4675 					}
4676 					/*
4677 					 * CMT: CUCv2 algorithm. From the
4678 					 * cumack'd TSNs, for each TSN being
4679 					 * acked for the first time, set the
4680 					 * following variables for the
4681 					 * corresp destination.
4682 					 * new_pseudo_cumack will trigger a
4683 					 * cwnd update.
4684 					 * find_(rtx_)pseudo_cumack will
4685 					 * trigger search for the next
4686 					 * expected (rtx-)pseudo-cumack.
4687 					 */
4688 					tp1->whoTo->new_pseudo_cumack = 1;
4689 					tp1->whoTo->find_pseudo_cumack = 1;
4690 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4691 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4692 						sctp_log_sack(asoc->last_acked_seq,
4693 						    cum_ack,
4694 						    tp1->rec.data.tsn,
4695 						    0,
4696 						    0,
4697 						    SCTP_LOG_TSN_ACKED);
4698 					}
4699 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4700 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4701 					}
4702 				}
4703 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4704 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4705 #ifdef SCTP_AUDITING_ENABLED
4706 					sctp_audit_log(0xB3,
4707 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4708 #endif
4709 				}
4710 				if (tp1->rec.data.chunk_was_revoked) {
4711 					/* deflate the cwnd */
4712 					tp1->whoTo->cwnd -= tp1->book_size;
4713 					tp1->rec.data.chunk_was_revoked = 0;
4714 				}
4715 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4716 					tp1->sent = SCTP_DATAGRAM_ACKED;
4717 				}
4718 			}
4719 		} else {
4720 			break;
4721 		}
4722 	}
4723 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4724 	/* always set this up to cum-ack */
4725 	asoc->this_sack_highest_gap = last_tsn;
4726 
4727 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4728 		/*
4729 		 * thisSackHighestGap will increase while handling NEW
4730 		 * segments; this_sack_highest_newack will increase while
4731 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4732 		 * used for CMT DAC algo. saw_newack will also change.
4733 		 */
4734 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4735 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4736 		    num_seg, num_nr_seg, &rto_ok)) {
4737 			wake_him++;
4738 		}
4739 		/*
4740 		 * validate the biggest_tsn_acked in the gap acks if strict
4741 		 * adherence is wanted.
4742 		 */
4743 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4744 			/*
4745 			 * peer is either confused or we are under attack.
4746 			 * We must abort.
4747 			 */
4748 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4749 			    biggest_tsn_acked, send_s);
4750 			goto hopeless_peer;
4751 		}
4752 	}
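	/*
	 * The TSN comparisons above rely on serial number arithmetic
	 * (RFC 1982) so they stay correct across the 32-bit TSN wrap. A
	 * self-contained sketch, equivalent in spirit to the SCTP_TSN_GT()
	 * macro (see sctp_constants.h for the authoritative definition):
	 *
	 *	static inline int
	 *	tsn_gt(uint32_t a, uint32_t b)
	 *	{
	 *		return (((a < b) && ((b - a) > (1U << 31))) ||
	 *		    ((a > b) && ((a - b) < (1U << 31))));
	 *	}
	 *
	 * For example, tsn_gt(0x00000005, 0xfffffffa) is true: TSN 5 is
	 * "after" 0xfffffffa once the counter has wrapped.
	 */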
4753 	/********************************************/
4754 	/* cancel ALL T3-send timers if accum moved */
4755 	/********************************************/
4756 	if (asoc->sctp_cmt_on_off > 0) {
4757 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4758 			if (net->new_pseudo_cumack)
4759 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4760 				    stcb, net,
4761 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4762 		}
4763 	} else {
4764 		if (accum_moved) {
4765 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4766 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4767 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4768 			}
4769 		}
4770 	}
4771 	/*********************************************/
4772 	/* drop the acked chunks from the sent queue */
4773 	/*********************************************/
4774 	asoc->last_acked_seq = cum_ack;
4775 
4776 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4777 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4778 			break;
4779 		}
4780 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4781 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4782 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4783 #ifdef INVARIANTS
4784 			} else {
4785 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4786 #endif
4787 			}
4788 		}
4789 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4790 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4791 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4792 			asoc->trigger_reset = 1;
4793 		}
4794 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4795 		if (PR_SCTP_ENABLED(tp1->flags)) {
4796 			if (asoc->pr_sctp_cnt != 0)
4797 				asoc->pr_sctp_cnt--;
4798 		}
4799 		asoc->sent_queue_cnt--;
4800 		if (tp1->data) {
4801 			/* sa_ignore NO_NULL_CHK */
4802 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4803 			sctp_m_freem(tp1->data);
4804 			tp1->data = NULL;
4805 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4806 				asoc->sent_queue_cnt_removeable--;
4807 			}
4808 		}
4809 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4810 			sctp_log_sack(asoc->last_acked_seq,
4811 			    cum_ack,
4812 			    tp1->rec.data.tsn,
4813 			    0,
4814 			    0,
4815 			    SCTP_LOG_FREE_SENT);
4816 		}
4817 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4818 		wake_him++;
4819 	}
4820 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4821 #ifdef INVARIANTS
4822 		panic("Warning flight size is positive and should be 0");
4823 #else
4824 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4825 		    asoc->total_flight);
4826 #endif
4827 		asoc->total_flight = 0;
4828 	}
4829 
4830 	/* sa_ignore NO_NULL_CHK */
4831 	if ((wake_him) && (stcb->sctp_socket)) {
4832 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4833 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4834 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4835 		}
4836 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4837 	} else {
4838 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4839 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4840 		}
4841 	}
4842 
4843 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4844 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4845 			/* Setup so we will exit RFC2582 fast recovery */
4846 			will_exit_fast_recovery = 1;
4847 		}
4848 	}
4849 	/*
4850 	 * Check for revoked fragments:
4851 	 *
4852 	 * If the previous SACK had no frags, then nothing can have been
4853 	 * revoked. If it did have frags, then either we now have frags too
4854 	 * (num_seg > 0) and sctp_check_for_revoked() tells us whether the
4855 	 * peer revoked some of them, or the peer revoked all ACKED
4856 	 * fragments, since we had some before and now we have NONE.
4857 	 */
4858 
4859 	if (num_seg) {
4860 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4861 		asoc->saw_sack_with_frags = 1;
4862 	} else if (asoc->saw_sack_with_frags) {
4863 		int cnt_revoked = 0;
4864 
4865 		/* Peer revoked all datagrams marked or acked */
4866 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4867 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4868 				tp1->sent = SCTP_DATAGRAM_SENT;
4869 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4870 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4871 					    tp1->whoTo->flight_size,
4872 					    tp1->book_size,
4873 					    (uint32_t)(uintptr_t)tp1->whoTo,
4874 					    tp1->rec.data.tsn);
4875 				}
4876 				sctp_flight_size_increase(tp1);
4877 				sctp_total_flight_increase(stcb, tp1);
4878 				tp1->rec.data.chunk_was_revoked = 1;
4879 				/*
4880 				 * To ensure that this increase in
4881 				 * flightsize, which is artificial, does not
4882 				 * throttle the sender, we also increase the
4883 				 * cwnd artificially.
4884 				 */
4885 				tp1->whoTo->cwnd += tp1->book_size;
4886 				cnt_revoked++;
4887 			}
4888 		}
4889 		if (cnt_revoked) {
4890 			reneged_all = 1;
4891 		}
4892 		asoc->saw_sack_with_frags = 0;
4893 	}
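	/*
	 * Revocation, by example: suppose SACK n gap-acked TSN 7, but SACK
	 * n+1 carries cum-ack 5 with no gap blocks. TSN 7 has then been
	 * revoked: the loop above re-marks it SCTP_DATAGRAM_SENT, re-adds
	 * its book_size to the flight, and also adds book_size to cwnd so
	 * the artificial flight increase does not throttle the sender. The
	 * matching cwnd deflation happens when the chunk is finally
	 * cum-acked (see the chunk_was_revoked handling earlier in this
	 * function).
	 */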
4894 	if (num_nr_seg > 0)
4895 		asoc->saw_sack_with_nr_frags = 1;
4896 	else
4897 		asoc->saw_sack_with_nr_frags = 0;
4898 
4899 	/* JRS - Use the congestion control given in the CC module */
4900 	if (ecne_seen == 0) {
4901 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4902 			if (net->net_ack2 > 0) {
4903 				/*
4904 				 * Karn's rule applies to clearing the error
4905 				 * count; this is optional.
4906 				 */
4907 				net->error_count = 0;
4908 				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4909 					/* address is reachable again */
4910 					net->dest_state |= SCTP_ADDR_REACHABLE;
4911 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4912 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4913 				}
4914 
4915 				if (net == stcb->asoc.primary_destination) {
4916 					if (stcb->asoc.alternate) {
4917 						/*
4918 						 * release the alternate,
4919 						 * primary is good
4920 						 */
4921 						sctp_free_remote_addr(stcb->asoc.alternate);
4922 						stcb->asoc.alternate = NULL;
4923 					}
4924 				}
4925 
4926 				if (net->dest_state & SCTP_ADDR_PF) {
4927 					net->dest_state &= ~SCTP_ADDR_PF;
4928 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4929 					    stcb->sctp_ep, stcb, net,
4930 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4931 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4932 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4933 					/* Done with this net */
4934 					net->net_ack = 0;
4935 				}
4936 				/* restore any doubled timers */
4937 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4938 				if (net->RTO < stcb->asoc.minrto) {
4939 					net->RTO = stcb->asoc.minrto;
4940 				}
4941 				if (net->RTO > stcb->asoc.maxrto) {
4942 					net->RTO = stcb->asoc.maxrto;
4943 				}
4944 			}
4945 		}
4946 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4947 	}
4948 
4949 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4950 		/* nothing left in-flight */
4951 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4952 			/* stop all timers */
4953 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4954 			    stcb, net,
4955 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4956 			net->flight_size = 0;
4957 			net->partial_bytes_acked = 0;
4958 		}
4959 		asoc->total_flight = 0;
4960 		asoc->total_flight_count = 0;
4961 	}
4962 
4963 	/**********************************/
4964 	/* Now what about shutdown issues */
4965 	/**********************************/
4966 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4967 		/* nothing left on the send queue; consider it done */
4968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4969 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4970 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4971 		}
4972 		asoc->peers_rwnd = a_rwnd;
4973 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4974 			/* SWS sender side engages */
4975 			asoc->peers_rwnd = 0;
4976 		}
4977 		/* clean up */
4978 		if ((asoc->stream_queue_cnt == 1) &&
4979 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4980 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4981 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4982 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4983 		}
4984 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4985 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4986 		    (asoc->stream_queue_cnt == 1) &&
4987 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4988 			struct mbuf *op_err;
4989 
4990 			*abort_now = 1;
4991 			/* XXX */
4992 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4993 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
4994 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4995 			return;
4996 		}
4997 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4998 		    (asoc->stream_queue_cnt == 0)) {
4999 			struct sctp_nets *netp;
5000 
5001 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5002 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5003 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5004 			}
5005 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5006 			sctp_stop_timers_for_shutdown(stcb);
5007 			if (asoc->alternate) {
5008 				netp = asoc->alternate;
5009 			} else {
5010 				netp = asoc->primary_destination;
5011 			}
5012 			sctp_send_shutdown(stcb, netp);
5013 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5014 			    stcb->sctp_ep, stcb, netp);
5015 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5016 			    stcb->sctp_ep, stcb, NULL);
5017 			return;
5018 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5019 		    (asoc->stream_queue_cnt == 0)) {
5020 			struct sctp_nets *netp;
5021 
5022 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5023 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5024 			sctp_stop_timers_for_shutdown(stcb);
5025 			if (asoc->alternate) {
5026 				netp = asoc->alternate;
5027 			} else {
5028 				netp = asoc->primary_destination;
5029 			}
5030 			sctp_send_shutdown_ack(stcb, netp);
5031 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5032 			    stcb->sctp_ep, stcb, netp);
5033 			return;
5034 		}
5035 	}
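	/*
	 * The shutdown handling above, condensed (send and sent queues
	 * both empty):
	 *
	 *	SHUTDOWN_PENDING  and no queued stream data -> send SHUTDOWN,
	 *	    enter SHUTDOWN_SENT, start the shutdown and guard timers.
	 *	SHUTDOWN_RECEIVED and no queued stream data -> send
	 *	    SHUTDOWN-ACK, enter SHUTDOWN_ACK_SENT, start its timer.
	 *	Either state with exactly one incomplete user message still
	 *	    queued -> abort the association (PARTIAL_MSG_LEFT).
	 */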
5036 	/*
5037 	 * HEADS UP: from here on we recycle net_ack for a different
5038 	 * use.
5039 	 */
5040 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5041 		net->net_ack = 0;
5042 	}
5043 
5044 	/*
5045 	 * CMT DAC algorithm: If the SACK DAC flag was 0, then no extra
5046 	 * marking needs to be done. Setting this_sack_lowest_newack to
5047 	 * the cum_ack will automatically ensure that.
5048 	 */
5049 	if ((asoc->sctp_cmt_on_off > 0) &&
5050 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5051 	    (cmt_dac_flag == 0)) {
5052 		this_sack_lowest_newack = cum_ack;
5053 	}
5054 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5055 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5056 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5057 	}
5058 	/* JRS - Use the congestion control given in the CC module */
5059 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5060 
5061 	/* Now are we exiting loss recovery ? */
5062 	if (will_exit_fast_recovery) {
5063 		/* Ok, we must exit fast recovery */
5064 		asoc->fast_retran_loss_recovery = 0;
5065 	}
5066 	if ((asoc->sat_t3_loss_recovery) &&
5067 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5068 		/* end satellite t3 loss recovery */
5069 		asoc->sat_t3_loss_recovery = 0;
5070 	}
5071 	/*
5072 	 * CMT Fast recovery
5073 	 */
5074 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5075 		if (net->will_exit_fast_recovery) {
5076 			/* Ok, we must exit fast recovery */
5077 			net->fast_retran_loss_recovery = 0;
5078 		}
5079 	}
5080 
5081 	/* Adjust and set the new rwnd value */
5082 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5083 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5084 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5085 	}
5086 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5087 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5088 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5089 		/* SWS sender side engages */
5090 		asoc->peers_rwnd = 0;
5091 	}
5092 	if (asoc->peers_rwnd > old_rwnd) {
5093 		win_probe_recovery = 1;
5094 	}
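	/*
	 * A worked example of the rwnd computation above (numbers are
	 * illustrative only): with a_rwnd = 65536, total_flight = 4000,
	 * total_flight_count = 4 and sctp_peer_chunk_oh = 256, the usable
	 * peer window is 65536 - (4000 + 4 * 256) = 60512. A result below
	 * the SWS threshold is clamped to 0, and a result larger than
	 * old_rwnd arms window-probe recovery.
	 */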
5095 
5096 	/*
5097 	 * Now we must setup so we have a timer up for anyone with
5098 	 * outstanding data.
5099 	 */
5100 	done_once = 0;
5101 again:
5102 	j = 0;
5103 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5104 		if (win_probe_recovery && (net->window_probe)) {
5105 			win_probe_recovered = 1;
5106 			/*-
5107 			 * Find the first chunk that was used with a
5108 			 * window probe and clear the event. Put
5109 			 * it back into the send queue as if it had
5110 			 * not been sent.
5111 			 */
5112 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5113 				if (tp1->window_probe) {
5114 					sctp_window_probe_recovery(stcb, asoc, tp1);
5115 					break;
5116 				}
5117 			}
5118 		}
5119 		if (net->flight_size) {
5120 			j++;
5121 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5122 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5123 				    stcb->sctp_ep, stcb, net);
5124 			}
5125 			if (net->window_probe) {
5126 				net->window_probe = 0;
5127 			}
5128 		} else {
5129 			if (net->window_probe) {
5130 				/*
5131 				 * For window probes we must ensure a timer
5132 				 * is still running there.
5133 				 */
5134 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5135 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5136 					    stcb->sctp_ep, stcb, net);
5137 				}
5138 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5139 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5140 				    stcb, net,
5141 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5142 			}
5143 		}
5144 	}
5145 	if ((j == 0) &&
5146 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5147 	    (asoc->sent_queue_retran_cnt == 0) &&
5148 	    (win_probe_recovered == 0) &&
5149 	    (done_once == 0)) {
5150 		/*
5151 		 * This should not happen unless all packets are
5152 		 * PR-SCTP and marked to be skipped.
5153 		 */
5154 		if (sctp_fs_audit(asoc)) {
5155 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5156 				net->flight_size = 0;
5157 			}
5158 			asoc->total_flight = 0;
5159 			asoc->total_flight_count = 0;
5160 			asoc->sent_queue_retran_cnt = 0;
5161 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5162 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5163 					sctp_flight_size_increase(tp1);
5164 					sctp_total_flight_increase(stcb, tp1);
5165 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5166 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5167 				}
5168 			}
5169 		}
5170 		done_once = 1;
5171 		goto again;
5172 	}
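	/*
	 * The audit above rebuilds the flight bookkeeping from scratch:
	 * every chunk on the sent queue still below SCTP_DATAGRAM_RESEND is
	 * counted back into the per-net and total flight, chunks marked
	 * SCTP_DATAGRAM_RESEND are re-counted into sent_queue_retran_cnt,
	 * and the done_once latch limits us to a single rebuild pass per
	 * SACK.
	 */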
5173 	/*********************************************/
5174 	/* Here we perform PR-SCTP procedures        */
5175 	/* (section 4.2)                             */
5176 	/*********************************************/
5177 	/* C1. update advancedPeerAckPoint */
5178 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5179 		asoc->advanced_peer_ack_point = cum_ack;
5180 	}
5181 	/* C2. try to further move advancedPeerAckPoint ahead */
5182 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5183 		struct sctp_tmit_chunk *lchk;
5184 		uint32_t old_adv_peer_ack_point;
5185 
5186 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5187 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5188 		/* C3. See if we need to send a Fwd-TSN */
5189 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5190 			/*
5191 			 * ISSUE with ECN, see FWD-TSN processing.
5192 			 */
5193 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5194 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5195 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5196 				    old_adv_peer_ack_point);
5197 			}
5198 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5199 				send_forward_tsn(stcb, asoc);
5200 			} else if (lchk) {
5201 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5202 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5203 					send_forward_tsn(stcb, asoc);
5204 				}
5205 			}
5206 		}
5207 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5208 			if (lchk->whoTo != NULL) {
5209 				break;
5210 			}
5211 		}
5212 		if (lchk != NULL) {
5213 			/* Ensure a timer is running */
5214 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5215 			    stcb->sctp_ep, stcb, lchk->whoTo);
5216 		}
5217 	}
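	/*
	 * PR-SCTP (RFC 3758) steps C1-C3 above, by example: abandoned
	 * chunks let the Advanced.Peer.Ack.Point move beyond the cumulative
	 * ack. With cum_ack = 10 and abandoned TSNs 11 and 12, the ack
	 * point advances to 12 and a FORWARD-TSN carrying 12 is sent, so
	 * the receiver stops waiting for TSNs 11 and 12.
	 */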
5218 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5219 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5220 		    a_rwnd,
5221 		    stcb->asoc.peers_rwnd,
5222 		    stcb->asoc.total_flight,
5223 		    stcb->asoc.total_output_queue_size);
5224 	}
5225 }
5226 
5227 void
5228 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5229 {
5230 	/* Copy cum-ack */
5231 	uint32_t cum_ack, a_rwnd;
5232 
5233 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5234 	/* Arrange so a_rwnd does NOT change */
5235 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
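	/*
	 * Why this keeps the window steady: the SACK path recomputes
	 * peers_rwnd as roughly a_rwnd minus the remaining flight, so
	 * passing in peers_rwnd + total_flight cancels the subtraction.
	 * E.g. peers_rwnd = 6000 and total_flight = 2000 give a_rwnd =
	 * 8000, and 8000 - 2000 restores the window to 6000 (ignoring
	 * per-chunk overhead).
	 */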
5236 
5237 	/* Now call the express sack handling */
5238 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5239 }
5240 
5241 static void
5242 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5243     struct sctp_stream_in *strmin)
5244 {
5245 	struct sctp_queued_to_read *control, *ncontrol;
5246 	struct sctp_association *asoc;
5247 	uint32_t mid;
5248 	int need_reasm_check = 0;
5249 
5250 	asoc = &stcb->asoc;
5251 	mid = strmin->last_mid_delivered;
5252 	/*
5253 	 * First deliver anything prior to and including the stream
5254 	 * sequence number that came in.
5255 	 */
5256 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5257 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5258 			/* this is deliverable now */
5259 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5260 				if (control->on_strm_q) {
5261 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5262 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5263 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5264 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5265 #ifdef INVARIANTS
5266 					} else {
5267 						panic("strmin: %p ctl: %p unknown %d",
5268 						    strmin, control, control->on_strm_q);
5269 #endif
5270 					}
5271 					control->on_strm_q = 0;
5272 				}
5273 				/* subtract pending on streams */
5274 				if (asoc->size_on_all_streams >= control->length) {
5275 					asoc->size_on_all_streams -= control->length;
5276 				} else {
5277 #ifdef INVARIANTS
5278 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5279 #else
5280 					asoc->size_on_all_streams = 0;
5281 #endif
5282 				}
5283 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5284 				/* deliver it to at least the delivery-q */
5285 				if (stcb->sctp_socket) {
5286 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5287 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5288 					    control,
5289 					    &stcb->sctp_socket->so_rcv,
5290 					    1, SCTP_READ_LOCK_HELD,
5291 					    SCTP_SO_NOT_LOCKED);
5292 				}
5293 			} else {
5294 				/* It's a fragmented message */
5295 				if (control->first_frag_seen) {
5296 					/*
5297 					 * Make it so this is next to
5298 					 * deliver; we restore it later.
5299 					 */
5300 					strmin->last_mid_delivered = control->mid - 1;
5301 					need_reasm_check = 1;
5302 					break;
5303 				}
5304 			}
5305 		} else {
5306 			/* no more delivery now. */
5307 			break;
5308 		}
5309 	}
5310 	if (need_reasm_check) {
5311 		int ret;
5312 
5313 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5314 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5315 			/* Restore the next to deliver unless we are ahead */
5316 			strmin->last_mid_delivered = mid;
5317 		}
5318 		if (ret == 0) {
5319 			/* Left the front partial message on */
5320 			return;
5321 		}
5322 		need_reasm_check = 0;
5323 	}
5324 	/*
5325 	 * Now we must deliver things in the queue the normal way, if any
5326 	 * are now ready.
5327 	 */
5328 	mid = strmin->last_mid_delivered + 1;
5329 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5330 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5331 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5332 				/* this is deliverable now */
5333 				if (control->on_strm_q) {
5334 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5335 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5336 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5337 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5338 #ifdef INVARIANTS
5339 					} else {
5340 						panic("strmin: %p ctl: %p unknown %d",
5341 						    strmin, control, control->on_strm_q);
5342 #endif
5343 					}
5344 					control->on_strm_q = 0;
5345 				}
5346 				/* subtract pending on streams */
5347 				if (asoc->size_on_all_streams >= control->length) {
5348 					asoc->size_on_all_streams -= control->length;
5349 				} else {
5350 #ifdef INVARIANTS
5351 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5352 #else
5353 					asoc->size_on_all_streams = 0;
5354 #endif
5355 				}
5356 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5357 				/* deliver it to at least the delivery-q */
5358 				strmin->last_mid_delivered = control->mid;
5359 				if (stcb->sctp_socket) {
5360 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5361 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5362 					    control,
5363 					    &stcb->sctp_socket->so_rcv, 1,
5364 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5365 				}
5366 				mid = strmin->last_mid_delivered + 1;
5367 			} else {
5368 				/* It's a fragmented message */
5369 				if (control->first_frag_seen) {
5370 					/*
5371 					 * Make it so this is next to
5372 					 * deliver
5373 					 */
5374 					strmin->last_mid_delivered = control->mid - 1;
5375 					need_reasm_check = 1;
5376 					break;
5377 				}
5378 			}
5379 		} else {
5380 			break;
5381 		}
5382 	}
5383 	if (need_reasm_check) {
5384 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5385 	}
5386 }
5387 
5388 static void
5389 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5390     struct sctp_association *asoc, struct sctp_stream_in *strm,
5391     struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5392 {
5393 	struct sctp_tmit_chunk *chk, *nchk;
5394 
5395 	/*
5396 	 * For now, large messages held on the stream reassembly queue that
5397 	 * are complete will be tossed too. We could in theory do more work:
5398 	 * spin through, stop after dumping one message (i.e., on seeing the
5399 	 * start of a new message at the head), and call the delivery
5400 	 * function to see if it can be delivered. But for now we just dump
5401 	 * everything on the queue.
5402 	 */
5403 	if (!asoc->idata_supported && !ordered &&
5404 	    control->first_frag_seen &&
5405 	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5406 		return;
5407 	}
5408 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5409 		/* Purge hanging chunks */
5410 		if (!asoc->idata_supported && !ordered) {
5411 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5412 				break;
5413 			}
5414 		}
5415 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5416 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5417 			asoc->size_on_reasm_queue -= chk->send_size;
5418 		} else {
5419 #ifdef INVARIANTS
5420 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5421 #else
5422 			asoc->size_on_reasm_queue = 0;
5423 #endif
5424 		}
5425 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5426 		if (chk->data) {
5427 			sctp_m_freem(chk->data);
5428 			chk->data = NULL;
5429 		}
5430 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5431 	}
5432 	if (!TAILQ_EMPTY(&control->reasm)) {
5433 		/* This has to be old data, unordered */
5434 		if (control->data) {
5435 			sctp_m_freem(control->data);
5436 			control->data = NULL;
5437 		}
5438 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5439 		chk = TAILQ_FIRST(&control->reasm);
5440 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5441 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5442 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5443 			    chk, SCTP_READ_LOCK_HELD);
5444 		}
5445 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5446 		return;
5447 	}
5448 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5449 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5450 		if (asoc->size_on_all_streams >= control->length) {
5451 			asoc->size_on_all_streams -= control->length;
5452 		} else {
5453 #ifdef INVARIANTS
5454 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5455 #else
5456 			asoc->size_on_all_streams = 0;
5457 #endif
5458 		}
5459 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5460 		control->on_strm_q = 0;
5461 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5462 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5463 		control->on_strm_q = 0;
5464 #ifdef INVARIANTS
5465 	} else if (control->on_strm_q) {
5466 		panic("strm: %p ctl: %p unknown %d",
5467 		    strm, control, control->on_strm_q);
5468 #endif
5469 	}
5470 	control->on_strm_q = 0;
5471 	if (control->on_read_q == 0) {
5472 		sctp_free_remote_addr(control->whoFrom);
5473 		if (control->data) {
5474 			sctp_m_freem(control->data);
5475 			control->data = NULL;
5476 		}
5477 		sctp_free_a_readq(stcb, control);
5478 	}
5479 }
5480 
5481 void
5482 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5483     struct sctp_forward_tsn_chunk *fwd,
5484     int *abort_flag, struct mbuf *m, int offset)
5485 {
5486 	/* The pr-sctp fwd tsn */
5487 	/*
5488 	/*
5489 	 * Here we perform all the data receiver side steps for
5490 	 * processing a FwdTSN, as required by the PR-SCTP draft.
5491 	 *
5492 	 * Assume we get FwdTSN(x):
5493 	 * 1) update local cumTSN to x
5494 	 * 2) try to further advance cumTSN to x + others we have
5495 	 * 3) examine and update the re-ordering queues on pr-in-streams
5496 	 * 4) clean up the re-assembly queue
5497 	 * 5) send a SACK to report where we are
5498 	 */
5499 	uint32_t new_cum_tsn, gap;
5500 	unsigned int i, fwd_sz, m_size;
5501 	uint32_t str_seq;
5502 	struct sctp_stream_in *strm;
5503 	struct sctp_queued_to_read *control, *ncontrol, *sv;
5504 
5505 	asoc = &stcb->asoc;
5506 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5507 		SCTPDBG(SCTP_DEBUG_INDATA1,
5508 		    "fwd-tsn chunk too small\n");
5509 		return;
5510 	}
5511 	m_size = (stcb->asoc.mapping_array_size << 3);
5512 	/*************************************************************/
5513 	/* 1. Here we update local cumTSN and shift the bitmap array */
5514 	/*************************************************************/
5515 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5516 
5517 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5518 		/* Already got there ... */
5519 		return;
5520 	}
5521 	/*
5522 	 * Now we know the new TSN is more advanced; let's find the actual
5523 	 * gap.
5524 	 */
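	/*
	 * SCTP_CALC_TSN_TO_GAP() yields the distance from the base of the
	 * mapping array to new_cum_tsn, effectively modulo 2^32 (a sketch;
	 * the macro handles the wrap explicitly):
	 *
	 *	gap = new_cum_tsn - asoc->mapping_array_base_tsn;
	 *
	 * e.g. base = 0xfffffff0 and new_cum_tsn = 0x0000000f give
	 * gap = 0x1f.
	 */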
5525 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5526 	asoc->cumulative_tsn = new_cum_tsn;
5527 	if (gap >= m_size) {
5528 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5529 			struct mbuf *op_err;
5530 			char msg[SCTP_DIAG_INFO_LEN];
5531 
5532 			/*
5533 			 * out of range (of single byte chunks in the rwnd I
5534 			 * give out). This must be an attacker.
5535 			 */
5536 			*abort_flag = 1;
5537 			SCTP_SNPRINTF(msg, sizeof(msg),
5538 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5539 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5540 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5541 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5542 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5543 			return;
5544 		}
5545 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5546 
5547 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5548 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5549 		asoc->highest_tsn_inside_map = new_cum_tsn;
5550 
5551 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5552 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5553 
5554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5555 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5556 		}
5557 	} else {
5558 		SCTP_TCB_LOCK_ASSERT(stcb);
5559 		for (i = 0; i <= gap; i++) {
5560 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5561 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5562 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5563 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5564 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5565 				}
5566 			}
5567 		}
5568 	}
5569 	/*************************************************************/
5570 	/* 2. Clear up re-assembly queue                             */
5571 	/*************************************************************/
5572 
5573 	/* This is now done as part of clearing up the stream/seq */
5574 	if (asoc->idata_supported == 0) {
5575 		uint16_t sid;
5576 
5577 		/* Flush all the un-ordered data based on cum-tsn */
5578 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5579 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5580 			strm = &asoc->strmin[sid];
5581 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5582 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5583 			}
5584 		}
5585 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5586 	}
5587 	/*******************************************************/
5588 	/* 3. Update the PR-stream re-ordering queues and fix  */
5589 	/* delivery issues as needed.                          */
5590 	/*******************************************************/
5591 	fwd_sz -= sizeof(*fwd);
5592 	if (m && fwd_sz) {
5593 		/* New method. */
5594 		unsigned int num_str;
5595 		uint32_t mid;
5596 		uint16_t sid;
5597 		uint16_t ordered, flags;
5598 		struct sctp_strseq *stseq, strseqbuf;
5599 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5600 
5601 		offset += sizeof(*fwd);
5602 
5603 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5604 		if (asoc->idata_supported) {
5605 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5606 		} else {
5607 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5608 		}
5609 		for (i = 0; i < num_str; i++) {
5610 			if (asoc->idata_supported) {
5611 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5612 				    sizeof(struct sctp_strseq_mid),
5613 				    (uint8_t *)&strseqbuf_m);
5614 				offset += sizeof(struct sctp_strseq_mid);
5615 				if (stseq_m == NULL) {
5616 					break;
5617 				}
5618 				sid = ntohs(stseq_m->sid);
5619 				mid = ntohl(stseq_m->mid);
5620 				flags = ntohs(stseq_m->flags);
5621 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5622 					ordered = 0;
5623 				} else {
5624 					ordered = 1;
5625 				}
5626 			} else {
5627 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5628 				    sizeof(struct sctp_strseq),
5629 				    (uint8_t *)&strseqbuf);
5630 				offset += sizeof(struct sctp_strseq);
5631 				if (stseq == NULL) {
5632 					break;
5633 				}
5634 				sid = ntohs(stseq->sid);
5635 				mid = (uint32_t)ntohs(stseq->ssn);
5636 				ordered = 1;
5637 			}
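			/*
			 * A sketch of the two wire formats parsed above,
			 * inferred from their use here (see sctp_header.h for
			 * the authoritative layouts): without I-DATA a 16-bit
			 * SSN is carried, with I-DATA a flags field and a
			 * 32-bit MID:
			 *
			 *	struct sctp_strseq {
			 *		uint16_t sid;
			 *		uint16_t ssn;
			 *	};
			 *	struct sctp_strseq_mid {
			 *		uint16_t sid;
			 *		uint16_t flags;
			 *		uint32_t mid;
			 *	};
			 */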
5638 			/*
5639 			 * The wire format is converted; now process this
5640 			 * stream/sequence pair.
5641 			 */
5642 			/*
5643 			 * Ok, we now look for the stream/seq on the read
5644 			 * queue where it's not all delivered. If we find it,
5645 			 * we transmute the read entry into a PDI_ABORTED.
5646 			 */
5647 			if (sid >= asoc->streamincnt) {
5648 				/* screwed up streams, stop!  */
5649 				break;
5650 			}
5651 			if ((asoc->str_of_pdapi == sid) &&
5652 			    (asoc->ssn_of_pdapi == mid)) {
5653 				/*
5654 				 * If this is the one we are partially
5655 				 * delivering now, then we no longer are.
5656 				 * Note this will change with the reassembly
5657 				 * re-write.
5658 				 */
5659 				asoc->fragmented_delivery_inprogress = 0;
5660 			}
5661 			strm = &asoc->strmin[sid];
5662 			if (ordered) {
5663 				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5664 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5665 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5666 					}
5667 				}
5668 			} else {
5669 				if (asoc->idata_supported) {
5670 					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5671 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5672 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5673 						}
5674 					}
5675 				} else {
5676 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5677 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5678 					}
5679 				}
5680 			}
5681 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5682 				if ((control->sinfo_stream == sid) &&
5683 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5684 					str_seq = (sid << 16) | (0x0000ffff & mid);
5685 					control->pdapi_aborted = 1;
5686 					sv = stcb->asoc.control_pdapi;
5687 					control->end_added = 1;
5688 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5689 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5690 						if (asoc->size_on_all_streams >= control->length) {
5691 							asoc->size_on_all_streams -= control->length;
5692 						} else {
5693 #ifdef INVARIANTS
5694 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5695 #else
5696 							asoc->size_on_all_streams = 0;
5697 #endif
5698 						}
5699 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5700 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5701 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5702 #ifdef INVARIANTS
5703 					} else if (control->on_strm_q) {
5704 						panic("strm: %p ctl: %p unknown %d",
5705 						    strm, control, control->on_strm_q);
5706 #endif
5707 					}
5708 					control->on_strm_q = 0;
5709 					stcb->asoc.control_pdapi = control;
5710 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5711 					    stcb,
5712 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5713 					    (void *)&str_seq,
5714 					    SCTP_SO_NOT_LOCKED);
5715 					stcb->asoc.control_pdapi = sv;
5716 					break;
5717 				} else if ((control->sinfo_stream == sid) &&
5718 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5719 					/* We are past our victim SSN */
5720 					break;
5721 				}
5722 			}
5723 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5724 				/* Update the sequence number */
5725 				strm->last_mid_delivered = mid;
5726 			}
5727 			/* now kick the stream the new way */
5728 			/* sa_ignore NO_NULL_CHK */
5729 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5730 		}
5731 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5732 	}
5733 	/*
5734 	 * Now slide things forward.
5735 	 */
5736 	sctp_slide_mapping_arrays(stcb);
5737 }
5738