xref: /freebsd/sys/netinet/sctp_indata.c (revision 9e4c35f867aca020df8d01fb7371bf5ae1cc8a2d)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a SACK into the chunk queue (if I have data to
58  * send, that is, and will be sending it) for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int hold_rlock);
70 
71 void
72 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73 {
74 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
75 }
76 
77 /* Calculate what the rwnd would be */
78 uint32_t
79 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80 {
81 	uint32_t calc = 0;
82 
83 	/*
84 	 * This is really set wrong with respect to a 1-to-many socket, since
85 	 * the sb_cc is the count that everyone has put up. When we re-write
86 	 * sctp_soreceive then we will fix this so that ONLY this
87 	 * association's data is taken into account.
88 	 */
89 	if (stcb->sctp_socket == NULL) {
90 		return (calc);
91 	}
92 
93 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
94 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
95 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
96 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
97 	if (stcb->asoc.sb_cc == 0 &&
98 	    asoc->cnt_on_reasm_queue == 0 &&
99 	    asoc->cnt_on_all_streams == 0) {
100 		/* Full rwnd granted */
101 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
102 		return (calc);
103 	}
104 	/* get actual space */
105 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
106 	/*
107 	 * Take out what has NOT been put on the socket queue and what we
108 	 * still hold for putting up.
109 	 */
110 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
111 	    asoc->cnt_on_reasm_queue * MSIZE));
112 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
113 	    asoc->cnt_on_all_streams * MSIZE));
114 	if (calc == 0) {
115 		/* out of space */
116 		return (calc);
117 	}
118 
119 	/* What is the overhead of all these rwnds? */
120 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
121 	/*
122 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 	 * even if it is 0. SWS engaged.
124 	 */
125 	if (calc < stcb->asoc.my_rwnd_control_len) {
126 		calc = 1;
127 	}
128 	return (calc);
129 }
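
/*
 * Worked example (illustrative only; all numbers are hypothetical): suppose
 * sctp_sbspace() reports 60000 bytes free, two chunks totalling 3000 bytes
 * sit on the reassembly queue, one 1000-byte chunk sits on the stream
 * queues, and MSIZE is 256. Each queued chunk is charged MSIZE of mbuf
 * overhead on top of its payload, so:
 *
 *   calc = 60000
 *   calc -= 3000 + 2 * 256    -> 56488   (reassembly queue)
 *   calc -= 1000 + 1 * 256    -> 55232   (stream queues)
 *   calc -= my_rwnd_control_len          (control-message overhead)
 *
 * If the result falls below my_rwnd_control_len, it is clamped to 1 rather
 * than 0 so the peer can still probe the window (SWS avoidance).
 */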
130 
131 /*
132  * Build out our readq entry based on the incoming packet.
133  */
134 struct sctp_queued_to_read *
135 sctp_build_readq_entry(struct sctp_tcb *stcb,
136     struct sctp_nets *net,
137     uint32_t tsn, uint32_t ppid,
138     uint32_t context, uint16_t sid,
139     uint32_t mid, uint8_t flags,
140     struct mbuf *dm)
141 {
142 	struct sctp_queued_to_read *read_queue_e = NULL;
143 
144 	sctp_alloc_a_readq(stcb, read_queue_e);
145 	if (read_queue_e == NULL) {
146 		goto failed_build;
147 	}
148 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
149 	read_queue_e->sinfo_stream = sid;
150 	read_queue_e->sinfo_flags = (flags << 8);
151 	read_queue_e->sinfo_ppid = ppid;
152 	read_queue_e->sinfo_context = context;
153 	read_queue_e->sinfo_tsn = tsn;
154 	read_queue_e->sinfo_cumtsn = tsn;
155 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
156 	read_queue_e->mid = mid;
157 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
158 	TAILQ_INIT(&read_queue_e->reasm);
159 	read_queue_e->whoFrom = net;
160 	atomic_add_int(&net->ref_count, 1);
161 	read_queue_e->data = dm;
162 	read_queue_e->stcb = stcb;
163 	read_queue_e->port_from = stcb->rport;
164 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
165 		read_queue_e->do_not_ref_stcb = 1;
166 	}
167 failed_build:
168 	return (read_queue_e);
169 }
170 
171 struct mbuf *
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173 {
174 	struct sctp_extrcvinfo *seinfo;
175 	struct sctp_sndrcvinfo *outinfo;
176 	struct sctp_rcvinfo *rcvinfo;
177 	struct sctp_nxtinfo *nxtinfo;
178 	struct cmsghdr *cmh;
179 	struct mbuf *ret;
180 	int len;
181 	int use_extended;
182 	int provide_nxt;
183 
184 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 		/* user does not want any ancillary data */
188 		return (NULL);
189 	}
190 
191 	len = 0;
192 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 	}
195 	seinfo = (struct sctp_extrcvinfo *)sinfo;
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 		provide_nxt = 1;
199 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 	} else {
201 		provide_nxt = 0;
202 	}
203 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 			use_extended = 1;
206 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 		} else {
208 			use_extended = 0;
209 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 		}
211 	} else {
212 		use_extended = 0;
213 	}
214 
215 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 	if (ret == NULL) {
217 		/* No space */
218 		return (ret);
219 	}
220 	SCTP_BUF_LEN(ret) = 0;
221 
222 	/* We need a CMSG header followed by the struct */
223 	cmh = mtod(ret, struct cmsghdr *);
224 	/*
225 	 * Make sure that there is no un-initialized padding between the
226 	 * cmsg header and cmsg data and after the cmsg data.
227 	 */
228 	memset(cmh, 0, len);
229 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 		cmh->cmsg_level = IPPROTO_SCTP;
231 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 		cmh->cmsg_type = SCTP_RCVINFO;
233 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 		rcvinfo->rcv_context = sinfo->sinfo_context;
241 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 	}
245 	if (provide_nxt) {
246 		cmh->cmsg_level = IPPROTO_SCTP;
247 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 		cmh->cmsg_type = SCTP_NXTINFO;
249 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 		nxtinfo->nxt_flags = 0;
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 		}
261 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 	}
267 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 		cmh->cmsg_level = IPPROTO_SCTP;
269 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 		if (use_extended) {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 			cmh->cmsg_type = SCTP_EXTRCV;
273 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 		} else {
276 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 			cmh->cmsg_type = SCTP_SNDRCV;
278 			*outinfo = *sinfo;
279 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 		}
281 	}
282 	return (ret);
283 }
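
/*
 * Consumer-side sketch (not part of this file): a userland application that
 * enabled the SCTP_RECVRCVINFO socket option would see the ancillary data
 * built above on each recvmsg() call. A minimal parse, assuming a filled-in
 * msghdr and with error handling elided, might look like this; the function
 * name is made up for illustration.
 */
#if 0
#include <sys/socket.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>

static void
example_parse_rcvinfo(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	struct sctp_rcvinfo rcv;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_RCVINFO) {
			/* Copy out; the cmsg data may be unaligned. */
			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
			printf("sid=%u ssn=%u tsn=%u\n",
			    rcv.rcv_sid, rcv.rcv_ssn, rcv.rcv_tsn);
		}
	}
}
#endif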
284 
285 static void
286 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
287 {
288 	uint32_t gap, i, cumackp1;
289 	int fnd = 0;
290 	int in_r = 0, in_nr = 0;
291 
292 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
293 		return;
294 	}
295 	cumackp1 = asoc->cumulative_tsn + 1;
296 	if (SCTP_TSN_GT(cumackp1, tsn)) {
297 		/*
298 		 * this tsn is behind the cum ack and thus we don't need to
299 		 * worry about it being moved from one to the other.
300 		 */
301 		return;
302 	}
303 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
304 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
305 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
306 	if ((in_r == 0) && (in_nr == 0)) {
307 #ifdef INVARIANTS
308 		panic("Things are really messed up now");
309 #else
310 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
311 		sctp_print_mapping_array(asoc);
312 #endif
313 	}
314 	if (in_nr == 0)
315 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
316 	if (in_r)
317 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
318 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
319 		asoc->highest_tsn_inside_nr_map = tsn;
320 	}
321 	if (tsn == asoc->highest_tsn_inside_map) {
322 		/* We must back down to see what the new highest is */
323 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
324 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
325 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
326 				asoc->highest_tsn_inside_map = i;
327 				fnd = 1;
328 				break;
329 			}
330 		}
331 		if (!fnd) {
332 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
333 		}
334 	}
335 }
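
/*
 * Standalone model of the gap arithmetic used above (an assumption: this
 * mirrors what SCTP_CALC_TSN_TO_GAP computes, namely the TSN's offset from
 * the window base in serial arithmetic). It shows why a TSN that has
 * wrapped past 2^32 - 1 still lands on the right bit of the mapping arrays.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
example_tsn_to_gap(uint32_t tsn, uint32_t base)
{
	/*
	 * Unsigned subtraction wraps mod 2^32, which is exactly serial
	 * number arithmetic (RFC 1982).
	 */
	return (tsn - base);
}

int
main(void)
{
	/* Base just below the wrap point, TSN just past it: gap is 3. */
	printf("%u\n", example_tsn_to_gap(0x00000001U, 0xfffffffeU));
	return (0);
}
#endif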
336 
337 static int
338 sctp_place_control_in_stream(struct sctp_stream_in *strm,
339     struct sctp_association *asoc,
340     struct sctp_queued_to_read *control)
341 {
342 	struct sctp_queued_to_read *at;
343 	struct sctp_readhead *q;
344 	uint8_t flags, unordered;
345 
346 	flags = (control->sinfo_flags >> 8);
347 	unordered = flags & SCTP_DATA_UNORDERED;
348 	if (unordered) {
349 		q = &strm->uno_inqueue;
350 		if (asoc->idata_supported == 0) {
351 			if (!TAILQ_EMPTY(q)) {
352 				/*
353 				 * Only one stream can be here in old style
354 				 * -- abort
355 				 */
356 				return (-1);
357 			}
358 			TAILQ_INSERT_TAIL(q, control, next_instrm);
359 			control->on_strm_q = SCTP_ON_UNORDERED;
360 			return (0);
361 		}
362 	} else {
363 		q = &strm->inqueue;
364 	}
365 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
366 		control->end_added = 1;
367 		control->first_frag_seen = 1;
368 		control->last_frag_seen = 1;
369 	}
370 	if (TAILQ_EMPTY(q)) {
371 		/* Empty queue */
372 		TAILQ_INSERT_HEAD(q, control, next_instrm);
373 		if (unordered) {
374 			control->on_strm_q = SCTP_ON_UNORDERED;
375 		} else {
376 			control->on_strm_q = SCTP_ON_ORDERED;
377 		}
378 		return (0);
379 	} else {
380 		TAILQ_FOREACH(at, q, next_instrm) {
381 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
382 				/*
383 				 * one in queue is bigger than the new one,
384 				 * insert before this one
385 				 */
386 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
387 				if (unordered) {
388 					control->on_strm_q = SCTP_ON_UNORDERED;
389 				} else {
390 					control->on_strm_q = SCTP_ON_ORDERED;
391 				}
392 				break;
393 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
394 				/*
395 				 * Gak, he sent me a duplicate msg id
396 				 * number?? Return -1 to abort.
397 				 */
398 				return (-1);
399 			} else {
400 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
401 					/*
402 					 * We are at the end, insert it
403 					 * after this one
404 					 */
405 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
406 						sctp_log_strm_del(control, at,
407 						    SCTP_STR_LOG_FROM_INSERT_TL);
408 					}
409 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
410 					if (unordered) {
411 						control->on_strm_q = SCTP_ON_UNORDERED;
412 					} else {
413 						control->on_strm_q = SCTP_ON_ORDERED;
414 					}
415 					break;
416 				}
417 			}
418 		}
419 	}
420 	return (0);
421 }
422 
423 static void
424 sctp_abort_in_reasm(struct sctp_tcb *stcb,
425     struct sctp_queued_to_read *control,
426     struct sctp_tmit_chunk *chk,
427     int *abort_flag, int opspot)
428 {
429 	char msg[SCTP_DIAG_INFO_LEN];
430 	struct mbuf *oper;
431 
432 	if (stcb->asoc.idata_supported) {
433 		SCTP_SNPRINTF(msg, sizeof(msg),
434 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
435 		    opspot,
436 		    control->fsn_included,
437 		    chk->rec.data.tsn,
438 		    chk->rec.data.sid,
439 		    chk->rec.data.fsn, chk->rec.data.mid);
440 	} else {
441 		SCTP_SNPRINTF(msg, sizeof(msg),
442 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
443 		    opspot,
444 		    control->fsn_included,
445 		    chk->rec.data.tsn,
446 		    chk->rec.data.sid,
447 		    chk->rec.data.fsn,
448 		    (uint16_t)chk->rec.data.mid);
449 	}
450 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
451 	sctp_m_freem(chk->data);
452 	chk->data = NULL;
453 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
454 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
455 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
456 	*abort_flag = 1;
457 }
458 
459 static void
460 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
461 {
462 	/*
463 	 * The control could not be placed and must be cleaned.
464 	 */
465 	struct sctp_tmit_chunk *chk, *nchk;
466 
467 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
468 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
469 		if (chk->data)
470 			sctp_m_freem(chk->data);
471 		chk->data = NULL;
472 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
473 	}
474 	sctp_free_remote_addr(control->whoFrom);
475 	if (control->data) {
476 		sctp_m_freem(control->data);
477 		control->data = NULL;
478 	}
479 	sctp_free_a_readq(stcb, control);
480 }
481 
482 /*
483  * Queue the chunk either right into the socket buffer if it is the next one
484  * to go OR put it in the correct place in the delivery queue.  If we do
485  * append to the so_buf, keep doing so until we hit one that is out of
486  * order, as long as the controls entered are non-fragmented.
487  */
488 static void
489 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
490     struct sctp_association *asoc,
491     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
492 {
493 	/*
494 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
495 	 * all the data in one stream this could happen quite rapidly. One
496 	 * could use the TSN to keep track of things, but this scheme breaks
497 	 * down in the other type of stream usage that could occur. Send a
498 	 * single msg to stream 0, send 4 billion messages to stream 1, now
499 	 * send a message to stream 0. You have a situation where the TSN
500 	 * has wrapped but not in the stream. Is this worth worrying about,
501 	 * or should we just change our queue sort at the bottom to be by
502 	 * TSN?
503 	 *
504 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
505 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
506 	 * assignment this could happen... and I don't see how this would be
507 	 * a violation. So for now I am undecided and will leave the sort by
508 	 * SSN alone. Maybe a hybrid approach is the answer.
509 	 *
510 	 */
511 	struct sctp_queued_to_read *at;
512 	int queue_needed;
513 	uint32_t nxt_todel;
514 	struct mbuf *op_err;
515 	struct sctp_stream_in *strm;
516 	char msg[SCTP_DIAG_INFO_LEN];
517 
518 	strm = &asoc->strmin[control->sinfo_stream];
519 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
520 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
521 	}
522 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
523 		/* The incoming sseq is behind where we last delivered? */
524 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
525 		    strm->last_mid_delivered, control->mid);
526 		/*
527 		 * throw it in the stream so it gets cleaned up in
528 		 * association destruction
529 		 */
530 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
531 		if (asoc->idata_supported) {
532 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
533 			    strm->last_mid_delivered, control->sinfo_tsn,
534 			    control->sinfo_stream, control->mid);
535 		} else {
536 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
537 			    (uint16_t)strm->last_mid_delivered,
538 			    control->sinfo_tsn,
539 			    control->sinfo_stream,
540 			    (uint16_t)control->mid);
541 		}
542 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
543 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
544 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
545 		*abort_flag = 1;
546 		return;
547 	}
548 	queue_needed = 1;
549 	asoc->size_on_all_streams += control->length;
550 	sctp_ucount_incr(asoc->cnt_on_all_streams);
551 	nxt_todel = strm->last_mid_delivered + 1;
552 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
553 		/* can be delivered right away? */
554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
555 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
556 		}
557 		/* EY it won't be queued if it could be delivered directly */
558 		queue_needed = 0;
559 		if (asoc->size_on_all_streams >= control->length) {
560 			asoc->size_on_all_streams -= control->length;
561 		} else {
562 #ifdef INVARIANTS
563 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
564 #else
565 			asoc->size_on_all_streams = 0;
566 #endif
567 		}
568 		sctp_ucount_decr(asoc->cnt_on_all_streams);
569 		strm->last_mid_delivered++;
570 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
571 		sctp_add_to_readq(stcb->sctp_ep, stcb,
572 		    control,
573 		    &stcb->sctp_socket->so_rcv, 1,
574 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
575 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
576 			/* all delivered */
577 			nxt_todel = strm->last_mid_delivered + 1;
578 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
579 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
580 				if (control->on_strm_q == SCTP_ON_ORDERED) {
581 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
582 					if (asoc->size_on_all_streams >= control->length) {
583 						asoc->size_on_all_streams -= control->length;
584 					} else {
585 #ifdef INVARIANTS
586 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
587 #else
588 						asoc->size_on_all_streams = 0;
589 #endif
590 					}
591 					sctp_ucount_decr(asoc->cnt_on_all_streams);
592 #ifdef INVARIANTS
593 				} else {
594 					panic("Huh control: %p is on_strm_q: %d",
595 					    control, control->on_strm_q);
596 #endif
597 				}
598 				control->on_strm_q = 0;
599 				strm->last_mid_delivered++;
600 				/*
601 				 * We ignore the return of deliver_data here
602 				 * since we can always hold the chunk on the
603 				 * d-queue, and we have a finite number that
604 				 * can be delivered from the strq.
605 				 */
606 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
607 					sctp_log_strm_del(control, NULL,
608 					    SCTP_STR_LOG_FROM_IMMED_DEL);
609 				}
610 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
611 				sctp_add_to_readq(stcb->sctp_ep, stcb,
612 				    control,
613 				    &stcb->sctp_socket->so_rcv, 1,
614 				    SCTP_READ_LOCK_NOT_HELD,
615 				    SCTP_SO_LOCKED);
616 				continue;
617 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
618 				*need_reasm = 1;
619 			}
620 			break;
621 		}
622 	}
623 	if (queue_needed) {
624 		/*
625 		 * Ok, we did not deliver this guy, find the correct place
626 		 * to put it on the queue.
627 		 */
628 		if (sctp_place_control_in_stream(strm, asoc, control)) {
629 			SCTP_SNPRINTF(msg, sizeof(msg),
630 			    "Queue to str MID: %u duplicate", control->mid);
631 			sctp_clean_up_control(stcb, control);
632 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
633 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
634 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
635 			*abort_flag = 1;
636 		}
637 	}
638 }
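
/*
 * Delivery trace (illustrative, hypothetical MIDs): with
 * last_mid_delivered == 4, an arriving complete message with MID 6 is
 * queued, since 5 is still missing. When MID 5 arrives it is pushed
 * straight to the read queue, and the drain loop above then delivers the
 * queued, non-fragmented MID 6 as well, leaving last_mid_delivered == 6.
 */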
639 
640 static void
641 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
642 {
643 	struct mbuf *m, *prev = NULL;
644 	struct sctp_tcb *stcb;
645 
646 	stcb = control->stcb;
647 	control->held_length = 0;
648 	control->length = 0;
649 	m = control->data;
650 	while (m) {
651 		if (SCTP_BUF_LEN(m) == 0) {
652 			/* Skip mbufs with NO length */
653 			if (prev == NULL) {
654 				/* First one */
655 				control->data = sctp_m_free(m);
656 				m = control->data;
657 			} else {
658 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
659 				m = SCTP_BUF_NEXT(prev);
660 			}
661 			if (m == NULL) {
662 				control->tail_mbuf = prev;
663 			}
664 			continue;
665 		}
666 		prev = m;
667 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
668 		if (control->on_read_q) {
669 			/*
670 			 * On the read queue, so we must increment the SB stuff;
671 			 * we assume the caller has taken any needed SB locks.
672 			 */
673 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
674 		}
675 		m = SCTP_BUF_NEXT(m);
676 	}
677 	if (prev) {
678 		control->tail_mbuf = prev;
679 	}
680 }
681 
682 static void
683 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
684 {
685 	struct mbuf *prev = NULL;
686 	struct sctp_tcb *stcb;
687 
688 	stcb = control->stcb;
689 	if (stcb == NULL) {
690 #ifdef INVARIANTS
691 		panic("Control broken");
692 #else
693 		return;
694 #endif
695 	}
696 	if (control->tail_mbuf == NULL) {
697 		/* TSNH */
698 		sctp_m_freem(control->data);
699 		control->data = m;
700 		sctp_setup_tail_pointer(control);
701 		return;
702 	}
703 	control->tail_mbuf->m_next = m;
704 	while (m) {
705 		if (SCTP_BUF_LEN(m) == 0) {
706 			/* Skip mbufs with NO length */
707 			if (prev == NULL) {
708 				/* First one */
709 				control->tail_mbuf->m_next = sctp_m_free(m);
710 				m = control->tail_mbuf->m_next;
711 			} else {
712 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
713 				m = SCTP_BUF_NEXT(prev);
714 			}
715 			if (m == NULL) {
716 				control->tail_mbuf = prev;
717 			}
718 			continue;
719 		}
720 		prev = m;
721 		if (control->on_read_q) {
722 			/*
723 			 * On the read queue, so we must increment the SB stuff;
724 			 * we assume the caller has taken any needed SB locks.
725 			 */
726 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
727 		}
728 		*added += SCTP_BUF_LEN(m);
729 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
730 		m = SCTP_BUF_NEXT(m);
731 	}
732 	if (prev) {
733 		control->tail_mbuf = prev;
734 	}
735 }
736 
737 static void
738 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
739 {
740 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
741 	nc->sinfo_stream = control->sinfo_stream;
742 	nc->mid = control->mid;
743 	TAILQ_INIT(&nc->reasm);
744 	nc->top_fsn = control->top_fsn;
746 	nc->sinfo_flags = control->sinfo_flags;
747 	nc->sinfo_ppid = control->sinfo_ppid;
748 	nc->sinfo_context = control->sinfo_context;
749 	nc->fsn_included = 0xffffffff;
750 	nc->sinfo_tsn = control->sinfo_tsn;
751 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
752 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
753 	nc->whoFrom = control->whoFrom;
754 	atomic_add_int(&nc->whoFrom->ref_count, 1);
755 	nc->stcb = control->stcb;
756 	nc->port_from = control->port_from;
757 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
758 }
759 
760 static void
761 sctp_reset_a_control(struct sctp_queued_to_read *control,
762     struct sctp_inpcb *inp, uint32_t tsn)
763 {
764 	control->fsn_included = tsn;
765 	if (control->on_read_q) {
766 		/*
767 		 * We have to purge it from there, hopefully this will work
768 		 * :-)
769 		 */
770 		TAILQ_REMOVE(&inp->read_queue, control, next);
771 		control->on_read_q = 0;
772 	}
773 }
774 
775 static int
776 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
777     struct sctp_association *asoc,
778     struct sctp_stream_in *strm,
779     struct sctp_queued_to_read *control,
780     uint32_t pd_point,
781     int inp_read_lock_held)
782 {
783 	/*
784 	 * Special handling for the old un-ordered data chunk. All the
785 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
786 	 * to see if we have it all. If we return 1, no other control
787 	 * entries on the un-ordered queue will be looked at. In theory
788 	 * there should be no other entries in reality, unless the guy is
789 	 * sending both unordered NDATA and unordered DATA...
790 	 */
791 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
792 	uint32_t fsn;
793 	struct sctp_queued_to_read *nc;
794 	int cnt_added;
795 
796 	if (control->first_frag_seen == 0) {
797 		/* Nothing we can do, we have not seen the first piece yet */
798 		return (1);
799 	}
800 	/* Collapse any we can */
801 	cnt_added = 0;
802 restart:
803 	fsn = control->fsn_included + 1;
804 	/* Now what can we add? */
805 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
806 		if (chk->rec.data.fsn == fsn) {
807 			/* Ok, let's add it */
808 			sctp_alloc_a_readq(stcb, nc);
809 			if (nc == NULL) {
810 				break;
811 			}
812 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
813 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
814 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
815 			fsn++;
816 			cnt_added++;
817 			chk = NULL;
818 			if (control->end_added) {
819 				/* We are done */
820 				if (!TAILQ_EMPTY(&control->reasm)) {
821 					/*
822 					 * Ok we have to move anything left
823 					 * on the control queue to a new
824 					 * control.
825 					 */
826 					sctp_build_readq_entry_from_ctl(nc, control);
827 					tchk = TAILQ_FIRST(&control->reasm);
828 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
829 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
830 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
831 							asoc->size_on_reasm_queue -= tchk->send_size;
832 						} else {
833 #ifdef INVARIANTS
834 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
835 #else
836 							asoc->size_on_reasm_queue = 0;
837 #endif
838 						}
839 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
840 						nc->first_frag_seen = 1;
841 						nc->fsn_included = tchk->rec.data.fsn;
842 						nc->data = tchk->data;
843 						nc->sinfo_ppid = tchk->rec.data.ppid;
844 						nc->sinfo_tsn = tchk->rec.data.tsn;
845 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
846 						tchk->data = NULL;
847 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
848 						sctp_setup_tail_pointer(nc);
849 						tchk = TAILQ_FIRST(&control->reasm);
850 					}
851 					/* Spin the rest onto the queue */
852 					while (tchk) {
853 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
854 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
855 						tchk = TAILQ_FIRST(&control->reasm);
856 					}
857 					/*
858 					 * Now let's add it to the queue
859 					 * after removing control.
860 					 */
861 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
862 					nc->on_strm_q = SCTP_ON_UNORDERED;
863 					if (control->on_strm_q) {
864 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
865 						control->on_strm_q = 0;
866 					}
867 				}
868 				if (control->pdapi_started) {
869 					strm->pd_api_started = 0;
870 					control->pdapi_started = 0;
871 				}
872 				if (control->on_strm_q) {
873 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
874 					control->on_strm_q = 0;
875 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
876 				}
877 				if (control->on_read_q == 0) {
878 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
879 					    &stcb->sctp_socket->so_rcv, control->end_added,
880 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
881 				}
882 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
883 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
884 					/*
885 					 * Switch to the new guy and
886 					 * continue
887 					 */
888 					control = nc;
889 					goto restart;
890 				} else {
891 					if (nc->on_strm_q == 0) {
892 						sctp_free_a_readq(stcb, nc);
893 					}
894 				}
895 				return (1);
896 			} else {
897 				sctp_free_a_readq(stcb, nc);
898 			}
899 		} else {
900 			/* Can't add more */
901 			break;
902 		}
903 	}
904 	if (cnt_added && strm->pd_api_started) {
905 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
906 	}
907 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
908 		strm->pd_api_started = 1;
909 		control->pdapi_started = 1;
910 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
911 		    &stcb->sctp_socket->so_rcv, control->end_added,
912 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
913 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
914 		return (0);
915 	} else {
916 		return (1);
917 	}
918 }
919 
920 static void
921 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
922     struct sctp_association *asoc,
923     struct sctp_queued_to_read *control,
924     struct sctp_tmit_chunk *chk,
925     int *abort_flag)
926 {
927 	struct sctp_tmit_chunk *at;
928 	int inserted;
929 
930 	/*
931 	 * Here we need to place the chunk into the control structure sorted
932 	 * in the correct order.
933 	 */
934 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
935 		/* It's the very first one. */
936 		SCTPDBG(SCTP_DEBUG_XXX,
937 		    "chunk is a first fsn: %u becomes fsn_included\n",
938 		    chk->rec.data.fsn);
939 		at = TAILQ_FIRST(&control->reasm);
940 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
941 			/*
942 			 * The first chunk in the reassembly is a smaller
943 			 * TSN than this one; even though this one has a FIRST,
944 			 * it must be from a subsequent msg.
945 			 */
946 			goto place_chunk;
947 		}
948 		if (control->first_frag_seen) {
949 			/*
950 			 * In old un-ordered mode we can reassemble multiple
951 			 * messages on one control, as long as the next FIRST
952 			 * is greater than the old first (TSN-wise, i.e.
953 			 * FSN-wise).
954 			 */
955 			struct mbuf *tdata;
956 			uint32_t tmp;
957 
958 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
959 				/*
960 				 * Easy case: the start of a new guy beyond
961 				 * the lowest.
962 				 */
963 				goto place_chunk;
964 			}
965 			if ((chk->rec.data.fsn == control->fsn_included) ||
966 			    (control->pdapi_started)) {
967 				/*
968 				 * Ok, this should not happen; if it does, we
969 				 * started the pd-api on the higher TSN
970 				 * (since the equals part is a TSN failure
971 				 * it must be that).
972 				 *
973 				 * We are completely hosed in that case, since
974 				 * I have no way to recover. This really
975 				 * will only happen if we can get more TSN's
976 				 * higher before the pd-api-point.
977 				 */
978 				sctp_abort_in_reasm(stcb, control, chk,
979 				    abort_flag,
980 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
981 
982 				return;
983 			}
984 			/*
985 			 * Ok we have two firsts and the one we just got is
986 			 * smaller than the one we previously placed... yuck!
987 			 * We must swap them out.
988 			 */
989 			/* swap the mbufs */
990 			tdata = control->data;
991 			control->data = chk->data;
992 			chk->data = tdata;
993 			/* Save the lengths */
994 			chk->send_size = control->length;
995 			/* Recompute length of control and tail pointer */
996 			sctp_setup_tail_pointer(control);
997 			/* Fix the FSN included */
998 			tmp = control->fsn_included;
999 			control->fsn_included = chk->rec.data.fsn;
1000 			chk->rec.data.fsn = tmp;
1001 			/* Fix the TSN included */
1002 			tmp = control->sinfo_tsn;
1003 			control->sinfo_tsn = chk->rec.data.tsn;
1004 			chk->rec.data.tsn = tmp;
1005 			/* Fix the PPID included */
1006 			tmp = control->sinfo_ppid;
1007 			control->sinfo_ppid = chk->rec.data.ppid;
1008 			chk->rec.data.ppid = tmp;
1009 			/* Fix tail pointer */
1010 			goto place_chunk;
1011 		}
1012 		control->first_frag_seen = 1;
1013 		control->fsn_included = chk->rec.data.fsn;
1014 		control->top_fsn = chk->rec.data.fsn;
1015 		control->sinfo_tsn = chk->rec.data.tsn;
1016 		control->sinfo_ppid = chk->rec.data.ppid;
1017 		control->data = chk->data;
1018 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1019 		chk->data = NULL;
1020 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1021 		sctp_setup_tail_pointer(control);
1022 		return;
1023 	}
1024 place_chunk:
1025 	inserted = 0;
1026 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1027 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1028 			/*
1029 			 * This one in queue is bigger than the new one,
1030 			 * insert the new one before at.
1031 			 */
1032 			asoc->size_on_reasm_queue += chk->send_size;
1033 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1034 			inserted = 1;
1035 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1036 			break;
1037 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1038 			/*
1039 			 * They sent a duplicate fsn number. This really
1040 			 * should not happen since the FSN is a TSN and it
1041 			 * should have been dropped earlier.
1042 			 */
1043 			sctp_abort_in_reasm(stcb, control, chk,
1044 			    abort_flag,
1045 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1046 			return;
1047 		}
1048 	}
1049 	if (inserted == 0) {
1050 		/* It's at the end */
1051 		asoc->size_on_reasm_queue += chk->send_size;
1052 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1053 		control->top_fsn = chk->rec.data.fsn;
1054 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1055 	}
1056 }
1057 
1058 static int
1059 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1060     struct sctp_stream_in *strm, int inp_read_lock_held)
1061 {
1062 	/*
1063 	 * Given a stream, strm, see if any of the SSN's on it that are
1064 	 * fragmented are ready to deliver. If so, go ahead and place them on
1065 	 * the read queue. In so placing, if we have hit the end, then we
1066 	 * need to remove them from the stream's queue.
1067 	 */
1068 	struct sctp_queued_to_read *control, *nctl = NULL;
1069 	uint32_t next_to_del;
1070 	uint32_t pd_point;
1071 	int ret = 0;
1072 
1073 	if (stcb->sctp_socket) {
1074 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1075 		    stcb->sctp_ep->partial_delivery_point);
1076 	} else {
1077 		pd_point = stcb->sctp_ep->partial_delivery_point;
1078 	}
1079 	control = TAILQ_FIRST(&strm->uno_inqueue);
1080 
1081 	if ((control != NULL) &&
1082 	    (asoc->idata_supported == 0)) {
1083 		/* Special handling needed for "old" data format */
1084 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1085 			goto done_un;
1086 		}
1087 	}
1088 	if (strm->pd_api_started) {
1089 		/* Can't add more */
1090 		return (0);
1091 	}
1092 	while (control) {
1093 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1094 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1095 		nctl = TAILQ_NEXT(control, next_instrm);
1096 		if (control->end_added) {
1097 			/* We just put the last bit on */
1098 			if (control->on_strm_q) {
1099 #ifdef INVARIANTS
1100 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1101 					panic("Huh control: %p on_q: %d -- not unordered?",
1102 					    control, control->on_strm_q);
1103 				}
1104 #endif
1105 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1106 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1107 				if (asoc->size_on_all_streams >= control->length) {
1108 					asoc->size_on_all_streams -= control->length;
1109 				} else {
1110 #ifdef INVARIANTS
1111 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1112 #else
1113 					asoc->size_on_all_streams = 0;
1114 #endif
1115 				}
1116 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1117 				control->on_strm_q = 0;
1118 			}
1119 			if (control->on_read_q == 0) {
1120 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1121 				    control,
1122 				    &stcb->sctp_socket->so_rcv, control->end_added,
1123 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1124 			}
1125 		} else {
1126 			/* Can we do a PD-API for this un-ordered guy? */
1127 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1128 				strm->pd_api_started = 1;
1129 				control->pdapi_started = 1;
1130 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1131 				    control,
1132 				    &stcb->sctp_socket->so_rcv, control->end_added,
1133 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1134 
1135 				break;
1136 			}
1137 		}
1138 		control = nctl;
1139 	}
1140 done_un:
1141 	control = TAILQ_FIRST(&strm->inqueue);
1142 	if (strm->pd_api_started) {
1143 		/* Can't add more */
1144 		return (0);
1145 	}
1146 	if (control == NULL) {
1147 		return (ret);
1148 	}
1149 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1150 		/*
1151 		 * Ok, the guy at the top that was being partially delivered
1152 		 * has completed, so we remove it. Note the pd_api flag was
1153 		 * taken off when the chunk was merged on in
1154 		 * sctp_queue_data_for_reasm below.
1155 		 */
1156 		nctl = TAILQ_NEXT(control, next_instrm);
1157 		SCTPDBG(SCTP_DEBUG_XXX,
1158 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1159 		    control, control->end_added, control->mid,
1160 		    control->top_fsn, control->fsn_included,
1161 		    strm->last_mid_delivered);
1162 		if (control->end_added) {
1163 			if (control->on_strm_q) {
1164 #ifdef INVARIANTS
1165 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1166 					panic("Huh control: %p on_q: %d -- not ordered?",
1167 					    control, control->on_strm_q);
1168 				}
1169 #endif
1170 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1171 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1172 				if (asoc->size_on_all_streams >= control->length) {
1173 					asoc->size_on_all_streams -= control->length;
1174 				} else {
1175 #ifdef INVARIANTS
1176 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1177 #else
1178 					asoc->size_on_all_streams = 0;
1179 #endif
1180 				}
1181 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1182 				control->on_strm_q = 0;
1183 			}
1184 			if (strm->pd_api_started && control->pdapi_started) {
1185 				control->pdapi_started = 0;
1186 				strm->pd_api_started = 0;
1187 			}
1188 			if (control->on_read_q == 0) {
1189 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1190 				    control,
1191 				    &stcb->sctp_socket->so_rcv, control->end_added,
1192 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1193 			}
1194 			control = nctl;
1195 		}
1196 	}
1197 	if (strm->pd_api_started) {
1198 		/*
1199 		 * Can't add more; we must have gotten an un-ordered guy above
1200 		 * that is being partially delivered.
1201 		 */
1202 		return (0);
1203 	}
1204 deliver_more:
1205 	next_to_del = strm->last_mid_delivered + 1;
1206 	if (control) {
1207 		SCTPDBG(SCTP_DEBUG_XXX,
1208 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1209 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1210 		    next_to_del);
1211 		nctl = TAILQ_NEXT(control, next_instrm);
1212 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1213 		    (control->first_frag_seen)) {
1214 			int done;
1215 
1216 			/* Ok we can deliver it onto the stream. */
1217 			if (control->end_added) {
1218 				/* We are done with it afterwards */
1219 				if (control->on_strm_q) {
1220 #ifdef INVARIANTS
1221 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1222 						panic("Huh control: %p on_q: %d -- not ordered?",
1223 						    control, control->on_strm_q);
1224 					}
1225 #endif
1226 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1227 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1228 					if (asoc->size_on_all_streams >= control->length) {
1229 						asoc->size_on_all_streams -= control->length;
1230 					} else {
1231 #ifdef INVARIANTS
1232 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1233 #else
1234 						asoc->size_on_all_streams = 0;
1235 #endif
1236 					}
1237 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1238 					control->on_strm_q = 0;
1239 				}
1240 				ret++;
1241 			}
1242 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1243 				/*
1244 				 * A singleton now slipping through - mark
1245 				 * it non-revokable too
1246 				 */
1247 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1248 			} else if (control->end_added == 0) {
1249 				/*
1250 				 * Check if we can defer adding until it's
1251 				 * all there.
1252 				 */
1253 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1254 					/*
1255 					 * Don't need it or cannot add more
1256 					 * (one being delivered that way)
1257 					 */
1258 					goto out;
1259 				}
1260 			}
1261 			done = (control->end_added) && (control->last_frag_seen);
1262 			if (control->on_read_q == 0) {
1263 				if (!done) {
1264 					if (asoc->size_on_all_streams >= control->length) {
1265 						asoc->size_on_all_streams -= control->length;
1266 					} else {
1267 #ifdef INVARIANTS
1268 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1269 #else
1270 						asoc->size_on_all_streams = 0;
1271 #endif
1272 					}
1273 					strm->pd_api_started = 1;
1274 					control->pdapi_started = 1;
1275 				}
1276 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1277 				    control,
1278 				    &stcb->sctp_socket->so_rcv, control->end_added,
1279 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1280 			}
1281 			strm->last_mid_delivered = next_to_del;
1282 			if (done) {
1283 				control = nctl;
1284 				goto deliver_more;
1285 			}
1286 		}
1287 	}
1288 out:
1289 	return (ret);
1290 }
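
/*
 * Partial-delivery threshold note (illustrative): pd_point above is the
 * smaller of the socket receive-buffer limit shifted down by
 * SCTP_PARTIAL_DELIVERY_SHIFT and the endpoint's configured
 * partial_delivery_point. Once an incomplete message reaches pd_point
 * bytes it is handed to the reader early via the PD-API rather than
 * waiting for its last fragment, and pd_api_started then blocks further
 * deliveries on that stream until the message completes.
 */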
1291 
1292 uint32_t
1293 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1294     struct sctp_stream_in *strm,
1295     struct sctp_tcb *stcb, struct sctp_association *asoc,
1296     struct sctp_tmit_chunk *chk, int hold_rlock)
1297 {
1298 	/*
1299 	 * Given a control and a chunk, merge the data from the chk onto the
1300 	 * control and free up the chunk resources.
1301 	 */
1302 	uint32_t added = 0;
1303 	int i_locked = 0;
1304 
1305 	if (control->on_read_q && (hold_rlock == 0)) {
1306 		/*
1307 		 * It's being pd-api'd, so we must take some locks.
1308 		 */
1309 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1310 		i_locked = 1;
1311 	}
1312 	if (control->data == NULL) {
1313 		control->data = chk->data;
1314 		sctp_setup_tail_pointer(control);
1315 	} else {
1316 		sctp_add_to_tail_pointer(control, chk->data, &added);
1317 	}
1318 	control->fsn_included = chk->rec.data.fsn;
1319 	asoc->size_on_reasm_queue -= chk->send_size;
1320 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1321 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1322 	chk->data = NULL;
1323 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1324 		control->first_frag_seen = 1;
1325 		control->sinfo_tsn = chk->rec.data.tsn;
1326 		control->sinfo_ppid = chk->rec.data.ppid;
1327 	}
1328 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1329 		/* It's complete */
1330 		if ((control->on_strm_q) && (control->on_read_q)) {
1331 			if (control->pdapi_started) {
1332 				control->pdapi_started = 0;
1333 				strm->pd_api_started = 0;
1334 			}
1335 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1336 				/* Unordered */
1337 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1338 				control->on_strm_q = 0;
1339 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1340 				/* Ordered */
1341 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1342 				/*
1343 				 * Don't need to decrement
1344 				 * size_on_all_streams, since control is on
1345 				 * the read queue.
1346 				 */
1347 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1348 				control->on_strm_q = 0;
1349 #ifdef INVARIANTS
1350 			} else if (control->on_strm_q) {
1351 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1352 				    control->on_strm_q);
1353 #endif
1354 			}
1355 		}
1356 		control->end_added = 1;
1357 		control->last_frag_seen = 1;
1358 	}
1359 	if (i_locked) {
1360 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1361 	}
1362 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1363 	return (added);
1364 }
1365 
1366 /*
1367  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1368  * queue, see if anything can be delivered. If so, pull it off (or as much
1369  * as we can). If we run out of space then we must dump what we can and set the
1370  * appropriate flag to say we queued what we could.
1371  */
1372 static void
1373 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1374     struct sctp_queued_to_read *control,
1375     struct sctp_tmit_chunk *chk,
1376     int created_control,
1377     int *abort_flag, uint32_t tsn)
1378 {
1379 	uint32_t next_fsn;
1380 	struct sctp_tmit_chunk *at, *nat;
1381 	struct sctp_stream_in *strm;
1382 	int do_wakeup, unordered;
1383 	uint32_t lenadded;
1384 
1385 	strm = &asoc->strmin[control->sinfo_stream];
1386 	/*
1387 	 * For old un-ordered data chunks.
1388 	 */
1389 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1390 		unordered = 1;
1391 	} else {
1392 		unordered = 0;
1393 	}
1394 	/* Must be added to the stream-in queue */
1395 	if (created_control) {
1396 		if ((unordered == 0) || (asoc->idata_supported)) {
1397 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1398 		}
1399 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1400 			/* Duplicate SSN? */
1401 			sctp_abort_in_reasm(stcb, control, chk,
1402 			    abort_flag,
1403 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1404 			sctp_clean_up_control(stcb, control);
1405 			return;
1406 		}
1407 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1408 			/*
1409 			 * Ok, we created this control, and now let's validate
1410 			 * that it's legal, i.e. there is a B bit set; if not,
1411 			 * and we have up to the cum-ack, then it's invalid.
1412 			 */
1413 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1414 				sctp_abort_in_reasm(stcb, control, chk,
1415 				    abort_flag,
1416 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1417 				return;
1418 			}
1419 		}
1420 	}
1421 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1422 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1423 		return;
1424 	}
1425 	/*
1426 	 * Ok, we must queue the chunk into the reassembly portion: o if it's
1427 	 * the first, it goes to the control mbuf. o if it's not first but the
1428 	 * next in sequence, it goes to the control, and each succeeding one
1429 	 * in order also goes. o if it's not in order, we place it on the list
1430 	 * in its place.
1431 	 */
1432 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1433 		/* It's the very first one. */
1434 		SCTPDBG(SCTP_DEBUG_XXX,
1435 		    "chunk is a first fsn: %u becomes fsn_included\n",
1436 		    chk->rec.data.fsn);
1437 		if (control->first_frag_seen) {
1438 			/*
1439 			 * Error on the sender's part: they either sent us two
1440 			 * data chunks with FIRST, or they sent two
1441 			 * un-ordered chunks that were fragmented at the
1442 			 * same time in the same stream.
1443 			 */
1444 			sctp_abort_in_reasm(stcb, control, chk,
1445 			    abort_flag,
1446 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1447 			return;
1448 		}
1449 		control->first_frag_seen = 1;
1450 		control->sinfo_ppid = chk->rec.data.ppid;
1451 		control->sinfo_tsn = chk->rec.data.tsn;
1452 		control->fsn_included = chk->rec.data.fsn;
1453 		control->data = chk->data;
1454 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1455 		chk->data = NULL;
1456 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1457 		sctp_setup_tail_pointer(control);
1458 		asoc->size_on_all_streams += control->length;
1459 	} else {
1460 		/* Place the chunk in our list */
1461 		int inserted = 0;
1462 
1463 		if (control->last_frag_seen == 0) {
1464 			/* Still willing to raise highest FSN seen */
1465 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1466 				SCTPDBG(SCTP_DEBUG_XXX,
1467 				    "We have a new top_fsn: %u\n",
1468 				    chk->rec.data.fsn);
1469 				control->top_fsn = chk->rec.data.fsn;
1470 			}
1471 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1472 				SCTPDBG(SCTP_DEBUG_XXX,
1473 				    "The last fsn is now in place fsn: %u\n",
1474 				    chk->rec.data.fsn);
1475 				control->last_frag_seen = 1;
1476 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1477 					SCTPDBG(SCTP_DEBUG_XXX,
1478 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1479 					    chk->rec.data.fsn,
1480 					    control->top_fsn);
1481 					sctp_abort_in_reasm(stcb, control, chk,
1482 					    abort_flag,
1483 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1484 					return;
1485 				}
1486 			}
1487 			if (asoc->idata_supported || control->first_frag_seen) {
1488 				/*
1489 				 * For IDATA we always check since we know
1490 				 * that the first fragment is 0. For old
1491 				 * DATA we have to receive the first before
1492 				 * we know the first FSN (which is the TSN).
1493 				 */
1494 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1495 					/*
1496 					 * We have already delivered up to
1497 					 * this, so it's a dup.
1498 					 */
1499 					sctp_abort_in_reasm(stcb, control, chk,
1500 					    abort_flag,
1501 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1502 					return;
1503 				}
1504 			}
1505 		} else {
1506 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1507 				/* Second last? huh? */
1508 				SCTPDBG(SCTP_DEBUG_XXX,
1509 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1510 				    chk->rec.data.fsn, control->top_fsn);
1511 				sctp_abort_in_reasm(stcb, control,
1512 				    chk, abort_flag,
1513 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1514 				return;
1515 			}
1516 			if (asoc->idata_supported || control->first_frag_seen) {
1517 				/*
1518 				 * For IDATA we always check since we know
1519 				 * that the first fragment is 0. For old
1520 				 * DATA we have to receive the first before
1521 				 * we know the first FSN (which is the TSN).
1522 				 */
1523 
1524 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1525 					/*
1526 					 * We have already delivered up to
1527 					 * this, so it's a dup.
1528 					 */
1529 					SCTPDBG(SCTP_DEBUG_XXX,
1530 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1531 					    chk->rec.data.fsn, control->fsn_included);
1532 					sctp_abort_in_reasm(stcb, control, chk,
1533 					    abort_flag,
1534 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1535 					return;
1536 				}
1537 			}
1538 			/*
1539 			 * Validate not beyond the top FSN if we have seen
1540 			 * the last one.
1541 			 */
1542 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1543 				SCTPDBG(SCTP_DEBUG_XXX,
1544 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1545 				    chk->rec.data.fsn,
1546 				    control->top_fsn);
1547 				sctp_abort_in_reasm(stcb, control, chk,
1548 				    abort_flag,
1549 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1550 				return;
1551 			}
1552 		}
1553 		/*
1554 		 * If we reach here, we need to place the new chunk in the
1555 		 * reassembly for this control.
1556 		 */
1557 		SCTPDBG(SCTP_DEBUG_XXX,
1558 		    "chunk is a not first fsn: %u needs to be inserted\n",
1559 		    chk->rec.data.fsn);
1560 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1561 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1562 				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1563 					/* Last not at the end? huh? */
1564 					SCTPDBG(SCTP_DEBUG_XXX,
1565 					    "Last fragment not last in list: -- abort\n");
1566 					sctp_abort_in_reasm(stcb, control,
1567 					    chk, abort_flag,
1568 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1569 					return;
1570 				}
1571 				/*
1572 				 * This one in queue is bigger than the new
1573 				 * one, insert the new one before at.
1574 				 */
1575 				SCTPDBG(SCTP_DEBUG_XXX,
1576 				    "Insert it before fsn: %u\n",
1577 				    at->rec.data.fsn);
1578 				asoc->size_on_reasm_queue += chk->send_size;
1579 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1580 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1581 				inserted = 1;
1582 				break;
1583 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1584 				/*
1585 				 * Gak, he sent me a duplicate str seq
1586 				 * number
1587 				 */
1588 				/*
1589 				 * foo bar, I guess I will just free this
1590 				 * new guy, should we abort too? FIX ME
1591 				 * MAYBE? Or it COULD be that the SSN's have
1592 				 * wrapped. Maybe I should compare to TSN
1593 				 * somehow... sigh for now just blow away
1594 				 * the chunk!
1595 				 */
1596 				SCTPDBG(SCTP_DEBUG_XXX,
1597 				    "Duplicate to fsn: %u -- abort\n",
1598 				    at->rec.data.fsn);
1599 				sctp_abort_in_reasm(stcb, control,
1600 				    chk, abort_flag,
1601 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1602 				return;
1603 			}
1604 		}
1605 		if (inserted == 0) {
1606 			/* Goes on the end */
1607 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1608 			    chk->rec.data.fsn);
1609 			asoc->size_on_reasm_queue += chk->send_size;
1610 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1611 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1612 		}
1613 	}
1614 	/*
1615 	 * Ok, let's see if we can pull any in-sequence fragments up into
1616 	 * the control structure, if it makes sense.
1617 	 */
1618 	do_wakeup = 0;
1619 	/*
1620 	 * If the first fragment has not been seen there is no sense in
1621 	 * looking.
1622 	 */
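	/*
	 * A worked example (illustrative only): if fsn_included is 4 and
	 * the reasm queue holds fragments 5, 6 and 8, the loop below pulls
	 * 5 and 6 into the control and then stops at the gap before 8.
	 */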
1623 	if (control->first_frag_seen) {
1624 		next_fsn = control->fsn_included + 1;
1625 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1626 			if (at->rec.data.fsn == next_fsn) {
1627 				/* We can add this one now to the control */
1628 				SCTPDBG(SCTP_DEBUG_XXX,
1629 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1630 				    control, at,
1631 				    at->rec.data.fsn,
1632 				    next_fsn, control->fsn_included);
1633 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1634 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1635 				if (control->on_read_q) {
1636 					do_wakeup = 1;
1637 				} else {
1638 					/*
1639 					 * We only add to the
1640 					 * size-on-all-streams if it's not on
1641 					 * the read q. The read q flag will
1642 					 * cause an sballoc so it's accounted
1643 					 * for there.
1644 					 */
1645 					asoc->size_on_all_streams += lenadded;
1646 				}
1647 				next_fsn++;
1648 				if (control->end_added && control->pdapi_started) {
1649 					if (strm->pd_api_started) {
1650 						strm->pd_api_started = 0;
1651 						control->pdapi_started = 0;
1652 					}
1653 					if (control->on_read_q == 0) {
1654 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1655 						    control,
1656 						    &stcb->sctp_socket->so_rcv, control->end_added,
1657 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1658 					}
1659 					break;
1660 				}
1661 			} else {
1662 				break;
1663 			}
1664 		}
1665 	}
1666 	if (do_wakeup) {
1667 		/* Need to wakeup the reader */
1668 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1669 	}
1670 }
1671 
1672 static struct sctp_queued_to_read *
1673 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1674 {
1675 	struct sctp_queued_to_read *control;
1676 
1677 	if (ordered) {
1678 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1679 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1680 				break;
1681 			}
1682 		}
1683 	} else {
1684 		if (idata_supported) {
1685 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1686 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1687 					break;
1688 				}
1689 			}
1690 		} else {
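			/*
			 * Old DATA chunks carry no MID for unordered
			 * messages, so only one unordered reassembly per
			 * stream is tracked here and it sits at the head
			 * of the queue.
			 */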
1691 			control = TAILQ_FIRST(&strm->uno_inqueue);
1692 		}
1693 	}
1694 	return (control);
1695 }
1696 
1697 static int
1698 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1699     struct mbuf **m, int offset, int chk_length,
1700     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1701     int *break_flag, int last_chunk, uint8_t chk_type)
1702 {
1703 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1704 	struct sctp_stream_in *strm;
1705 	uint32_t tsn, fsn, gap, mid;
1706 	struct mbuf *dmbuf;
1707 	int the_len;
1708 	int need_reasm_check = 0;
1709 	uint16_t sid;
1710 	struct mbuf *op_err;
1711 	char msg[SCTP_DIAG_INFO_LEN];
1712 	struct sctp_queued_to_read *control, *ncontrol;
1713 	uint32_t ppid;
1714 	uint8_t chk_flags;
1715 	struct sctp_stream_reset_list *liste;
1716 	int ordered;
1717 	size_t clen;
1718 	int created_control = 0;
1719 
1720 	if (chk_type == SCTP_IDATA) {
1721 		struct sctp_idata_chunk *chunk, chunk_buf;
1722 
1723 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1724 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1725 		chk_flags = chunk->ch.chunk_flags;
1726 		clen = sizeof(struct sctp_idata_chunk);
1727 		tsn = ntohl(chunk->dp.tsn);
1728 		sid = ntohs(chunk->dp.sid);
1729 		mid = ntohl(chunk->dp.mid);
1730 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1731 			fsn = 0;
1732 			ppid = chunk->dp.ppid_fsn.ppid;
1733 		} else {
1734 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1735 			ppid = 0xffffffff;	/* Use as an invalid value. */
1736 		}
1737 	} else {
1738 		struct sctp_data_chunk *chunk, chunk_buf;
1739 
1740 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1741 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1742 		chk_flags = chunk->ch.chunk_flags;
1743 		clen = sizeof(struct sctp_data_chunk);
1744 		tsn = ntohl(chunk->dp.tsn);
1745 		sid = ntohs(chunk->dp.sid);
1746 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1747 		fsn = tsn;
1748 		ppid = chunk->dp.ppid;
1749 	}
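	/*
	 * A rough sketch of the two payload headers parsed above (after
	 * the common chunk header); see RFC 4960 and RFC 8260:
	 *
	 *   DATA:   TSN(4) | SID(2) | SSN(2)      | PPID(4)
	 *   I-DATA: TSN(4) | SID(2) | reserved(2) | MID(4) | PPID or FSN(4)
	 *
	 * For I-DATA the final 32-bit field carries the PPID on the first
	 * fragment and the FSN on all other fragments, which is why the
	 * parsing above selects on SCTP_DATA_FIRST_FRAG.
	 */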
1750 	if ((size_t)chk_length == clen) {
1751 		/*
1752 		 * Need to send an abort since we had an empty data chunk.
1753 		 */
1754 		op_err = sctp_generate_no_user_data_cause(tsn);
1755 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1756 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1757 		*abort_flag = 1;
1758 		return (0);
1759 	}
1760 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1761 		asoc->send_sack = 1;
1762 	}
1763 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1764 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1765 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1766 	}
1767 	if (stcb == NULL) {
1768 		return (0);
1769 	}
1770 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1771 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1772 		/* It is a duplicate */
1773 		SCTP_STAT_INCR(sctps_recvdupdata);
1774 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 			/* Record a dup for the next outbound sack */
1776 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1777 			asoc->numduptsns++;
1778 		}
1779 		asoc->send_sack = 1;
1780 		return (0);
1781 	}
1782 	/* Calculate the number of TSNs between the base and this TSN */
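	/*
	 * Conceptually (a sketch, not the macro's exact definition), the
	 * gap is the serial-arithmetic distance from the base of the
	 * mapping array:
	 *
	 *   gap = tsn - asoc->mapping_array_base_tsn;   (mod 2^32)
	 *
	 * and bit 'gap' of the (nr_)mapping_array then covers this TSN.
	 */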
1783 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1784 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1785 		/* Can't hold the bit in the mapping at max array, toss it */
1786 		return (0);
1787 	}
1788 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1789 		SCTP_TCB_LOCK_ASSERT(stcb);
1790 		if (sctp_expand_mapping_array(asoc, gap)) {
1791 			/* Can't expand, drop it */
1792 			return (0);
1793 		}
1794 	}
1795 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1796 		*high_tsn = tsn;
1797 	}
1798 	/* See if we have received this one already */
1799 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1800 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1801 		SCTP_STAT_INCR(sctps_recvdupdata);
1802 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1803 			/* Record a dup for the next outbound sack */
1804 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1805 			asoc->numduptsns++;
1806 		}
1807 		asoc->send_sack = 1;
1808 		return (0);
1809 	}
1810 	/*
1811 	 * Check to see about the GONE flag; duplicates would have caused a
1812 	 * sack to be sent up above
1813 	 */
1814 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1815 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1816 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1817 		/*
1818 		 * wait a minute, this guy is gone, there is no longer a
1819 		 * receiver. Send peer an ABORT!
1820 		 */
1821 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1822 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1823 		*abort_flag = 1;
1824 		return (0);
1825 	}
1826 	/*
1827 	 * Now before going further we see if there is room. If NOT then we
1828 	 * MAY let one through only IF this TSN is the one we are waiting
1829 	 * for on a partial delivery API.
1830 	 */
1831 
1832 	/* Is the stream valid? */
1833 	if (sid >= asoc->streamincnt) {
1834 		struct sctp_error_invalid_stream *cause;
1835 
1836 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1837 		    0, M_NOWAIT, 1, MT_DATA);
1838 		if (op_err != NULL) {
1839 			/* add some space up front so prepend will work well */
1840 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1841 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1842 			/*
1843 			 * Error causes are just parameters; this one has two
1844 			 * back-to-back parameter headers, one with the error type
1845 			 * and size, the other with the stream id and a reserved field
1846 			 */
1847 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1848 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1849 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1850 			cause->stream_id = htons(sid);
1851 			cause->reserved = htons(0);
1852 			sctp_queue_op_err(stcb, op_err);
1853 		}
1854 		SCTP_STAT_INCR(sctps_badsid);
1855 		SCTP_TCB_LOCK_ASSERT(stcb);
1856 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1858 			asoc->highest_tsn_inside_nr_map = tsn;
1859 		}
1860 		if (tsn == (asoc->cumulative_tsn + 1)) {
1861 			/* Update cum-ack */
1862 			asoc->cumulative_tsn = tsn;
1863 		}
1864 		return (0);
1865 	}
1866 	/*
1867 	 * If its a fragmented message, lets see if we can find the control
1868 	 * on the reassembly queues.
1869 	 */
1870 	if ((chk_type == SCTP_IDATA) &&
1871 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1872 	    (fsn == 0)) {
1873 		/*
1874 		 * The first *must* be fsn 0, and other (middle/end) pieces
1875 		 * can *not* be fsn 0. XXX: This can happen in case of a
1876 		 * wrap-around. Ignore it for now.
1877 		 */
1878 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1879 		goto err_out;
1880 	}
1881 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1882 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1883 	    chk_flags, control);
1884 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1885 		/* See if we can find the re-assembly entity */
1886 		if (control != NULL) {
1887 			/* We found something, does it belong? */
1888 			if (ordered && (mid != control->mid)) {
1889 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1890 		err_out:
1891 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1892 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1893 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1894 				*abort_flag = 1;
1895 				return (0);
1896 			}
1897 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1898 				/*
1899 				 * We can't have a switched order with an
1900 				 * unordered chunk
1901 				 */
1902 				SCTP_SNPRINTF(msg, sizeof(msg),
1903 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1904 				    tsn);
1905 				goto err_out;
1906 			}
1907 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1908 				/*
1909 				 * We can't have a switched unordered with an
1910 				 * ordered chunk
1911 				 */
1912 				SCTP_SNPRINTF(msg, sizeof(msg),
1913 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1914 				    tsn);
1915 				goto err_out;
1916 			}
1917 		}
1918 	} else {
1919 		/*
1920 		 * It's a complete segment. Let's validate we don't have a
1921 		 * re-assembly going on with the same Stream/Seq (for
1922 		 * ordered) or in the same Stream for unordered.
1923 		 */
1924 		if (control != NULL) {
1925 			if (ordered || asoc->idata_supported) {
1926 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1927 				    chk_flags, mid);
1928 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1929 				goto err_out;
1930 			} else {
1931 				if ((tsn == control->fsn_included + 1) &&
1932 				    (control->end_added == 0)) {
1933 					SCTP_SNPRINTF(msg, sizeof(msg),
1934 					    "Illegal message sequence, missing end for MID: %8.8x",
1935 					    control->fsn_included);
1936 					goto err_out;
1937 				} else {
1938 					control = NULL;
1939 				}
1940 			}
1941 		}
1942 	}
1943 	/* now do the tests */
1944 	if (((asoc->cnt_on_all_streams +
1945 	    asoc->cnt_on_reasm_queue +
1946 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1947 	    (((int)asoc->my_rwnd) <= 0)) {
1948 		/*
1949 		 * When we have NO room in the rwnd we check to make sure
1950 		 * the reader is doing its job...
1951 		 */
1952 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1953 			/* some to read, wake-up */
1954 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1955 		}
1956 		/* now is it in the mapping array of what we have accepted? */
1957 		if (chk_type == SCTP_DATA) {
1958 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1959 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1960 				/* Nope not in the valid range dump it */
1961 		dump_packet:
1962 				sctp_set_rwnd(stcb, asoc);
1963 				if ((asoc->cnt_on_all_streams +
1964 				    asoc->cnt_on_reasm_queue +
1965 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1966 					SCTP_STAT_INCR(sctps_datadropchklmt);
1967 				} else {
1968 					SCTP_STAT_INCR(sctps_datadroprwnd);
1969 				}
1970 				*break_flag = 1;
1971 				return (0);
1972 			}
1973 		} else {
1974 			if (control == NULL) {
1975 				goto dump_packet;
1976 			}
1977 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1978 				goto dump_packet;
1979 			}
1980 		}
1981 	}
1982 #ifdef SCTP_ASOCLOG_OF_TSNS
1983 	SCTP_TCB_LOCK_ASSERT(stcb);
1984 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1985 		asoc->tsn_in_at = 0;
1986 		asoc->tsn_in_wrapped = 1;
1987 	}
1988 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1989 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1990 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1991 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1992 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1993 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1994 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1995 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1996 	asoc->tsn_in_at++;
1997 #endif
1998 	/*
1999 	 * Before we continue, let's validate that we are not being fooled by
2000 	 * an evil attacker. We can only have N * 8 chunks outstanding, based
2001 	 * on the TSN spread allowed by the N-byte mapping array, so there is
2002 	 * no way our stream sequence numbers could have wrapped. We of course
2003 	 * only validate the FIRST fragment, so the bit must be set.
2004 	 */
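	/*
	 * For example (illustrative only): an N-byte mapping array tracks
	 * at most N * 8 TSNs, which is far smaller than the 2^16 SSN space
	 * of old DATA (or the 2^32 MID space of I-DATA), so a first
	 * fragment that is genuinely in-window can never sit at or behind
	 * last_mid_delivered.
	 */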
2005 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2006 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2007 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2008 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2009 		/* The incoming sseq is behind where we last delivered? */
2010 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2011 		    mid, asoc->strmin[sid].last_mid_delivered);
2012 
2013 		if (asoc->idata_supported) {
2014 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2015 			    asoc->strmin[sid].last_mid_delivered,
2016 			    tsn,
2017 			    sid,
2018 			    mid);
2019 		} else {
2020 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2021 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2022 			    tsn,
2023 			    sid,
2024 			    (uint16_t)mid);
2025 		}
2026 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2027 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2028 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2029 		*abort_flag = 1;
2030 		return (0);
2031 	}
2032 	if (chk_type == SCTP_IDATA) {
2033 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2034 	} else {
2035 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2036 	}
2037 	if (last_chunk == 0) {
2038 		if (chk_type == SCTP_IDATA) {
2039 			dmbuf = SCTP_M_COPYM(*m,
2040 			    (offset + sizeof(struct sctp_idata_chunk)),
2041 			    the_len, M_NOWAIT);
2042 		} else {
2043 			dmbuf = SCTP_M_COPYM(*m,
2044 			    (offset + sizeof(struct sctp_data_chunk)),
2045 			    the_len, M_NOWAIT);
2046 		}
2047 #ifdef SCTP_MBUF_LOGGING
2048 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2049 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2050 		}
2051 #endif
2052 	} else {
2053 		/* We can steal the last chunk */
2054 		int l_len;
2055 
2056 		dmbuf = *m;
2057 		/* lop off the top part */
2058 		if (chk_type == SCTP_IDATA) {
2059 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2060 		} else {
2061 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2062 		}
2063 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2064 			l_len = SCTP_BUF_LEN(dmbuf);
2065 		} else {
2066 			/*
2067 			 * need to count up the size; hopefully we do not hit
2068 			 * this too often :-0
2069 			 */
2070 			struct mbuf *lat;
2071 
2072 			l_len = 0;
2073 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2074 				l_len += SCTP_BUF_LEN(lat);
2075 			}
2076 		}
2077 		if (l_len > the_len) {
2078 			/* Trim the trailing padding bytes off too */
2079 			m_adj(dmbuf, -(l_len - the_len));
2080 		}
2081 	}
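	/*
	 * Note: m_adj(9) with a positive length trims bytes from the head
	 * of the chain (used above to strip the chunk header) and with a
	 * negative length trims from the tail (used to drop any padding
	 * beyond the_len).
	 */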
2082 	if (dmbuf == NULL) {
2083 		SCTP_STAT_INCR(sctps_nomem);
2084 		return (0);
2085 	}
2086 	/*
2087 	 * Now, no matter what, we need a control; get one if we don't have
2088 	 * one (we may have gotten it above when we found the message was
2089 	 * fragmented).
2090 	 */
2091 	if (control == NULL) {
2092 		sctp_alloc_a_readq(stcb, control);
2093 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2094 		    ppid,
2095 		    sid,
2096 		    chk_flags,
2097 		    NULL, fsn, mid);
2098 		if (control == NULL) {
2099 			SCTP_STAT_INCR(sctps_nomem);
2100 			return (0);
2101 		}
2102 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2103 			struct mbuf *mm;
2104 
2105 			control->data = dmbuf;
2106 			control->tail_mbuf = NULL;
2107 			for (mm = control->data; mm; mm = mm->m_next) {
2108 				control->length += SCTP_BUF_LEN(mm);
2109 				if (SCTP_BUF_NEXT(mm) == NULL) {
2110 					control->tail_mbuf = mm;
2111 				}
2112 			}
2113 			control->end_added = 1;
2114 			control->last_frag_seen = 1;
2115 			control->first_frag_seen = 1;
2116 			control->fsn_included = fsn;
2117 			control->top_fsn = fsn;
2118 		}
2119 		created_control = 1;
2120 	}
2121 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2122 	    chk_flags, ordered, mid, control);
2123 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2124 	    TAILQ_EMPTY(&asoc->resetHead) &&
2125 	    ((ordered == 0) ||
2126 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2127 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2128 		/* Candidate for express delivery */
2129 		/*
2130 		 * It's not fragmented, no PD-API is up, nothing is in the
2131 		 * delivery queue, it's un-ordered OR ordered and the next to
2132 		 * deliver AND nothing else is stuck on the stream queue,
2133 		 * and there is room for it in the socket buffer. Let's just
2134 		 * stuff it up the buffer....
2135 		 */
2136 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2137 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2138 			asoc->highest_tsn_inside_nr_map = tsn;
2139 		}
2140 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2141 		    control, mid);
2142 
2143 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2144 		    control, &stcb->sctp_socket->so_rcv,
2145 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2146 
2147 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2148 			/* for ordered, bump what we delivered */
2149 			asoc->strmin[sid].last_mid_delivered++;
2150 		}
2151 		SCTP_STAT_INCR(sctps_recvexpress);
2152 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2153 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2154 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2155 		}
2156 		control = NULL;
2157 		goto finish_express_del;
2158 	}
2159 
2160 	/* Now will we need a chunk too? */
2161 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2162 		sctp_alloc_a_chunk(stcb, chk);
2163 		if (chk == NULL) {
2164 			/* No memory so we drop the chunk */
2165 			SCTP_STAT_INCR(sctps_nomem);
2166 			if (last_chunk == 0) {
2167 				/* we copied it, free the copy */
2168 				sctp_m_freem(dmbuf);
2169 			}
2170 			return (0);
2171 		}
2172 		chk->rec.data.tsn = tsn;
2173 		chk->no_fr_allowed = 0;
2174 		chk->rec.data.fsn = fsn;
2175 		chk->rec.data.mid = mid;
2176 		chk->rec.data.sid = sid;
2177 		chk->rec.data.ppid = ppid;
2178 		chk->rec.data.context = stcb->asoc.context;
2179 		chk->rec.data.doing_fast_retransmit = 0;
2180 		chk->rec.data.rcv_flags = chk_flags;
2181 		chk->asoc = asoc;
2182 		chk->send_size = the_len;
2183 		chk->whoTo = net;
2184 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2185 		    chk,
2186 		    control, mid);
2187 		atomic_add_int(&net->ref_count, 1);
2188 		chk->data = dmbuf;
2189 	}
2190 	/* Set the appropriate TSN mark */
2191 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2192 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2193 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2194 			asoc->highest_tsn_inside_nr_map = tsn;
2195 		}
2196 	} else {
2197 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2198 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2199 			asoc->highest_tsn_inside_map = tsn;
2200 		}
2201 	}
2202 	/* Now is it complete (i.e. not fragmented)? */
2203 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2204 		/*
2205 		 * Special check for when streams are resetting. We could be
2206 		 * smarter about this and check the actual stream to see if
2207 		 * it is not being reset... that way we would not create
2208 		 * head-of-line blocking (HOLB) between streams being reset
2209 		 * and those not being reset.
2211 		 */
2212 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2213 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2214 			/*
2215 			 * yep, it's past where we need to reset... go ahead
2216 			 * and queue it.
2217 			 */
2218 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2219 				/* first one on */
2220 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2221 			} else {
2222 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2223 				unsigned char inserted = 0;
2224 
2225 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2226 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2227 						continue;
2228 					} else {
2229 						/* found it */
2230 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2231 						inserted = 1;
2232 						break;
2233 					}
2234 				}
2235 				if (inserted == 0) {
2236 					 * Nothing in the queue had a
2237 					 * larger TSN, so this one must
2238 					 * be put at the end.
2239 					 * nextP.
2240 					 */
2241 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2242 				}
2243 			}
2244 			goto finish_express_del;
2245 		}
2246 		if (chk_flags & SCTP_DATA_UNORDERED) {
2247 			/* queue directly into socket buffer */
2248 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2249 			    control, mid);
2250 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2251 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2252 			    control,
2253 			    &stcb->sctp_socket->so_rcv, 1,
2254 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2255 
2256 		} else {
2257 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2258 			    mid);
2259 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2260 			if (*abort_flag) {
2261 				if (last_chunk) {
2262 					*m = NULL;
2263 				}
2264 				return (0);
2265 			}
2266 		}
2267 		goto finish_express_del;
2268 	}
2269 	/* If we reach here, it's a reassembly */
2270 	need_reasm_check = 1;
2271 	SCTPDBG(SCTP_DEBUG_XXX,
2272 	    "Queue data to stream for reasm control: %p MID: %u\n",
2273 	    control, mid);
2274 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2275 	if (*abort_flag) {
2276 		/*
2277 		 * the assoc is now gone and chk was put onto the reasm
2278 		 * queue, which has all been freed.
2279 		 */
2280 		if (last_chunk) {
2281 			*m = NULL;
2282 		}
2283 		return (0);
2284 	}
2285 finish_express_del:
2286 	/* Here we tidy up things */
2287 	if (tsn == (asoc->cumulative_tsn + 1)) {
2288 		/* Update cum-ack */
2289 		asoc->cumulative_tsn = tsn;
2290 	}
2291 	if (last_chunk) {
2292 		*m = NULL;
2293 	}
2294 	if (ordered) {
2295 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2296 	} else {
2297 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2298 	}
2299 	SCTP_STAT_INCR(sctps_recvdata);
2300 	/* Set it present please */
2301 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2302 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2303 	}
2304 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2305 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2306 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2307 	}
2308 	if (need_reasm_check) {
2309 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2310 		need_reasm_check = 0;
2311 	}
2312 	/* check the special flag for stream resets */
2313 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2314 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2315 		/*
2316 		 * we have finished working through the backlogged TSNs; now
2317 		 * it is time to reset streams. 1: call the reset function. 2:
2318 		 * free the pending_reply space. 3: distribute any chunks in
2319 		 * the pending_reply_queue.
2320 		 */
2321 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2322 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2323 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2324 		SCTP_FREE(liste, SCTP_M_STRESET);
2325 		/* sa_ignore FREED_MEMORY */
2326 		liste = TAILQ_FIRST(&asoc->resetHead);
2327 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2328 			/* All can be removed */
2329 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2330 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2331 				strm = &asoc->strmin[control->sinfo_stream];
2332 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2333 				if (*abort_flag) {
2334 					return (0);
2335 				}
2336 				if (need_reasm_check) {
2337 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2338 					need_reasm_check = 0;
2339 				}
2340 			}
2341 		} else {
2342 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2343 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2344 					break;
2345 				}
2346 				/*
2347 				 * If control->sinfo_tsn is <= liste->tsn we
2348 				 * can process it, which is the negation of
2349 				 * control->sinfo_tsn > liste->tsn
2350 				 */
2351 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2352 				strm = &asoc->strmin[control->sinfo_stream];
2353 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2354 				if (*abort_flag) {
2355 					return (0);
2356 				}
2357 				if (need_reasm_check) {
2358 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2359 					need_reasm_check = 0;
2360 				}
2361 			}
2362 		}
2363 	}
2364 	return (1);
2365 }
2366 
2367 static const int8_t sctp_map_lookup_tab[256] = {
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 4,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 5,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 4,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 6,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 4,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 5,
2380 	0, 1, 0, 2, 0, 1, 0, 3,
2381 	0, 1, 0, 2, 0, 1, 0, 4,
2382 	0, 1, 0, 2, 0, 1, 0, 3,
2383 	0, 1, 0, 2, 0, 1, 0, 7,
2384 	0, 1, 0, 2, 0, 1, 0, 3,
2385 	0, 1, 0, 2, 0, 1, 0, 4,
2386 	0, 1, 0, 2, 0, 1, 0, 3,
2387 	0, 1, 0, 2, 0, 1, 0, 5,
2388 	0, 1, 0, 2, 0, 1, 0, 3,
2389 	0, 1, 0, 2, 0, 1, 0, 4,
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 6,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 4,
2394 	0, 1, 0, 2, 0, 1, 0, 3,
2395 	0, 1, 0, 2, 0, 1, 0, 5,
2396 	0, 1, 0, 2, 0, 1, 0, 3,
2397 	0, 1, 0, 2, 0, 1, 0, 4,
2398 	0, 1, 0, 2, 0, 1, 0, 3,
2399 	0, 1, 0, 2, 0, 1, 0, 8
2400 };
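/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in 'val'
 * counting up from bit 0, i.e. the index of the first 0 bit. A sketch of
 * an equivalent (but slower) computation, for illustration only:
 *
 *	static int
 *	trailing_ones(uint8_t val)
 *	{
 *		int n = 0;
 *
 *		while (val & 1) {
 *			val >>= 1;
 *			n++;
 *		}
 *		return (n);
 *	}
 */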
2401 
2402 void
2403 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2404 {
2405 	/*
2406 	 * Now we also need to check the mapping array in a couple of ways.
2407 	 * 1) Did we move the cum-ack point?
2408 	 *
2409 	 * When you first glance at this you might think that all entries
2410 	 * that make up the position of the cum-ack would be in the
2411 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2412 	 * deliverable. That's true with one exception: when it's a fragmented
2413 	 * message, we may not deliver the data until some threshold (or all
2414 	 * of it) is in place. So we must OR the nr_mapping_array and
2415 	 * mapping_array to get a true picture of the cum-ack.
2416 	 */
2417 	struct sctp_association *asoc;
2418 	int at;
2419 	uint8_t val;
2420 	int slide_from, slide_end, lgap, distance;
2421 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2422 
2423 	asoc = &stcb->asoc;
2424 
2425 	old_cumack = asoc->cumulative_tsn;
2426 	old_base = asoc->mapping_array_base_tsn;
2427 	old_highest = asoc->highest_tsn_inside_map;
2428 	/*
2429 	 * We could probably improve this a small bit by calculating the
2430 	 * offset of the current cum-ack as the starting point.
2431 	 */
2432 	at = 0;
2433 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2434 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2435 		if (val == 0xff) {
2436 			at += 8;
2437 		} else {
2438 			/* there is a 0 bit */
2439 			at += sctp_map_lookup_tab[val];
2440 			break;
2441 		}
2442 	}
2443 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
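	/*
	 * A worked example (illustrative only): with combined bytes
	 * 0xff, 0x07, ... the loop above counts 8 set bits from the first
	 * byte and sctp_map_lookup_tab[0x07] = 3 from the second, so at
	 * is 11 and the cum-ack becomes mapping_array_base_tsn + 10.
	 */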
2444 
2445 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2446 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2447 #ifdef INVARIANTS
2448 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2449 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2450 #else
2451 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2452 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2453 		sctp_print_mapping_array(asoc);
2454 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2455 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2456 		}
2457 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2458 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2459 #endif
2460 	}
2461 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2462 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2463 	} else {
2464 		highest_tsn = asoc->highest_tsn_inside_map;
2465 	}
2466 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2467 		/* The complete array was completed by a single FR */
2468 		/* highest becomes the cum-ack */
2469 		int clr;
2470 #ifdef INVARIANTS
2471 		unsigned int i;
2472 #endif
2473 
2474 		/* clear the array */
2475 		clr = ((at + 7) >> 3);
2476 		if (clr > asoc->mapping_array_size) {
2477 			clr = asoc->mapping_array_size;
2478 		}
2479 		memset(asoc->mapping_array, 0, clr);
2480 		memset(asoc->nr_mapping_array, 0, clr);
2481 #ifdef INVARIANTS
2482 		for (i = 0; i < asoc->mapping_array_size; i++) {
2483 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2484 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2485 				sctp_print_mapping_array(asoc);
2486 			}
2487 		}
2488 #endif
2489 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2490 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2491 	} else if (at >= 8) {
2492 		/* we can slide the mapping array down */
2493 		/* slide_from holds where we hit the first NON 0xff byte */
2494 
2495 		/*
2496 		 * now calculate the ceiling of the move using our highest
2497 		 * TSN value
2498 		 */
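		/*
		 * A worked example (illustrative only): if highest_tsn is
		 * 37 TSNs past mapping_array_base_tsn, then lgap = 37 and
		 * slide_end = 4. With slide_from = 2, bytes 2..4 are copied
		 * down to 0..2 below and the base TSN advances by
		 * slide_from * 8 = 16.
		 */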
2499 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2500 		slide_end = (lgap >> 3);
2501 		if (slide_end < slide_from) {
2502 			sctp_print_mapping_array(asoc);
2503 #ifdef INVARIANTS
2504 			panic("impossible slide");
2505 #else
2506 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2507 			    lgap, slide_end, slide_from, at);
2508 			return;
2509 #endif
2510 		}
2511 		if (slide_end > asoc->mapping_array_size) {
2512 #ifdef INVARIANTS
2513 			panic("would overrun buffer");
2514 #else
2515 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2516 			    asoc->mapping_array_size, slide_end);
2517 			slide_end = asoc->mapping_array_size;
2518 #endif
2519 		}
2520 		distance = (slide_end - slide_from) + 1;
2521 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2522 			sctp_log_map(old_base, old_cumack, old_highest,
2523 			    SCTP_MAP_PREPARE_SLIDE);
2524 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2525 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2526 		}
2527 		if (distance + slide_from > asoc->mapping_array_size ||
2528 		    distance < 0) {
2529 			/*
2530 			 * Here we do NOT slide forward the array so that
2531 			 * hopefully when more data comes in to fill it up
2532 			 * we will be able to slide it forward. Really I
2533 			 * don't think this should happen :-0
2534 			 */
2535 
2536 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2537 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2538 				    (uint32_t)asoc->mapping_array_size,
2539 				    SCTP_MAP_SLIDE_NONE);
2540 			}
2541 		} else {
2542 			int ii;
2543 
2544 			for (ii = 0; ii < distance; ii++) {
2545 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2546 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2547 			}
2548 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2549 				asoc->mapping_array[ii] = 0;
2550 				asoc->nr_mapping_array[ii] = 0;
2551 			}
2552 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2553 				asoc->highest_tsn_inside_map += (slide_from << 3);
2554 			}
2555 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2556 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2557 			}
2558 			asoc->mapping_array_base_tsn += (slide_from << 3);
2559 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2560 				sctp_log_map(asoc->mapping_array_base_tsn,
2561 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2562 				    SCTP_MAP_SLIDE_RESULT);
2563 			}
2564 		}
2565 	}
2566 }
2567 
2568 void
2569 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2570 {
2571 	struct sctp_association *asoc;
2572 	uint32_t highest_tsn;
2573 	int is_a_gap;
2574 
2575 	sctp_slide_mapping_arrays(stcb);
2576 	asoc = &stcb->asoc;
2577 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2578 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2579 	} else {
2580 		highest_tsn = asoc->highest_tsn_inside_map;
2581 	}
2582 	/* Is there a gap now? */
2583 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2584 
2585 	/*
2586 	 * Now we need to see if we need to queue a sack or just start the
2587 	 * timer (if allowed).
2588 	 */
2589 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2590 		/*
2591 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2592 		 * sure the SACK timer is off and instead send a SHUTDOWN
2593 		 * and a SACK
2594 		 */
2595 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2596 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2597 			    stcb->sctp_ep, stcb, NULL,
2598 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2599 		}
2600 		sctp_send_shutdown(stcb,
2601 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2602 		if (is_a_gap) {
2603 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2604 		}
2605 	} else {
2606 		/*
2607 		 * CMT DAC algorithm: increase number of packets received
2608 		 * since last ack
2609 		 */
2610 		stcb->asoc.cmt_dac_pkts_rcvd++;
2611 
2612 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2613 							 * SACK */
2614 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2615 							 * longer is one */
2616 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2617 		    (is_a_gap) ||	/* is still a gap */
2618 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2619 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2620 		    ) {
2621 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 			    (stcb->asoc.send_sack == 0) &&
2624 			    (stcb->asoc.numduptsns == 0) &&
2625 			    (stcb->asoc.delayed_ack) &&
2626 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2627 				/*
2628 				 * CMT DAC algorithm: With CMT, delay acks
2629 				 * even in the face of reordering.
2630 				 * Therefore, acks that do not have to be
2631 				 * sent because of the above reasons will
2632 				 * be delayed. That is, acks that would
2633 				 * have been sent due to gap reports will
2634 				 * be delayed with DAC. Start the delayed
2635 				 * ack timer.
2637 				 */
2638 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2639 				    stcb->sctp_ep, stcb, NULL);
2640 			} else {
2641 				/*
2642 				 * Ok, we must build a SACK since the timer
2643 				 * is pending, we got our first packet, OR
2644 				 * there are gaps or duplicates.
2645 				 */
2646 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2647 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2648 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2649 			}
2650 		} else {
2651 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2652 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2653 				    stcb->sctp_ep, stcb, NULL);
2654 			}
2655 		}
2656 	}
2657 }
2658 
2659 int
2660 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2661     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2662     struct sctp_nets *net, uint32_t *high_tsn)
2663 {
2664 	struct sctp_chunkhdr *ch, chunk_buf;
2665 	struct sctp_association *asoc;
2666 	int num_chunks = 0;	/* number of control chunks processed */
2667 	int stop_proc = 0;
2668 	int break_flag, last_chunk;
2669 	int abort_flag = 0, was_a_gap;
2670 	struct mbuf *m;
2671 	uint32_t highest_tsn;
2672 	uint16_t chk_length;
2673 
2674 	/* set the rwnd */
2675 	sctp_set_rwnd(stcb, &stcb->asoc);
2676 
2677 	m = *mm;
2678 	SCTP_TCB_LOCK_ASSERT(stcb);
2679 	asoc = &stcb->asoc;
2680 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2681 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2682 	} else {
2683 		highest_tsn = asoc->highest_tsn_inside_map;
2684 	}
2685 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2686 	/*
2687 	 * setup where we got the last DATA packet from for any SACK that
2688 	 * may need to go out. Don't bump the net. This is done ONLY when a
2689 	 * chunk is assigned.
2690 	 */
2691 	asoc->last_data_chunk_from = net;
2692 
2693 	/*-
2694 	 * Now before we proceed we must figure out if this is a wasted
2695 	 * cluster... i.e. it is a small packet sent in and yet the driver
2696 	 * underneath allocated a full cluster for it. If so we must copy it
2697 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2698 	 * with cluster starvation.
2699 	 */
2700 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2701 		/* we only handle mbufs that are singletons.. not chains */
2702 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2703 		if (m) {
2704 			/* ok lets see if we can copy the data up */
2705 			caddr_t *from, *to;
2706 
2707 			/* get the pointers and copy */
2708 			to = mtod(m, caddr_t *);
2709 			from = mtod((*mm), caddr_t *);
2710 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2711 			/* copy the length and free up the old */
2712 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2713 			sctp_m_freem(*mm);
2714 			/* success, back copy */
2715 			*mm = m;
2716 		} else {
2717 			/* We are in trouble in the mbuf world .. yikes */
2718 			m = *mm;
2719 		}
2720 	}
2721 	/* get pointer to the first chunk header */
2722 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2723 	    sizeof(struct sctp_chunkhdr),
2724 	    (uint8_t *)&chunk_buf);
2725 	if (ch == NULL) {
2726 		return (1);
2727 	}
2728 	/*
2729 	 * process all DATA chunks...
2730 	 */
2731 	*high_tsn = asoc->cumulative_tsn;
2732 	break_flag = 0;
2733 	asoc->data_pkts_seen++;
2734 	while (stop_proc == 0) {
2735 		/* validate chunk length */
2736 		chk_length = ntohs(ch->chunk_length);
2737 		if (length - *offset < chk_length) {
2738 			/* all done, mutilated chunk */
2739 			stop_proc = 1;
2740 			continue;
2741 		}
2742 		if ((asoc->idata_supported == 1) &&
2743 		    (ch->chunk_type == SCTP_DATA)) {
2744 			struct mbuf *op_err;
2745 			char msg[SCTP_DIAG_INFO_LEN];
2746 
2747 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2748 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2749 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2750 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2751 			return (2);
2752 		}
2753 		if ((asoc->idata_supported == 0) &&
2754 		    (ch->chunk_type == SCTP_IDATA)) {
2755 			struct mbuf *op_err;
2756 			char msg[SCTP_DIAG_INFO_LEN];
2757 
2758 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2759 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2760 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2761 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2762 			return (2);
2763 		}
2764 		if ((ch->chunk_type == SCTP_DATA) ||
2765 		    (ch->chunk_type == SCTP_IDATA)) {
2766 			uint16_t clen;
2767 
2768 			if (ch->chunk_type == SCTP_DATA) {
2769 				clen = sizeof(struct sctp_data_chunk);
2770 			} else {
2771 				clen = sizeof(struct sctp_idata_chunk);
2772 			}
2773 			if (chk_length < clen) {
2774 				/*
2775 				 * Need to send an abort since we had an
2776 				 * invalid data chunk.
2777 				 */
2778 				struct mbuf *op_err;
2779 				char msg[SCTP_DIAG_INFO_LEN];
2780 
2781 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2782 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2783 				    chk_length);
2784 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2785 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2786 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2787 				return (2);
2788 			}
2789 #ifdef SCTP_AUDITING_ENABLED
2790 			sctp_audit_log(0xB1, 0);
2791 #endif
2792 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2793 				last_chunk = 1;
2794 			} else {
2795 				last_chunk = 0;
2796 			}
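			/*
			 * A chunk is "last" when its 32-bit padded length
			 * reaches exactly to the end of the datagram;
			 * conceptually (a sketch of the rounding, not the
			 * macro's exact definition):
			 *
			 *   SCTP_SIZE32(len) == ((len) + 3) & ~3
			 *
			 * Only then may the mbuf chain be stolen instead
			 * of copied by sctp_process_a_data_chunk().
			 */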
2797 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2798 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2799 			    last_chunk, ch->chunk_type)) {
2800 				num_chunks++;
2801 			}
2802 			if (abort_flag)
2803 				return (2);
2804 
2805 			if (break_flag) {
2806 				/*
2807 				 * Set because we ran out of rwnd space and
2808 				 * have no drop-report space left.
2809 				 */
2810 				stop_proc = 1;
2811 				continue;
2812 			}
2813 		} else {
2814 			/* not a data chunk in the data region */
2815 			switch (ch->chunk_type) {
2816 			case SCTP_INITIATION:
2817 			case SCTP_INITIATION_ACK:
2818 			case SCTP_SELECTIVE_ACK:
2819 			case SCTP_NR_SELECTIVE_ACK:
2820 			case SCTP_HEARTBEAT_REQUEST:
2821 			case SCTP_HEARTBEAT_ACK:
2822 			case SCTP_ABORT_ASSOCIATION:
2823 			case SCTP_SHUTDOWN:
2824 			case SCTP_SHUTDOWN_ACK:
2825 			case SCTP_OPERATION_ERROR:
2826 			case SCTP_COOKIE_ECHO:
2827 			case SCTP_COOKIE_ACK:
2828 			case SCTP_ECN_ECHO:
2829 			case SCTP_ECN_CWR:
2830 			case SCTP_SHUTDOWN_COMPLETE:
2831 			case SCTP_AUTHENTICATION:
2832 			case SCTP_ASCONF_ACK:
2833 			case SCTP_PACKET_DROPPED:
2834 			case SCTP_STREAM_RESET:
2835 			case SCTP_FORWARD_CUM_TSN:
2836 			case SCTP_ASCONF:
2837 				{
2838 					/*
2839 					 * Now, what do we do with KNOWN
2840 					 * chunks that are NOT in the right
2841 					 * place?
2842 					 *
2843 					 * For now, I do nothing but ignore
2844 					 * them. We may later want to add
2845 					 * sysctl stuff to switch out and do
2846 					 * either an ABORT() or possibly
2847 					 * process them.
2848 					 */
2849 					struct mbuf *op_err;
2850 					char msg[SCTP_DIAG_INFO_LEN];
2851 
2852 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2853 					    ch->chunk_type);
2854 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2855 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2856 					return (2);
2857 				}
2858 			default:
2859 				/*
2860 				 * Unknown chunk type: use bit rules after
2861 				 * checking length
2862 				 */
2863 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2864 					/*
2865 					 * Need to send an abort since we
2866 					 * had an invalid chunk.
2867 					 */
2868 					struct mbuf *op_err;
2869 					char msg[SCTP_DIAG_INFO_LEN];
2870 
2871 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2872 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2873 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2874 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2875 					return (2);
2876 				}
2877 				if (ch->chunk_type & 0x40) {
2878 					/* Add an error report to the queue */
2879 					struct mbuf *op_err;
2880 					struct sctp_gen_error_cause *cause;
2881 
2882 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2883 					    0, M_NOWAIT, 1, MT_DATA);
2884 					if (op_err != NULL) {
2885 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2886 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2887 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2888 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2889 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2890 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2891 							sctp_queue_op_err(stcb, op_err);
2892 						} else {
2893 							sctp_m_freem(op_err);
2894 						}
2895 					}
2896 				}
2897 				if ((ch->chunk_type & 0x80) == 0) {
2898 					/* discard the rest of this packet */
2899 					stop_proc = 1;
2900 				}	/* else skip this bad chunk and
2901 					 * continue... */
2902 				break;
2903 			}	/* switch of chunk type */
2904 		}
2905 		*offset += SCTP_SIZE32(chk_length);
2906 		if ((*offset >= length) || stop_proc) {
2907 			/* no more data left in the mbuf chain */
2908 			stop_proc = 1;
2909 			continue;
2910 		}
2911 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2912 		    sizeof(struct sctp_chunkhdr),
2913 		    (uint8_t *)&chunk_buf);
2914 		if (ch == NULL) {
2915 			*offset = length;
2916 			stop_proc = 1;
2917 			continue;
2918 		}
2919 	}
2920 	if (break_flag) {
2921 		/*
2922 		 * we need to report rwnd overrun drops.
2923 		 */
2924 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2925 	}
2926 	if (num_chunks) {
2927 		/*
2928 		 * We got data; update the time for auto-close and give the
2929 		 * peer credit for being alive.
2930 		 */
2931 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2932 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2933 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2934 			    stcb->asoc.overall_error_count,
2935 			    0,
2936 			    SCTP_FROM_SCTP_INDATA,
2937 			    __LINE__);
2938 		}
2939 		stcb->asoc.overall_error_count = 0;
2940 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2941 	}
2942 	/* now service all of the reassm queue if needed */
2943 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2944 		/* Assure that we ack right away */
2945 		stcb->asoc.send_sack = 1;
2946 	}
2947 	/* Start a sack timer or QUEUE a SACK for sending */
2948 	sctp_sack_check(stcb, was_a_gap);
2949 	return (0);
2950 }
2951 
2952 static int
2953 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2954     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2955     int *num_frs,
2956     uint32_t *biggest_newly_acked_tsn,
2957     uint32_t *this_sack_lowest_newack,
2958     int *rto_ok)
2959 {
2960 	struct sctp_tmit_chunk *tp1;
2961 	unsigned int theTSN;
2962 	int j, wake_him = 0, circled = 0;
2963 
2964 	/* Recover the tp1 we last saw */
2965 	tp1 = *p_tp1;
2966 	if (tp1 == NULL) {
2967 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2968 	}
2969 	for (j = frag_strt; j <= frag_end; j++) {
2970 		theTSN = j + last_tsn;
2971 		while (tp1) {
2972 			if (tp1->rec.data.doing_fast_retransmit)
2973 				(*num_frs) += 1;
2974 
2975 			/*-
2976 			 * CMT: CUCv2 algorithm. For each TSN being
2977 			 * processed from the sent queue, track the
2978 			 * next expected pseudo-cumack, or
2979 			 * rtx_pseudo_cumack, if required. Separate
2980 			 * cumack trackers for first transmissions,
2981 			 * and retransmissions.
2982 			 */
2983 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2984 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2985 			    (tp1->snd_count == 1)) {
2986 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2987 				tp1->whoTo->find_pseudo_cumack = 0;
2988 			}
2989 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2990 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2991 			    (tp1->snd_count > 1)) {
2992 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2993 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2994 			}
2995 			if (tp1->rec.data.tsn == theTSN) {
2996 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2997 					/*-
2998 					 * must be held until
2999 					 * cum-ack passes
3000 					 */
3001 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3002 						/*-
3003 						 * If it is less than RESEND, it is
3004 						 * now no longer in flight.
3005 						 * Higher values may already be set
3006 						 * via previous Gap Ack Blocks...
3007 						 * i.e. ACKED or RESEND.
3008 						 */
3009 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3010 						    *biggest_newly_acked_tsn)) {
3011 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3012 						}
3013 						/*-
3014 						 * CMT: SFR algo (and HTNA) - set
3015 						 * saw_newack to 1 for dest being
3016 						 * newly acked. update
3017 						 * this_sack_highest_newack if
3018 						 * appropriate.
3019 						 */
3020 						if (tp1->rec.data.chunk_was_revoked == 0)
3021 							tp1->whoTo->saw_newack = 1;
3022 
3023 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3024 						    tp1->whoTo->this_sack_highest_newack)) {
3025 							tp1->whoTo->this_sack_highest_newack =
3026 							    tp1->rec.data.tsn;
3027 						}
3028 						/*-
3029 						 * CMT DAC algo: also update
3030 						 * this_sack_lowest_newack
3031 						 */
3032 						if (*this_sack_lowest_newack == 0) {
3033 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3034 								sctp_log_sack(*this_sack_lowest_newack,
3035 								    last_tsn,
3036 								    tp1->rec.data.tsn,
3037 								    0,
3038 								    0,
3039 								    SCTP_LOG_TSN_ACKED);
3040 							}
3041 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3042 						}
3043 						/*-
3044 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3045 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3046 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3047 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3048 						 * Separate pseudo_cumack trackers for first transmissions and
3049 						 * retransmissions.
3050 						 */
3051 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3052 							if (tp1->rec.data.chunk_was_revoked == 0) {
3053 								tp1->whoTo->new_pseudo_cumack = 1;
3054 							}
3055 							tp1->whoTo->find_pseudo_cumack = 1;
3056 						}
3057 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3058 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3059 						}
3060 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3061 							if (tp1->rec.data.chunk_was_revoked == 0) {
3062 								tp1->whoTo->new_pseudo_cumack = 1;
3063 							}
3064 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3065 						}
3066 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3067 							sctp_log_sack(*biggest_newly_acked_tsn,
3068 							    last_tsn,
3069 							    tp1->rec.data.tsn,
3070 							    frag_strt,
3071 							    frag_end,
3072 							    SCTP_LOG_TSN_ACKED);
3073 						}
3074 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3075 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3076 							    tp1->whoTo->flight_size,
3077 							    tp1->book_size,
3078 							    (uint32_t)(uintptr_t)tp1->whoTo,
3079 							    tp1->rec.data.tsn);
3080 						}
3081 						sctp_flight_size_decrease(tp1);
3082 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3083 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3084 							    tp1);
3085 						}
3086 						sctp_total_flight_decrease(stcb, tp1);
3087 
3088 						tp1->whoTo->net_ack += tp1->send_size;
3089 						if (tp1->snd_count < 2) {
3090 							/*-
3091 							 * True non-retransmitted chunk
3092 							 */
3093 							tp1->whoTo->net_ack2 += tp1->send_size;
3094 
3095 							/*-
3096 							 * update RTO too?
3097 							 */
3098 							if (tp1->do_rtt) {
3099 								if (*rto_ok &&
3100 								    sctp_calculate_rto(stcb,
3101 								    &stcb->asoc,
3102 								    tp1->whoTo,
3103 								    &tp1->sent_rcv_time,
3104 								    SCTP_RTT_FROM_DATA)) {
3105 									*rto_ok = 0;
3106 								}
3107 								if (tp1->whoTo->rto_needed == 0) {
3108 									tp1->whoTo->rto_needed = 1;
3109 								}
3110 								tp1->do_rtt = 0;
3111 							}
3112 						}
3113 					}
3114 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3115 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3116 						    stcb->asoc.this_sack_highest_gap)) {
3117 							stcb->asoc.this_sack_highest_gap =
3118 							    tp1->rec.data.tsn;
3119 						}
3120 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3121 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3122 #ifdef SCTP_AUDITING_ENABLED
3123 							sctp_audit_log(0xB2,
3124 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3125 #endif
3126 						}
3127 					}
3128 					/*-
3129 					 * All chunks NOT UNSENT fall through here and are marked
3130 					 * (leave PR-SCTP ones that are to skip alone though)
3131 					 */
3132 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3133 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3134 						tp1->sent = SCTP_DATAGRAM_MARKED;
3135 					}
3136 					if (tp1->rec.data.chunk_was_revoked) {
3137 						/* deflate the cwnd */
3138 						tp1->whoTo->cwnd -= tp1->book_size;
3139 						tp1->rec.data.chunk_was_revoked = 0;
3140 					}
3141 					/* NR Sack code here */
3142 					if (nr_sacking &&
3143 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3144 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3145 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3146 #ifdef INVARIANTS
3147 						} else {
3148 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3149 #endif
3150 						}
3151 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3152 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3153 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3154 							stcb->asoc.trigger_reset = 1;
3155 						}
3156 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3157 						if (tp1->data) {
3158 							/*
3159 							 * sa_ignore
3160 							 * NO_NULL_CHK
3161 							 */
3162 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3163 							sctp_m_freem(tp1->data);
3164 							tp1->data = NULL;
3165 						}
3166 						wake_him++;
3167 					}
3168 				}
3169 				break;
3170 			}	/* if (tp1->tsn == theTSN) */
3171 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3172 				break;
3173 			}
3174 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3175 			if ((tp1 == NULL) && (circled == 0)) {
3176 				circled++;
3177 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3178 			}
3179 		}		/* end while (tp1) */
3180 		if (tp1 == NULL) {
3181 			circled = 0;
3182 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3183 		}
3184 		/* In case the fragments were not in order we must reset */
3185 	}			/* end for (j = fragStart */
3186 	*p_tp1 = tp1;
3187 	return (wake_him);	/* Return value only used for nr-sack */
3188 }
3189 
3190 static int
3191 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3192     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3193     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3194     int num_seg, int num_nr_seg, int *rto_ok)
3195 {
3196 	struct sctp_gap_ack_block *frag, block;
3197 	struct sctp_tmit_chunk *tp1;
3198 	int i;
3199 	int num_frs = 0;
3200 	int chunk_freed;
3201 	int non_revocable;
3202 	uint16_t frag_strt, frag_end, prev_frag_end;
3203 
3204 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205 	prev_frag_end = 0;
3206 	chunk_freed = 0;
3207 
3208 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3209 		if (i == num_seg) {
3210 			prev_frag_end = 0;
3211 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3212 		}
3213 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3214 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3215 		*offset += sizeof(block);
3216 		if (frag == NULL) {
3217 			return (chunk_freed);
3218 		}
3219 		frag_strt = ntohs(frag->start);
3220 		frag_end = ntohs(frag->end);
3221 
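		/*
		 * Gap ack block boundaries are offsets from the cumulative
		 * TSN ack (last_tsn), so this block reports TSNs
		 * last_tsn + frag_strt through last_tsn + frag_end as
		 * received. E.g. with a cum-ack of 100 and a block of
		 * [2,3], TSNs 102 and 103 are being acked.
		 */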
3222 		if (frag_strt > frag_end) {
3223 			/* This gap report is malformed, skip it. */
3224 			continue;
3225 		}
3226 		if (frag_strt <= prev_frag_end) {
3227 			/* This gap report is not in order, so restart. */
3228 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3229 		}
3230 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3231 			*biggest_tsn_acked = last_tsn + frag_end;
3232 		}
3233 		if (i < num_seg) {
3234 			non_revocable = 0;
3235 		} else {
3236 			non_revocable = 1;
3237 		}
3238 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3239 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3240 		    this_sack_lowest_newack, rto_ok)) {
3241 			chunk_freed = 1;
3242 		}
3243 		prev_frag_end = frag_end;
3244 	}
3245 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3246 		if (num_frs)
3247 			sctp_log_fr(*biggest_tsn_acked,
3248 			    *biggest_newly_acked_tsn,
3249 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3250 	}
3251 	return (chunk_freed);
3252 }
3253 
3254 static void
3255 sctp_check_for_revoked(struct sctp_tcb *stcb,
3256     struct sctp_association *asoc, uint32_t cumack,
3257     uint32_t biggest_tsn_acked)
3258 {
3259 	struct sctp_tmit_chunk *tp1;
3260 
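	/*
	 * A chunk is "revoked" when an earlier SACK reported it in a gap
	 * ack block but the current SACK no longer covers it.  Such chunks
	 * must be moved back to SENT so they count as in flight again.
	 */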
3261 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3262 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3263 			/*
3264 			 * ok this guy is either ACKED or MARKED. If it is
3265 			 * ACKED it has been previously acked but not this
3266 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3267 			 * again.
3268 			 */
3269 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3270 				break;
3271 			}
3272 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3273 				/* it has been revoked */
3274 				tp1->sent = SCTP_DATAGRAM_SENT;
3275 				tp1->rec.data.chunk_was_revoked = 1;
3276 				/*
3277 				 * We must add this stuff back in to assure
3278 				 * timers and such get started.
3279 				 */
3280 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3281 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3282 					    tp1->whoTo->flight_size,
3283 					    tp1->book_size,
3284 					    (uint32_t)(uintptr_t)tp1->whoTo,
3285 					    tp1->rec.data.tsn);
3286 				}
3287 				sctp_flight_size_increase(tp1);
3288 				sctp_total_flight_increase(stcb, tp1);
3289 				/*
3290 				 * We inflate the cwnd to compensate for our
3291 				 * artificial inflation of the flight_size.
3292 				 */
3293 				tp1->whoTo->cwnd += tp1->book_size;
3294 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3295 					sctp_log_sack(asoc->last_acked_seq,
3296 					    cumack,
3297 					    tp1->rec.data.tsn,
3298 					    0,
3299 					    0,
3300 					    SCTP_LOG_TSN_REVOKED);
3301 				}
3302 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3303 				/* it has been re-acked in this SACK */
3304 				tp1->sent = SCTP_DATAGRAM_ACKED;
3305 			}
3306 		}
3307 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3308 			break;
3309 	}
3310 }
3311 
3312 static void
3313 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3314     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3315 {
3316 	struct sctp_tmit_chunk *tp1;
3317 	int strike_flag = 0;
3318 	struct timeval now;
3319 	int tot_retrans = 0;
3320 	uint32_t sending_seq;
3321 	struct sctp_nets *net;
3322 	int num_dests_sacked = 0;
3323 
3324 	/*
3325 	 * select the sending_seq, this is either the next thing ready to be
3326 	 * sent but not transmitted, OR, the next seq we assign.
3327 	 */
3328 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3329 	if (tp1 == NULL) {
3330 		sending_seq = asoc->sending_seq;
3331 	} else {
3332 		sending_seq = tp1->rec.data.tsn;
3333 	}
3334 
3335 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3336 	if ((asoc->sctp_cmt_on_off > 0) &&
3337 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3338 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3339 			if (net->saw_newack)
3340 				num_dests_sacked++;
3341 		}
3342 	}
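	/*
	 * num_dests_sacked counts the destinations that saw new acks in
	 * this SACK (only computed when CMT DAC is in use).  The DAC
	 * "mark by one more" step below is applied only when the SACK is
	 * not mixed, i.e. num_dests_sacked == 1.
	 */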
3343 	if (stcb->asoc.prsctp_supported) {
3344 		(void)SCTP_GETTIME_TIMEVAL(&now);
3345 	}
3346 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3347 		strike_flag = 0;
3348 		if (tp1->no_fr_allowed) {
3349 			/* this one had a timeout or something */
3350 			continue;
3351 		}
3352 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3353 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3354 				sctp_log_fr(biggest_tsn_newly_acked,
3355 				    tp1->rec.data.tsn,
3356 				    tp1->sent,
3357 				    SCTP_FR_LOG_CHECK_STRIKE);
3358 		}
3359 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3360 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3361 			/* done */
3362 			break;
3363 		}
3364 		if (stcb->asoc.prsctp_supported) {
3365 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3366 				/* Is it expired? */
3367 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3368 					/* Yes so drop it */
3369 					if (tp1->data != NULL) {
3370 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3371 						    SCTP_SO_NOT_LOCKED);
3372 					}
3373 					continue;
3374 				}
3375 			}
3376 		}
3377 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3378 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3379 			/* we are beyond the tsn in the sack  */
3380 			break;
3381 		}
3382 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3383 			/* either a RESEND, ACKED, or MARKED */
3384 			/* skip */
3385 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3386 				/* Continue striking FWD-TSN chunks */
3387 				tp1->rec.data.fwd_tsn_cnt++;
3388 			}
3389 			continue;
3390 		}
3391 		/*
3392 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3393 		 */
3394 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3395 			/*
3396 			 * No new acks were received for data sent to this
3397 			 * dest. Therefore, according to the SFR algo for
3398 			 * CMT, no data sent to this dest can be marked for
3399 			 * FR using this SACK.
3400 			 */
3401 			continue;
3402 		} else if (tp1->whoTo &&
3403 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3404 			    tp1->whoTo->this_sack_highest_newack) &&
3405 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3406 			/*
3407 			 * CMT: New acks were received for data sent to
3408 			 * this dest. But no new acks were seen for data
3409 			 * sent after tp1. Therefore, according to the SFR
3410 			 * algo for CMT, tp1 cannot be marked for FR using
3411 			 * this SACK. This step covers part of the DAC algo
3412 			 * and the HTNA algo as well.
3413 			 */
3414 			continue;
3415 		}
3416 		/*
3417 		 * Here we check to see if we have already done a FR
3418 		 * and if so whether the biggest TSN we saw in the SACK is
3419 		 * smaller than the recovery point. If so we don't strike
3420 		 * the TSN... otherwise we CAN strike the TSN.
3421 		 */
3422 		/*
3423 		 * @@@ JRI: Check for CMT if (accum_moved &&
3424 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3425 		 * 0)) {
3426 		 */
3427 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3428 			/*
3429 			 * Strike the TSN if in fast-recovery and cum-ack
3430 			 * moved.
3431 			 */
3432 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3433 				sctp_log_fr(biggest_tsn_newly_acked,
3434 				    tp1->rec.data.tsn,
3435 				    tp1->sent,
3436 				    SCTP_FR_LOG_STRIKE_CHUNK);
3437 			}
3438 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3439 				tp1->sent++;
3440 			}
3441 			if ((asoc->sctp_cmt_on_off > 0) &&
3442 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3443 				/*
3444 				 * CMT DAC algorithm: If the SACK's DAC flag
3445 				 * is 0, the lowest_newack test will not pass
3446 				 * because it would have been set to the
3447 				 * cumack earlier. If the chunk is not already
3448 				 * to be rtx'd, the SACK is not mixed, and tp1
3449 				 * is not between two sacked TSNs, then mark it
3450 				 * by one more. NOTE that we mark one
3451 				 * additional time since the SACK DAC flag
3452 				 * indicates that two packets have been
3453 				 * received after this missing TSN.
3454 				 */
3455 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3456 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3457 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3458 						sctp_log_fr(16 + num_dests_sacked,
3459 						    tp1->rec.data.tsn,
3460 						    tp1->sent,
3461 						    SCTP_FR_LOG_STRIKE_CHUNK);
3462 					}
3463 					tp1->sent++;
3464 				}
3465 			}
3466 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3467 		    (asoc->sctp_cmt_on_off == 0)) {
3468 			/*
3469 			 * For those that have done a FR we must take
3470 			 * special consideration if we strike. I.e., the
3471 			 * biggest_newly_acked must be higher than the
3472 			 * sending_seq at the time we did the FR.
3473 			 */
3474 			if (
3475 #ifdef SCTP_FR_TO_ALTERNATE
3476 			/*
3477 			 * If FR's go to new networks, then we must only do
3478 			 * this for singly homed asoc's. However if the FR's
3479 			 * go to the same network (Armando's work) then it's
3480 			 * ok to FR multiple times.
3481 			 */
3482 			    (asoc->numnets < 2)
3483 #else
3484 			    (1)
3485 #endif
3486 			    ) {
3487 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3488 				    tp1->rec.data.fast_retran_tsn)) {
3489 					/*
3490 					 * Strike the TSN, since this ack is
3491 					 * beyond where things were when we
3492 					 * did a FR.
3493 					 */
3494 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3495 						sctp_log_fr(biggest_tsn_newly_acked,
3496 						    tp1->rec.data.tsn,
3497 						    tp1->sent,
3498 						    SCTP_FR_LOG_STRIKE_CHUNK);
3499 					}
3500 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3501 						tp1->sent++;
3502 					}
3503 					strike_flag = 1;
3504 					if ((asoc->sctp_cmt_on_off > 0) &&
3505 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3506 						/*
3507 						 * CMT DAC algorithm: If
3508 						 * the SACK's DAC flag is 0,
3509 						 * the lowest_newack test
3510 						 * will not pass because it
3511 						 * would have been set to
3512 						 * the cumack earlier. If
3513 						 * the chunk is not already
3514 						 * to be rtx'd, the SACK is
3515 						 * not mixed, and tp1 is not
3516 						 * between two sacked TSNs,
3517 						 * then mark it by one more.
3518 						 * NOTE that we mark one
3519 						 * additional time since the
3520 						 * SACK DAC flag indicates
3521 						 * that two packets have
3522 						 * been received after this
3523 						 * missing TSN.
3524 						 */
3525 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3526 						    (num_dests_sacked == 1) &&
3527 						    SCTP_TSN_GT(this_sack_lowest_newack,
3528 						    tp1->rec.data.tsn)) {
3529 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3530 								sctp_log_fr(32 + num_dests_sacked,
3531 								    tp1->rec.data.tsn,
3532 								    tp1->sent,
3533 								    SCTP_FR_LOG_STRIKE_CHUNK);
3534 							}
3535 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3536 								tp1->sent++;
3537 							}
3538 						}
3539 					}
3540 				}
3541 			}
3542 			/*
3543 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3544 			 * algo covers HTNA.
3545 			 */
3546 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3547 		    biggest_tsn_newly_acked)) {
3548 			/*
3549 			 * We don't strike these: this is the HTNA
3550 			 * algorithm, i.e. we don't strike if our TSN is
3551 			 * larger than the Highest TSN Newly Acked.
3552 			 */
3553 			;
3554 		} else {
3555 			/* Strike the TSN */
3556 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3557 				sctp_log_fr(biggest_tsn_newly_acked,
3558 				    tp1->rec.data.tsn,
3559 				    tp1->sent,
3560 				    SCTP_FR_LOG_STRIKE_CHUNK);
3561 			}
3562 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3563 				tp1->sent++;
3564 			}
3565 			if ((asoc->sctp_cmt_on_off > 0) &&
3566 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3567 				/*
3568 				 * CMT DAC algorithm: If the SACK's DAC flag
3569 				 * is 0, the lowest_newack test will not pass
3570 				 * because it would have been set to the
3571 				 * cumack earlier. If the chunk is not already
3572 				 * to be rtx'd, the SACK is not mixed, and tp1
3573 				 * is not between two sacked TSNs, then mark it
3574 				 * by one more. NOTE that we mark one
3575 				 * additional time since the SACK DAC flag
3576 				 * indicates that two packets have been
3577 				 * received after this missing TSN.
3578 				 */
3579 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3580 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3581 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3582 						sctp_log_fr(48 + num_dests_sacked,
3583 						    tp1->rec.data.tsn,
3584 						    tp1->sent,
3585 						    SCTP_FR_LOG_STRIKE_CHUNK);
3586 					}
3587 					tp1->sent++;
3588 				}
3589 			}
3590 		}
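		/*
		 * Each strike increments tp1->sent; once it reaches
		 * SCTP_DATAGRAM_RESEND (normally after three strikes) the
		 * chunk is marked for fast retransmission and the flight
		 * size and rwnd bookkeeping below runs exactly once.
		 */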
3591 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3592 			struct sctp_nets *alt;
3593 
3594 			/* fix counts and things */
3595 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3596 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3597 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3598 				    tp1->book_size,
3599 				    (uint32_t)(uintptr_t)tp1->whoTo,
3600 				    tp1->rec.data.tsn);
3601 			}
3602 			if (tp1->whoTo) {
3603 				tp1->whoTo->net_ack++;
3604 				sctp_flight_size_decrease(tp1);
3605 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3606 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3607 					    tp1);
3608 				}
3609 			}
3610 
3611 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3612 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3613 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3614 			}
3615 			/* add back to the rwnd */
3616 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3617 
3618 			/* remove from the total flight */
3619 			sctp_total_flight_decrease(stcb, tp1);
3620 
3621 			if ((stcb->asoc.prsctp_supported) &&
3622 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3623 				/*
3624 				 * Has it been retransmitted tv_sec times? -
3625 				 * we store the retran count there.
3626 				 */
3627 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3628 					/* Yes, so drop it */
3629 					if (tp1->data != NULL) {
3630 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3631 						    SCTP_SO_NOT_LOCKED);
3632 					}
3633 					/* Make sure to flag we had a FR */
3634 					if (tp1->whoTo != NULL) {
3635 						tp1->whoTo->net_ack++;
3636 					}
3637 					continue;
3638 				}
3639 			}
3640 			/*
3641 			 * SCTP_PRINTF("OK, we are now ready to FR this
3642 			 * guy\n");
3643 			 */
3644 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3645 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3646 				    0, SCTP_FR_MARKED);
3647 			}
3648 			if (strike_flag) {
3649 				/* This is a subsequent FR */
3650 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3651 			}
3652 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3653 			if (asoc->sctp_cmt_on_off > 0) {
3654 				/*
3655 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3656 				 * If CMT is being used, then pick dest with
3657 				 * largest ssthresh for any retransmission.
3658 				 */
3659 				tp1->no_fr_allowed = 1;
3660 				alt = tp1->whoTo;
3661 				/* sa_ignore NO_NULL_CHK */
3662 				if (asoc->sctp_cmt_pf > 0) {
3663 					/*
3664 					 * JRS 5/18/07 - If CMT PF is on,
3665 					 * use the PF version of
3666 					 * find_alt_net()
3667 					 */
3668 					alt = sctp_find_alternate_net(stcb, alt, 2);
3669 				} else {
3670 					/*
3671 					 * JRS 5/18/07 - If only CMT is on,
3672 					 * use the CMT version of
3673 					 * find_alt_net()
3674 					 */
3675 					/* sa_ignore NO_NULL_CHK */
3676 					alt = sctp_find_alternate_net(stcb, alt, 1);
3677 				}
3678 				if (alt == NULL) {
3679 					alt = tp1->whoTo;
3680 				}
3681 				/*
3682 				 * CUCv2: If a different dest is picked for
3683 				 * the retransmission, then new
3684 				 * (rtx-)pseudo_cumack needs to be tracked
3685 				 * for orig dest. Let CUCv2 track new (rtx-)
3686 				 * pseudo-cumack always.
3687 				 */
3688 				if (tp1->whoTo) {
3689 					tp1->whoTo->find_pseudo_cumack = 1;
3690 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3691 				}
3692 
3693 			} else {	/* CMT is OFF */
3694 
3695 #ifdef SCTP_FR_TO_ALTERNATE
3696 				/* Can we find an alternate? */
3697 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3698 #else
3699 				/*
3700 				 * default behavior is to NOT retransmit
3701 				 * FR's to an alternate. Armando Caro's
3702 				 * paper details why.
3703 				 */
3704 				alt = tp1->whoTo;
3705 #endif
3706 			}
3707 
3708 			tp1->rec.data.doing_fast_retransmit = 1;
3709 			tot_retrans++;
3710 			/* mark the sending seq for possible subsequent FR's */
3711 			/*
3712 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3713 			 * (uint32_t)tpi->rec.data.tsn);
3714 			 */
3715 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3716 				/*
3717 				 * If the send queue is empty then it's the
3718 				 * next sequence number that will be
3719 				 * assigned, so we subtract one from this to
3720 				 * get the one we last sent.
3721 				 */
3722 				tp1->rec.data.fast_retran_tsn = sending_seq;
3723 			} else {
3724 				/*
3725 				 * If there are chunks on the send queue
3726 				 * (unsent data that has made it from the
3727 				 * stream queues but not out the door), we
3728 				 * take the first one (which will have the
3729 				 * lowest TSN) and subtract one to get the
3730 				 * one we last sent.
3731 				 */
3732 				struct sctp_tmit_chunk *ttt;
3733 
3734 				ttt = TAILQ_FIRST(&asoc->send_queue);
3735 				tp1->rec.data.fast_retran_tsn =
3736 				    ttt->rec.data.tsn;
3737 			}
3738 
3739 			if (tp1->do_rtt) {
3740 				/*
3741 				 * this guy had an RTO calculation pending on
3742 				 * it, cancel it
3743 				 */
3744 				if ((tp1->whoTo != NULL) &&
3745 				    (tp1->whoTo->rto_needed == 0)) {
3746 					tp1->whoTo->rto_needed = 1;
3747 				}
3748 				tp1->do_rtt = 0;
3749 			}
3750 			if (alt != tp1->whoTo) {
3751 				/* yes, there is an alternate. */
3752 				sctp_free_remote_addr(tp1->whoTo);
3753 				/* sa_ignore FREED_MEMORY */
3754 				tp1->whoTo = alt;
3755 				atomic_add_int(&alt->ref_count, 1);
3756 			}
3757 		}
3758 	}
3759 }
3760 
3761 struct sctp_tmit_chunk *
3762 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3763     struct sctp_association *asoc)
3764 {
3765 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3766 	struct timeval now;
3767 	int now_filled = 0;
3768 
3769 	if (asoc->prsctp_supported == 0) {
3770 		return (NULL);
3771 	}
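	/*
	 * Walk the sent queue from the lowest TSN looking for chunks that
	 * were abandoned (FORWARD_TSN_SKIP) or NR-acked.  The highest such
	 * consecutive TSN becomes the new advancedPeerAckPoint which a
	 * FORWARD-TSN chunk can then report to the peer (RFC 3758).
	 */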
3772 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3773 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3774 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3775 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3776 			/* no chance to advance, out of here */
3777 			break;
3778 		}
3779 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3780 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3781 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3782 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3783 				    asoc->advanced_peer_ack_point,
3784 				    tp1->rec.data.tsn, 0, 0);
3785 			}
3786 		}
3787 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3788 			/*
3789 			 * We can't fwd-tsn past any that are reliable, aka
3790 			 * retransmitted until the asoc fails.
3791 			 */
3792 			break;
3793 		}
3794 		if (!now_filled) {
3795 			(void)SCTP_GETTIME_TIMEVAL(&now);
3796 			now_filled = 1;
3797 		}
3798 		/*
3799 		 * Now we have a chunk which is marked for another
3800 		 * retransmission to a PR-stream but may have run out of
3801 		 * its chances already OR has been marked to skip now. Can
3802 		 * we skip it if it's a resend?
3803 		 */
3804 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3805 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3806 			/*
3807 			 * Now is this one marked for resend and its time is
3808 			 * now up?
3809 			 */
3810 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3811 				/* Yes so drop it */
3812 				if (tp1->data) {
3813 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3814 					    1, SCTP_SO_NOT_LOCKED);
3815 				}
3816 			} else {
3817 				/*
3818 				 * No, we are done when we hit one marked
3819 				 * for resend whose time has not expired.
3820 				 */
3821 				break;
3822 			}
3823 		}
3824 		/*
3825 		 * Ok, now if this chunk is marked to drop we can clean up
3826 		 * the chunk, advance our peer ack point, and check the
3827 		 * next chunk.
3828 		 */
3829 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3830 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3831 			/* the advancedPeerAckPoint goes forward */
3832 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3833 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3834 				a_adv = tp1;
3835 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3836 				/* No update but we do save the chk */
3837 				a_adv = tp1;
3838 			}
3839 		} else {
3840 			/*
3841 			 * If it is still in RESEND we can advance no
3842 			 * further
3843 			 */
3844 			break;
3845 		}
3846 	}
3847 	return (a_adv);
3848 }
3849 
3850 static int
3851 sctp_fs_audit(struct sctp_association *asoc)
3852 {
3853 	struct sctp_tmit_chunk *chk;
3854 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3855 	int ret;
3856 #ifndef INVARIANTS
3857 	int entry_flight, entry_cnt;
3858 #endif
3859 
3860 	ret = 0;
3861 #ifndef INVARIANTS
3862 	entry_flight = asoc->total_flight;
3863 	entry_cnt = asoc->total_flight_count;
3864 #endif
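	/* If everything left on the sent queue is PR-SCTP, skip the audit. */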
3865 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3866 		return (0);
3867 
3868 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3869 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3870 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3871 			    chk->rec.data.tsn,
3872 			    chk->send_size,
3873 			    chk->snd_count);
3874 			inflight++;
3875 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3876 			resend++;
3877 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3878 			inbetween++;
3879 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3880 			above++;
3881 		} else {
3882 			acked++;
3883 		}
3884 	}
3885 
3886 	if ((inflight > 0) || (inbetween > 0)) {
3887 #ifdef INVARIANTS
3888 		panic("Flight size-express incorrect? \n");
3889 #else
3890 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3891 		    entry_flight, entry_cnt);
3892 
3893 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3894 		    inflight, inbetween, resend, above, acked);
3895 		ret = 1;
3896 #endif
3897 	}
3898 	return (ret);
3899 }
3900 
3901 static void
3902 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3903     struct sctp_association *asoc,
3904     struct sctp_tmit_chunk *tp1)
3905 {
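	/*
	 * tp1 was used to probe a closed receive window.  Unless it was
	 * already acked (or abandoned), take it back out of the flight
	 * accounting and mark it for retransmission under normal rules.
	 */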
3906 	tp1->window_probe = 0;
3907 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3908 		/* TSNs skipped; we do NOT move back. */
3909 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3910 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3911 		    tp1->book_size,
3912 		    (uint32_t)(uintptr_t)tp1->whoTo,
3913 		    tp1->rec.data.tsn);
3914 		return;
3915 	}
3916 	/* First setup this by shrinking flight */
3917 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3918 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3919 		    tp1);
3920 	}
3921 	sctp_flight_size_decrease(tp1);
3922 	sctp_total_flight_decrease(stcb, tp1);
3923 	/* Now mark for resend */
3924 	tp1->sent = SCTP_DATAGRAM_RESEND;
3925 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3926 
3927 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3928 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3929 		    tp1->whoTo->flight_size,
3930 		    tp1->book_size,
3931 		    (uint32_t)(uintptr_t)tp1->whoTo,
3932 		    tp1->rec.data.tsn);
3933 	}
3934 }
3935 
3936 void
3937 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3938     uint32_t rwnd, int *abort_now, int ecne_seen)
3939 {
3940 	struct sctp_nets *net;
3941 	struct sctp_association *asoc;
3942 	struct sctp_tmit_chunk *tp1, *tp2;
3943 	uint32_t old_rwnd;
3944 	int win_probe_recovery = 0;
3945 	int win_probe_recovered = 0;
3946 	int j, done_once = 0;
3947 	int rto_ok = 1;
3948 	uint32_t send_s;
3949 
3950 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
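	/*
	 * Fast path SACK handler: used when the SACK carries no gap ack
	 * blocks and no duplicate TSN reports, so only the cumulative ack
	 * and the advertised rwnd can have changed.
	 */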
3951 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3952 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3953 	}
3954 	SCTP_TCB_LOCK_ASSERT(stcb);
3955 #ifdef SCTP_ASOCLOG_OF_TSNS
3956 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3957 	stcb->asoc.cumack_log_at++;
3958 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
3959 		stcb->asoc.cumack_log_at = 0;
3960 	}
3961 #endif
3962 	asoc = &stcb->asoc;
3963 	old_rwnd = asoc->peers_rwnd;
3964 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3965 		/* old ack */
3966 		return;
3967 	} else if (asoc->last_acked_seq == cumack) {
3968 		/* Window update sack */
3969 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3970 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3971 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3972 			/* SWS sender side engages */
3973 			asoc->peers_rwnd = 0;
3974 		}
3975 		if (asoc->peers_rwnd > old_rwnd) {
3976 			goto again;
3977 		}
3978 		return;
3979 	}
3980 
3981 	/* First setup for CC stuff */
3982 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3983 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3984 			/* Drag along the window_tsn for cwr's */
3985 			net->cwr_window_tsn = cumack;
3986 		}
3987 		net->prev_cwnd = net->cwnd;
3988 		net->net_ack = 0;
3989 		net->net_ack2 = 0;
3990 
3991 		/*
3992 		 * CMT: Reset CUC and Fast recovery algo variables before
3993 		 * SACK processing
3994 		 */
3995 		net->new_pseudo_cumack = 0;
3996 		net->will_exit_fast_recovery = 0;
3997 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3998 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3999 		}
4000 	}
4001 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4002 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4003 		    sctpchunk_listhead);
4004 		send_s = tp1->rec.data.tsn + 1;
4005 	} else {
4006 		send_s = asoc->sending_seq;
4007 	}
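	/*
	 * Sanity check: the peer cannot cumulatively ack a TSN we have
	 * never sent; treat that as a protocol violation and abort.
	 */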
4008 	if (SCTP_TSN_GE(cumack, send_s)) {
4009 		struct mbuf *op_err;
4010 		char msg[SCTP_DIAG_INFO_LEN];
4011 
4012 		*abort_now = 1;
4013 		/* XXX */
4014 		SCTP_SNPRINTF(msg, sizeof(msg),
4015 		    "Cum ack %8.8x greater than or equal to TSN %8.8x",
4016 		    cumack, send_s);
4017 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4018 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4019 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4020 		return;
4021 	}
4022 	asoc->this_sack_highest_gap = cumack;
4023 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4024 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4025 		    stcb->asoc.overall_error_count,
4026 		    0,
4027 		    SCTP_FROM_SCTP_INDATA,
4028 		    __LINE__);
4029 	}
4030 	stcb->asoc.overall_error_count = 0;
4031 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4032 		/* process the new consecutive TSN first */
4033 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4034 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4035 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4036 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4037 				}
4038 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4039 					/*
4040 					 * If it is less than ACKED, it is
4041 					 * now no longer in flight. Higher
4042 					 * values may occur during marking
4043 					 */
4044 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4045 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4046 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4047 							    tp1->whoTo->flight_size,
4048 							    tp1->book_size,
4049 							    (uint32_t)(uintptr_t)tp1->whoTo,
4050 							    tp1->rec.data.tsn);
4051 						}
4052 						sctp_flight_size_decrease(tp1);
4053 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4054 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4055 							    tp1);
4056 						}
4057 						/* sa_ignore NO_NULL_CHK */
4058 						sctp_total_flight_decrease(stcb, tp1);
4059 					}
4060 					tp1->whoTo->net_ack += tp1->send_size;
4061 					if (tp1->snd_count < 2) {
4062 						/*
4063 						 * True non-retransmitted
4064 						 * chunk
4065 						 */
4066 						tp1->whoTo->net_ack2 +=
4067 						    tp1->send_size;
4068 
4069 						/* update RTO too? */
4070 						if (tp1->do_rtt) {
4071 							if (rto_ok &&
4072 							    sctp_calculate_rto(stcb,
4073 							    &stcb->asoc,
4074 							    tp1->whoTo,
4075 							    &tp1->sent_rcv_time,
4076 							    SCTP_RTT_FROM_DATA)) {
4077 								rto_ok = 0;
4078 							}
4079 							if (tp1->whoTo->rto_needed == 0) {
4080 								tp1->whoTo->rto_needed = 1;
4081 							}
4082 							tp1->do_rtt = 0;
4083 						}
4084 					}
4085 					/*
4086 					 * CMT: CUCv2 algorithm. From the
4087 					 * cumack'd TSNs, for each TSN being
4088 					 * acked for the first time, set the
4089 					 * following variables for the
4090 					 * corresp destination.
4091 					 * new_pseudo_cumack will trigger a
4092 					 * cwnd update.
4093 					 * find_(rtx_)pseudo_cumack will
4094 					 * trigger search for the next
4095 					 * expected (rtx-)pseudo-cumack.
4096 					 */
4097 					tp1->whoTo->new_pseudo_cumack = 1;
4098 					tp1->whoTo->find_pseudo_cumack = 1;
4099 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4100 
4101 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4102 						/* sa_ignore NO_NULL_CHK */
4103 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4104 					}
4105 				}
4106 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4107 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4108 				}
4109 				if (tp1->rec.data.chunk_was_revoked) {
4110 					/* deflate the cwnd */
4111 					tp1->whoTo->cwnd -= tp1->book_size;
4112 					tp1->rec.data.chunk_was_revoked = 0;
4113 				}
4114 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4115 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4116 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4117 #ifdef INVARIANTS
4118 					} else {
4119 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4120 #endif
4121 					}
4122 				}
4123 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4124 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4125 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4126 					asoc->trigger_reset = 1;
4127 				}
4128 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4129 				if (tp1->data) {
4130 					/* sa_ignore NO_NULL_CHK */
4131 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4132 					sctp_m_freem(tp1->data);
4133 					tp1->data = NULL;
4134 				}
4135 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4136 					sctp_log_sack(asoc->last_acked_seq,
4137 					    cumack,
4138 					    tp1->rec.data.tsn,
4139 					    0,
4140 					    0,
4141 					    SCTP_LOG_FREE_SENT);
4142 				}
4143 				asoc->sent_queue_cnt--;
4144 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4145 			} else {
4146 				break;
4147 			}
4148 		}
4149 	}
4150 	/* sa_ignore NO_NULL_CHK */
4151 	if (stcb->sctp_socket) {
4152 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4153 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4154 			/* sa_ignore NO_NULL_CHK */
4155 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4156 		}
4157 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4158 	} else {
4159 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4160 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4161 		}
4162 	}
4163 
4164 	/* JRS - Use the congestion control given in the CC module */
4165 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4166 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4167 			if (net->net_ack2 > 0) {
4168 				/*
4169 				 * Karn's rule applies to clearing error
4170 				 * count; this is optional.
4171 				 */
4172 				net->error_count = 0;
4173 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4174 					/* addr came good */
4175 					net->dest_state |= SCTP_ADDR_REACHABLE;
4176 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4177 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4178 				}
4179 				if (net == stcb->asoc.primary_destination) {
4180 					if (stcb->asoc.alternate) {
4181 						/*
4182 						 * release the alternate,
4183 						 * primary is good
4184 						 */
4185 						sctp_free_remote_addr(stcb->asoc.alternate);
4186 						stcb->asoc.alternate = NULL;
4187 					}
4188 				}
4189 				if (net->dest_state & SCTP_ADDR_PF) {
4190 					net->dest_state &= ~SCTP_ADDR_PF;
4191 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4192 					    stcb->sctp_ep, stcb, net,
4193 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4194 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4195 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4196 					/* Done with this net */
4197 					net->net_ack = 0;
4198 				}
4199 				/* restore any doubled timers */
4200 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4201 				if (net->RTO < stcb->asoc.minrto) {
4202 					net->RTO = stcb->asoc.minrto;
4203 				}
4204 				if (net->RTO > stcb->asoc.maxrto) {
4205 					net->RTO = stcb->asoc.maxrto;
4206 				}
4207 			}
4208 		}
4209 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4210 	}
4211 	asoc->last_acked_seq = cumack;
4212 
4213 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4214 		/* nothing left in-flight */
4215 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4216 			net->flight_size = 0;
4217 			net->partial_bytes_acked = 0;
4218 		}
4219 		asoc->total_flight = 0;
4220 		asoc->total_flight_count = 0;
4221 	}
4222 
4223 	/* RWND update */
4224 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4225 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4226 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4227 		/* SWS sender side engages */
4228 		asoc->peers_rwnd = 0;
4229 	}
4230 	if (asoc->peers_rwnd > old_rwnd) {
4231 		win_probe_recovery = 1;
4232 	}
4233 	/* Now assure a timer where data is queued at */
4234 again:
4235 	j = 0;
4236 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4237 		if (win_probe_recovery && (net->window_probe)) {
4238 			win_probe_recovered = 1;
4239 			/*
4240 			 * Find first chunk that was used with window probe
4241 			 * and clear the sent
4242 			 */
4243 			/* sa_ignore FREED_MEMORY */
4244 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4245 				if (tp1->window_probe) {
4246 					/* move back to data send queue */
4247 					sctp_window_probe_recovery(stcb, asoc, tp1);
4248 					break;
4249 				}
4250 			}
4251 		}
4252 		if (net->flight_size) {
4253 			j++;
4254 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4255 			if (net->window_probe) {
4256 				net->window_probe = 0;
4257 			}
4258 		} else {
4259 			if (net->window_probe) {
4260 				/*
4261 				 * In window probes we must assure a timer
4262 				 * is still running there
4263 				 */
4264 				net->window_probe = 0;
4265 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4266 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4267 				}
4268 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4269 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4270 				    stcb, net,
4271 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4272 			}
4273 		}
4274 	}
4275 	if ((j == 0) &&
4276 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4277 	    (asoc->sent_queue_retran_cnt == 0) &&
4278 	    (win_probe_recovered == 0) &&
4279 	    (done_once == 0)) {
4280 		/*
4281 		 * huh, this should not happen unless all packets are
4282 		 * PR-SCTP and marked to skip of course.
4283 		 */
4284 		if (sctp_fs_audit(asoc)) {
4285 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4286 				net->flight_size = 0;
4287 			}
4288 			asoc->total_flight = 0;
4289 			asoc->total_flight_count = 0;
4290 			asoc->sent_queue_retran_cnt = 0;
4291 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4292 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4293 					sctp_flight_size_increase(tp1);
4294 					sctp_total_flight_increase(stcb, tp1);
4295 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4296 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4297 				}
4298 			}
4299 		}
4300 		done_once = 1;
4301 		goto again;
4302 	}
4303 	/**********************************/
4304 	/* Now what about shutdown issues */
4305 	/**********************************/
4306 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4307 		/* nothing left on sendqueue.. consider done */
4308 		/* clean up */
4309 		if ((asoc->stream_queue_cnt == 1) &&
4310 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4311 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4312 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4313 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4314 		}
4315 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4316 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4317 		    (asoc->stream_queue_cnt == 1) &&
4318 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4319 			struct mbuf *op_err;
4320 
4321 			*abort_now = 1;
4322 			/* XXX */
4323 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4324 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4325 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4326 			return;
4327 		}
4328 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4329 		    (asoc->stream_queue_cnt == 0)) {
4330 			struct sctp_nets *netp;
4331 
4332 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4333 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4334 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4335 			}
4336 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4337 			sctp_stop_timers_for_shutdown(stcb);
4338 			if (asoc->alternate) {
4339 				netp = asoc->alternate;
4340 			} else {
4341 				netp = asoc->primary_destination;
4342 			}
4343 			sctp_send_shutdown(stcb, netp);
4344 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4345 			    stcb->sctp_ep, stcb, netp);
4346 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4347 			    stcb->sctp_ep, stcb, NULL);
4348 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4349 		    (asoc->stream_queue_cnt == 0)) {
4350 			struct sctp_nets *netp;
4351 
4352 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4353 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4354 			sctp_stop_timers_for_shutdown(stcb);
4355 			if (asoc->alternate) {
4356 				netp = asoc->alternate;
4357 			} else {
4358 				netp = asoc->primary_destination;
4359 			}
4360 			sctp_send_shutdown_ack(stcb, netp);
4361 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4362 			    stcb->sctp_ep, stcb, netp);
4363 		}
4364 	}
4365 	/*********************************************/
4366 	/* Here we perform PR-SCTP procedures        */
4367 	/* (section 4.2)                             */
4368 	/*********************************************/
4369 	/* C1. update advancedPeerAckPoint */
4370 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4371 		asoc->advanced_peer_ack_point = cumack;
4372 	}
4373 	/* PR-Sctp issues need to be addressed too */
4374 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4375 		struct sctp_tmit_chunk *lchk;
4376 		uint32_t old_adv_peer_ack_point;
4377 
4378 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4379 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4380 		/* C3. See if we need to send a Fwd-TSN */
4381 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4382 			/*
4383 			 * ISSUE with ECN, see FWD-TSN processing.
4384 			 */
4385 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4386 				send_forward_tsn(stcb, asoc);
4387 			} else if (lchk) {
4388 				/* try to FR fwd-tsn's that get lost too */
4389 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4390 					send_forward_tsn(stcb, asoc);
4391 				}
4392 			}
4393 		}
4394 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4395 			if (lchk->whoTo != NULL) {
4396 				break;
4397 			}
4398 		}
4399 		if (lchk != NULL) {
4400 			/* Assure a timer is up */
4401 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4402 			    stcb->sctp_ep, stcb, lchk->whoTo);
4403 		}
4404 	}
4405 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4406 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4407 		    rwnd,
4408 		    stcb->asoc.peers_rwnd,
4409 		    stcb->asoc.total_flight,
4410 		    stcb->asoc.total_output_queue_size);
4411 	}
4412 }
4413 
4414 void
4415 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4416     struct sctp_tcb *stcb,
4417     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4418     int *abort_now, uint8_t flags,
4419     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4420 {
4421 	struct sctp_association *asoc;
4422 	struct sctp_tmit_chunk *tp1, *tp2;
4423 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4424 	uint16_t wake_him = 0;
4425 	uint32_t send_s = 0;
4426 	long j;
4427 	int accum_moved = 0;
4428 	int will_exit_fast_recovery = 0;
4429 	uint32_t a_rwnd, old_rwnd;
4430 	int win_probe_recovery = 0;
4431 	int win_probe_recovered = 0;
4432 	struct sctp_nets *net = NULL;
4433 	int done_once;
4434 	int rto_ok = 1;
4435 	uint8_t reneged_all = 0;
4436 	uint8_t cmt_dac_flag;
4437 
4438 	/*
4439 	 * we take any chance we can to service our queues since we cannot
4440 	 * get awoken when the socket is read from :<
4441 	 */
4442 	/*
4443 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4444 	 * old sack, if so discard. 2) If there is nothing left in the send
4445 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4446 	 * too, update any rwnd change and verify no timers are running.
4447 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4448 	 * moved process these first and note that it moved. 4) Process any
4449 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4450 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4451 	 * sync up flightsizes and things, stop all timers and also check
4452 	 * for shutdown_pending state. If so then go ahead and send off the
4453 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4454 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4455 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4456 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4457 	 * if in shutdown_recv state.
4458 	 */
4459 	SCTP_TCB_LOCK_ASSERT(stcb);
4460 	/* CMT DAC algo */
4461 	this_sack_lowest_newack = 0;
4462 	SCTP_STAT_INCR(sctps_slowpath_sack);
4463 	last_tsn = cum_ack;
4464 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
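	/*
	 * The CMT DAC flag in the SACK chunk flags indicates a delayed
	 * SACK covering two received packets; the DAC algorithm uses it
	 * to justify an extra strike on a missing TSN.
	 */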
4465 #ifdef SCTP_ASOCLOG_OF_TSNS
4466 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4467 	stcb->asoc.cumack_log_at++;
4468 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
4469 		stcb->asoc.cumack_log_at = 0;
4470 	}
4471 #endif
4472 	a_rwnd = rwnd;
4473 
4474 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4475 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4476 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4477 	}
4478 
4479 	old_rwnd = stcb->asoc.peers_rwnd;
4480 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4481 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4482 		    stcb->asoc.overall_error_count,
4483 		    0,
4484 		    SCTP_FROM_SCTP_INDATA,
4485 		    __LINE__);
4486 	}
4487 	stcb->asoc.overall_error_count = 0;
4488 	asoc = &stcb->asoc;
4489 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4490 		sctp_log_sack(asoc->last_acked_seq,
4491 		    cum_ack,
4492 		    0,
4493 		    num_seg,
4494 		    num_dup,
4495 		    SCTP_LOG_NEW_SACK);
4496 	}
4497 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4498 		uint16_t i;
4499 		uint32_t *dupdata, dblock;
4500 
4501 		for (i = 0; i < num_dup; i++) {
4502 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4503 			    sizeof(uint32_t), (uint8_t *)&dblock);
4504 			if (dupdata == NULL) {
4505 				break;
4506 			}
4507 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4508 		}
4509 	}
4510 	/* reality check */
4511 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4512 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4513 		    sctpchunk_listhead);
4514 		send_s = tp1->rec.data.tsn + 1;
4515 	} else {
4516 		tp1 = NULL;
4517 		send_s = asoc->sending_seq;
4518 	}
4519 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4520 		struct mbuf *op_err;
4521 		char msg[SCTP_DIAG_INFO_LEN];
4522 
4523 		/*
4524 		 * no way, we have not even sent this TSN out yet. Peer is
4525 		 * hopelessly messed up with us.
4526 		 */
4527 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4528 		    cum_ack, send_s);
4529 		if (tp1) {
4530 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4531 			    tp1->rec.data.tsn, (void *)tp1);
4532 		}
4533 hopeless_peer:
4534 		*abort_now = 1;
4535 		/* XXX */
4536 		SCTP_SNPRINTF(msg, sizeof(msg),
4537 		    "Cum ack %8.8x greater than or equal to TSN %8.8x",
4538 		    cum_ack, send_s);
4539 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4540 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4541 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4542 		return;
4543 	}
4544 	/**********************/
4545 	/* 1) check the range */
4546 	/**********************/
4547 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4548 		/* acking something behind */
4549 		return;
4550 	}
4551 
4552 	/* update the Rwnd of the peer */
4553 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4554 	    TAILQ_EMPTY(&asoc->send_queue) &&
4555 	    (asoc->stream_queue_cnt == 0)) {
4556 		/* nothing left on send/sent and strmq */
4557 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4558 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4559 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4560 		}
4561 		asoc->peers_rwnd = a_rwnd;
4562 		if (asoc->sent_queue_retran_cnt) {
4563 			asoc->sent_queue_retran_cnt = 0;
4564 		}
4565 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4566 			/* SWS sender side engages */
4567 			asoc->peers_rwnd = 0;
4568 		}
4569 		/* stop any timers */
4570 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4571 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4572 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4573 			net->partial_bytes_acked = 0;
4574 			net->flight_size = 0;
4575 		}
4576 		asoc->total_flight = 0;
4577 		asoc->total_flight_count = 0;
4578 		return;
4579 	}
4580 	/*
4581 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4582 	 * things. The total byte count acked is tracked in net_ack AND
4583 	 * net_ack2 is used to track the total bytes acked that are
4584 	 * unambiguous and were never retransmitted. We track these on a per
4585 	 * destination address basis.
4586 	 */
4587 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4589 			/* Drag along the window_tsn for cwr's */
4590 			net->cwr_window_tsn = cum_ack;
4591 		}
4592 		net->prev_cwnd = net->cwnd;
4593 		net->net_ack = 0;
4594 		net->net_ack2 = 0;
4595 
4596 		/*
4597 		 * CMT: Reset CUC and Fast recovery algo variables before
4598 		 * SACK processing
4599 		 */
4600 		net->new_pseudo_cumack = 0;
4601 		net->will_exit_fast_recovery = 0;
4602 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4603 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4604 		}
4605 
4606 		/*
4607 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4608 		 * to be greater than the cumack. Also reset saw_newack to 0
4609 		 * for all dests.
4610 		 */
4611 		net->saw_newack = 0;
4612 		net->this_sack_highest_newack = last_tsn;
4613 	}
4614 	/* process the new consecutive TSN first */
4615 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4616 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4617 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4618 				accum_moved = 1;
4619 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4620 					/*
4621 					 * If it is less than ACKED, it is
4622 					 * now no longer in flight. Higher
4623 					 * values may occur during marking
4624 					 */
4625 					if ((tp1->whoTo->dest_state &
4626 					    SCTP_ADDR_UNCONFIRMED) &&
4627 					    (tp1->snd_count < 2)) {
4628 						/*
4629 						 * If there was no retran
4630 						 * and the address is
4631 						 * un-confirmed and we sent
4632 						 * there and are now
4633 						 * sacked, it's confirmed;
4634 						 * mark it so.
4635 						 */
4636 						tp1->whoTo->dest_state &=
4637 						    ~SCTP_ADDR_UNCONFIRMED;
4638 					}
4639 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4640 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4641 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4642 							    tp1->whoTo->flight_size,
4643 							    tp1->book_size,
4644 							    (uint32_t)(uintptr_t)tp1->whoTo,
4645 							    tp1->rec.data.tsn);
4646 						}
4647 						sctp_flight_size_decrease(tp1);
4648 						sctp_total_flight_decrease(stcb, tp1);
4649 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4650 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4651 							    tp1);
4652 						}
4653 					}
4654 					tp1->whoTo->net_ack += tp1->send_size;
4655 
4656 					/* CMT SFR and DAC algos */
4657 					this_sack_lowest_newack = tp1->rec.data.tsn;
4658 					tp1->whoTo->saw_newack = 1;
4659 
4660 					if (tp1->snd_count < 2) {
4661 						/*
4662 						 * True non-retransmitted
4663 						 * chunk
4664 						 */
4665 						tp1->whoTo->net_ack2 +=
4666 						    tp1->send_size;
4667 
4668 						/* update RTO too? */
4669 						if (tp1->do_rtt) {
4670 							if (rto_ok &&
4671 							    sctp_calculate_rto(stcb,
4672 							    &stcb->asoc,
4673 							    tp1->whoTo,
4674 							    &tp1->sent_rcv_time,
4675 							    SCTP_RTT_FROM_DATA)) {
4676 								rto_ok = 0;
4677 							}
4678 							if (tp1->whoTo->rto_needed == 0) {
4679 								tp1->whoTo->rto_needed = 1;
4680 							}
4681 							tp1->do_rtt = 0;
4682 						}
4683 					}
4684 					/*
4685 					 * CMT: CUCv2 algorithm. From the
4686 					 * cumack'd TSNs, for each TSN being
4687 					 * acked for the first time, set the
4688 					 * following variables for the
4689 					 * corresp destination.
4690 					 * new_pseudo_cumack will trigger a
4691 					 * cwnd update.
4692 					 * find_(rtx_)pseudo_cumack will
4693 					 * trigger search for the next
4694 					 * expected (rtx-)pseudo-cumack.
4695 					 */
4696 					tp1->whoTo->new_pseudo_cumack = 1;
4697 					tp1->whoTo->find_pseudo_cumack = 1;
4698 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4699 
4700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4701 						sctp_log_sack(asoc->last_acked_seq,
4702 						    cum_ack,
4703 						    tp1->rec.data.tsn,
4704 						    0,
4705 						    0,
4706 						    SCTP_LOG_TSN_ACKED);
4707 					}
4708 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4709 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4710 					}
4711 				}
4712 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4713 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4714 #ifdef SCTP_AUDITING_ENABLED
4715 					sctp_audit_log(0xB3,
4716 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4717 #endif
4718 				}
4719 				if (tp1->rec.data.chunk_was_revoked) {
4720 					/* deflate the cwnd */
4721 					tp1->whoTo->cwnd -= tp1->book_size;
4722 					tp1->rec.data.chunk_was_revoked = 0;
4723 				}
4724 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4725 					tp1->sent = SCTP_DATAGRAM_ACKED;
4726 				}
4727 			}
4728 		} else {
4729 			break;
4730 		}
4731 	}
4732 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4733 	/* always set this up to cum-ack */
4734 	asoc->this_sack_highest_gap = last_tsn;
4735 
4736 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4737 		/*
4738 		 * thisSackHighestGap will increase while handling NEW
4739 		 * segments this_sack_highest_newack will increase while
4740 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4741 		 * used for CMT DAC algo. saw_newack will also change.
4742 		 */
4743 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4744 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4745 		    num_seg, num_nr_seg, &rto_ok)) {
4746 			wake_him++;
4747 		}
4748 		/*
4749 		 * validate the biggest_tsn_acked in the gap acks if strict
4750 		 * adherence is wanted.
4751 		 */
4752 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4753 			/*
4754 			 * peer is either confused or we are under attack.
4755 			 * We must abort.
4756 			 */
4757 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4758 			    biggest_tsn_acked, send_s);
4759 			goto hopeless_peer;
4760 		}
4761 	}
4762 	/*******************************************/
4763 	/* cancel ALL T3-send timers if accum moved */
4764 	/*******************************************/
4765 	if (asoc->sctp_cmt_on_off > 0) {
4766 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4767 			if (net->new_pseudo_cumack)
4768 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4769 				    stcb, net,
4770 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4771 		}
4772 	} else {
4773 		if (accum_moved) {
4774 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4775 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4776 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4777 			}
4778 		}
4779 	}
4780 	/********************************************/
4781 	/* drop the acked chunks from the sentqueue */
4782 	/********************************************/
4783 	asoc->last_acked_seq = cum_ack;
4784 
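	/*
	 * The comparisons below rely on SCTP_TSN_GT()/SCTP_TSN_GE(), which
	 * compare 32-bit TSNs circularly (mod 2^32) so that wraparound is
	 * handled. A rough sketch of the idea, assuming RFC 1982 style
	 * serial number arithmetic (hypothetical helper, not this file's
	 * actual macro):
	 *
	 *	static inline int
	 *	tsn_gt(uint32_t a, uint32_t b)
	 *	{
	 *		return ((int32_t)(a - b) > 0);
	 *	}
	 */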
4785 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4786 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4787 			break;
4788 		}
4789 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4790 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4791 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4792 #ifdef INVARIANTS
4793 			} else {
4794 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4795 #endif
4796 			}
4797 		}
4798 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4799 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4800 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4801 			asoc->trigger_reset = 1;
4802 		}
4803 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4804 		if (PR_SCTP_ENABLED(tp1->flags)) {
4805 			if (asoc->pr_sctp_cnt != 0)
4806 				asoc->pr_sctp_cnt--;
4807 		}
4808 		asoc->sent_queue_cnt--;
4809 		if (tp1->data) {
4810 			/* sa_ignore NO_NULL_CHK */
4811 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4812 			sctp_m_freem(tp1->data);
4813 			tp1->data = NULL;
4814 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4815 				asoc->sent_queue_cnt_removeable--;
4816 			}
4817 		}
4818 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4819 			sctp_log_sack(asoc->last_acked_seq,
4820 			    cum_ack,
4821 			    tp1->rec.data.tsn,
4822 			    0,
4823 			    0,
4824 			    SCTP_LOG_FREE_SENT);
4825 		}
4826 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4827 		wake_him++;
4828 	}
4829 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4830 #ifdef INVARIANTS
4831 		panic("Warning flight size is positive and should be 0");
4832 #else
4833 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4834 		    asoc->total_flight);
4835 #endif
4836 		asoc->total_flight = 0;
4837 	}
4838 
4839 	/* sa_ignore NO_NULL_CHK */
4840 	if ((wake_him) && (stcb->sctp_socket)) {
4841 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4842 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4843 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4844 		}
4845 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4846 	} else {
4847 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4848 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4849 		}
4850 	}
4851 
4852 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4853 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4854 			/* Setup so we will exit RFC2582 fast recovery */
4855 			will_exit_fast_recovery = 1;
4856 		}
4857 	}
4858 	/*
4859 	 * Check for revoked fragments:
4860 	 *
4861 	 * If the previous SACK had no gap-ack blocks, nothing can have been
4862 	 * revoked. If it did, and this SACK also has blocks (num_seg > 0),
4863 	 * call sctp_check_for_revoked() to see whether the peer revoked
4864 	 * some of them; otherwise the peer revoked all gap-ACKED fragments,
4865 	 * since we had some before and now we have NONE.
4866 	 */
4867 
4868 	if (num_seg) {
4869 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4870 		asoc->saw_sack_with_frags = 1;
4871 	} else if (asoc->saw_sack_with_frags) {
4872 		int cnt_revoked = 0;
4873 
4874 		/* Peer revoked all datagrams marked or acked */
4875 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4876 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4877 				tp1->sent = SCTP_DATAGRAM_SENT;
4878 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4879 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4880 					    tp1->whoTo->flight_size,
4881 					    tp1->book_size,
4882 					    (uint32_t)(uintptr_t)tp1->whoTo,
4883 					    tp1->rec.data.tsn);
4884 				}
4885 				sctp_flight_size_increase(tp1);
4886 				sctp_total_flight_increase(stcb, tp1);
4887 				tp1->rec.data.chunk_was_revoked = 1;
4888 				/*
4889 				 * To ensure that this increase in
4890 				 * flightsize, which is artificial, does not
4891 				 * throttle the sender, we also increase the
4892 				 * cwnd artificially.
4893 				 */
4894 				tp1->whoTo->cwnd += tp1->book_size;
4895 				cnt_revoked++;
4896 			}
4897 		}
4898 		if (cnt_revoked) {
4899 			reneged_all = 1;
4900 		}
4901 		asoc->saw_sack_with_frags = 0;
4902 	}
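	/*
	 * Illustrative example: SACK #1 gap-acks TSN 5 while the cum-ack
	 * sits at 3. If SACK #2 then arrives with cum-ack 3 and no gap-ack
	 * blocks, TSN 5 has been revoked: it goes back to
	 * SCTP_DATAGRAM_SENT, is counted in flight again, and the cwnd is
	 * padded by book_size (as above) so the artificial flight increase
	 * does not throttle the sender.
	 */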
4903 	if (num_nr_seg > 0)
4904 		asoc->saw_sack_with_nr_frags = 1;
4905 	else
4906 		asoc->saw_sack_with_nr_frags = 0;
4907 
4908 	/* JRS - Use the congestion control given in the CC module */
4909 	if (ecne_seen == 0) {
4910 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4911 			if (net->net_ack2 > 0) {
4912 				/*
4913 				 * Per Karn's rule, clearing the error
4914 				 * count here is optional.
4915 				 */
4916 				net->error_count = 0;
4917 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4918 					/* addr came good */
4919 					net->dest_state |= SCTP_ADDR_REACHABLE;
4920 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4921 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4922 				}
4923 
4924 				if (net == stcb->asoc.primary_destination) {
4925 					if (stcb->asoc.alternate) {
4926 						/*
4927 						 * release the alternate;
4928 						 * the primary is good
4929 						 */
4930 						sctp_free_remote_addr(stcb->asoc.alternate);
4931 						stcb->asoc.alternate = NULL;
4932 					}
4933 				}
4934 
4935 				if (net->dest_state & SCTP_ADDR_PF) {
4936 					net->dest_state &= ~SCTP_ADDR_PF;
4937 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4938 					    stcb->sctp_ep, stcb, net,
4939 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4940 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4941 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4942 					/* Done with this net */
4943 					net->net_ack = 0;
4944 				}
4945 				/* restore any doubled timers */
4946 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4947 				if (net->RTO < stcb->asoc.minrto) {
4948 					net->RTO = stcb->asoc.minrto;
4949 				}
4950 				if (net->RTO > stcb->asoc.maxrto) {
4951 					net->RTO = stcb->asoc.maxrto;
4952 				}
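				/*
				 * The RTO recomputed above follows the usual
				 * scaled form of RFC 4960, Section 6.3.1,
				 * roughly:
				 *
				 *	RTO = SRTT + 4 * RTTVAR
				 *
				 * assuming lastsa holds SRTT scaled up by
				 * 1 << SCTP_RTT_SHIFT and lastsv already
				 * includes the variance multiplier; the
				 * result is then clamped to [minrto, maxrto]
				 * by the two checks above.
				 */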
4953 			}
4954 		}
4955 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4956 	}
4957 
4958 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4959 		/* nothing left in-flight */
4960 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4961 			/* stop all timers */
4962 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4963 			    stcb, net,
4964 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4965 			net->flight_size = 0;
4966 			net->partial_bytes_acked = 0;
4967 		}
4968 		asoc->total_flight = 0;
4969 		asoc->total_flight_count = 0;
4970 	}
4971 
4972 	/**********************************/
4973 	/* Now what about shutdown issues */
4974 	/**********************************/
4975 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4976 		/* nothing left on the send queue; consider it done */
4977 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4978 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4979 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4980 		}
4981 		asoc->peers_rwnd = a_rwnd;
4982 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4983 			/* SWS sender side engages */
4984 			asoc->peers_rwnd = 0;
4985 		}
4986 		/* clean up */
4987 		if ((asoc->stream_queue_cnt == 1) &&
4988 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4989 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4990 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4991 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4992 		}
4993 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4994 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4995 		    (asoc->stream_queue_cnt == 1) &&
4996 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4997 			struct mbuf *op_err;
4998 
4999 			*abort_now = 1;
5000 			/* XXX */
5001 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5002 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5003 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5004 			return;
5005 		}
5006 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5007 		    (asoc->stream_queue_cnt == 0)) {
5008 			struct sctp_nets *netp;
5009 
5010 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5011 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5012 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5013 			}
5014 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5015 			sctp_stop_timers_for_shutdown(stcb);
5016 			if (asoc->alternate) {
5017 				netp = asoc->alternate;
5018 			} else {
5019 				netp = asoc->primary_destination;
5020 			}
5021 			sctp_send_shutdown(stcb, netp);
5022 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5023 			    stcb->sctp_ep, stcb, netp);
5024 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5025 			    stcb->sctp_ep, stcb, NULL);
5026 			return;
5027 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5028 		    (asoc->stream_queue_cnt == 0)) {
5029 			struct sctp_nets *netp;
5030 
5031 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5032 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5033 			sctp_stop_timers_for_shutdown(stcb);
5034 			if (asoc->alternate) {
5035 				netp = asoc->alternate;
5036 			} else {
5037 				netp = asoc->primary_destination;
5038 			}
5039 			sctp_send_shutdown_ack(stcb, netp);
5040 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5041 			    stcb->sctp_ep, stcb, netp);
5042 			return;
5043 		}
5044 	}
5045 	/*
5046 	 * Now here we are going to recycle net_ack for a different use...
5047 	 * HEADS UP.
5048 	 */
5049 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5050 		net->net_ack = 0;
5051 	}
5052 
5053 	/*
5054 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5055 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5056 	 * automatically ensure that.
5057 	 */
5058 	if ((asoc->sctp_cmt_on_off > 0) &&
5059 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5060 	    (cmt_dac_flag == 0)) {
5061 		this_sack_lowest_newack = cum_ack;
5062 	}
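	/*
	 * The DAC flag is carried in the SACK chunk flags by a CMT DAC
	 * capable receiver and (roughly) indicates how many packets this
	 * SACK covers. A flag of 0 grants no extra missing-report credit,
	 * which the assignment above enforces by pinning
	 * this_sack_lowest_newack to the cum-ack.
	 */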
5063 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5064 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5065 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5066 	}
5067 	/* JRS - Use the congestion control given in the CC module */
5068 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5069 
5070 	/* Now are we exiting loss recovery ? */
5071 	if (will_exit_fast_recovery) {
5072 		/* Ok, we must exit fast recovery */
5073 		asoc->fast_retran_loss_recovery = 0;
5074 	}
5075 	if ((asoc->sat_t3_loss_recovery) &&
5076 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5077 		/* end satellite t3 loss recovery */
5078 		asoc->sat_t3_loss_recovery = 0;
5079 	}
5080 	/*
5081 	 * CMT Fast recovery
5082 	 */
5083 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5084 		if (net->will_exit_fast_recovery) {
5085 			/* Ok, we must exit fast recovery */
5086 			net->fast_retran_loss_recovery = 0;
5087 		}
5088 	}
5089 
5090 	/* Adjust and set the new rwnd value */
5091 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5092 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5093 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5094 	}
5095 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5096 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5097 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5098 		/* SWS sender side engages */
5099 		asoc->peers_rwnd = 0;
5100 	}
5101 	if (asoc->peers_rwnd > old_rwnd) {
5102 		win_probe_recovery = 1;
5103 	}
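	/*
	 * A sketch of the bookkeeping above, using this file's names:
	 *
	 *	peers_rwnd = a_rwnd - (total_flight +
	 *	    total_flight_count * sctp_peer_chunk_oh);
	 *
	 * i.e. the peer's advertised window minus the bytes we still have
	 * in flight, charging sctp_peer_chunk_oh of per-chunk overhead,
	 * floored at 0 once it drops below the SWS threshold.
	 */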
5104 
5105 	/*
5106 	 * Now we must set things up so a timer is running for anyone
5107 	 * with outstanding data.
5108 	 */
5109 	done_once = 0;
5110 again:
5111 	j = 0;
5112 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5113 		if (win_probe_recovery && (net->window_probe)) {
5114 			win_probe_recovered = 1;
5115 			/*-
5116 			 * Find the first chunk that was used for a
5117 			 * window probe and clear the event. Put it
5118 			 * back into the send queue as if it had not
5119 			 * been sent.
5120 			 */
5121 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5122 				if (tp1->window_probe) {
5123 					sctp_window_probe_recovery(stcb, asoc, tp1);
5124 					break;
5125 				}
5126 			}
5127 		}
5128 		if (net->flight_size) {
5129 			j++;
5130 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5131 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5132 				    stcb->sctp_ep, stcb, net);
5133 			}
5134 			if (net->window_probe) {
5135 				net->window_probe = 0;
5136 			}
5137 		} else {
5138 			if (net->window_probe) {
5139 				/*
5140 				 * For window probes we must ensure that
5141 				 * a timer is still running here.
5142 				 */
5143 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5144 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5145 					    stcb->sctp_ep, stcb, net);
5146 				}
5147 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5148 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5149 				    stcb, net,
5150 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5151 			}
5152 		}
5153 	}
5154 	if ((j == 0) &&
5155 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5156 	    (asoc->sent_queue_retran_cnt == 0) &&
5157 	    (win_probe_recovered == 0) &&
5158 	    (done_once == 0)) {
5159 		/*
5160 		 * Huh, this should not happen unless all packets are
5161 		 * PR-SCTP and marked to be skipped, of course.
5162 		 */
5163 		if (sctp_fs_audit(asoc)) {
5164 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5165 				net->flight_size = 0;
5166 			}
5167 			asoc->total_flight = 0;
5168 			asoc->total_flight_count = 0;
5169 			asoc->sent_queue_retran_cnt = 0;
5170 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5171 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5172 					sctp_flight_size_increase(tp1);
5173 					sctp_total_flight_increase(stcb, tp1);
5174 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5175 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5176 				}
5177 			}
5178 		}
5179 		done_once = 1;
5180 		goto again;
5181 	}
5182 	/*********************************************/
5183 	/* Here we perform PR-SCTP procedures        */
5184 	/* (section 4.2)                             */
5185 	/*********************************************/
5186 	/* C1. update advancedPeerAckPoint */
5187 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5188 		asoc->advanced_peer_ack_point = cum_ack;
5189 	}
5190 	/* C2. try to further move advancedPeerAckPoint ahead */
5191 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5192 		struct sctp_tmit_chunk *lchk;
5193 		uint32_t old_adv_peer_ack_point;
5194 
5195 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5196 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5197 		/* C3. See if we need to send a Fwd-TSN */
5198 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5199 			/*
5200 			 * ISSUE with ECN, see FWD-TSN processing.
5201 			 */
5202 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5203 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5204 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5205 				    old_adv_peer_ack_point);
5206 			}
5207 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5208 				send_forward_tsn(stcb, asoc);
5209 			} else if (lchk) {
5210 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5211 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5212 					send_forward_tsn(stcb, asoc);
5213 				}
5214 			}
5215 		}
5216 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5217 			if (lchk->whoTo != NULL) {
5218 				break;
5219 			}
5220 		}
5221 		if (lchk != NULL) {
5222 			/* Assure a timer is up */
5223 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5224 			    stcb->sctp_ep, stcb, lchk->whoTo);
5225 		}
5226 	}
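	/*
	 * Illustrative example: if cum_ack is 10 and abandoned PR-SCTP
	 * chunks cover TSNs 11-13, advanced_peer_ack_point moves to 13 and
	 * a FORWARD-TSN(13) is sent so the peer can skip over the
	 * abandoned data instead of reporting it missing.
	 */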
5227 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5228 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5229 		    a_rwnd,
5230 		    stcb->asoc.peers_rwnd,
5231 		    stcb->asoc.total_flight,
5232 		    stcb->asoc.total_output_queue_size);
5233 	}
5234 }
5235 
5236 void
5237 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5238 {
5239 	/* Copy cum-ack */
5240 	uint32_t cum_ack, a_rwnd;
5241 
5242 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5243 	/* Arrange so a_rwnd does NOT change */
5244 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
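	/*
	 * The sack handler below recomputes peers_rwnd as roughly a_rwnd
	 * minus the bytes in flight, so seeding a_rwnd with
	 * peers_rwnd + total_flight makes that subtraction cancel and
	 * leaves peers_rwnd (roughly, ignoring the per-chunk overhead
	 * charge) unchanged.
	 */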
5245 
5246 	/* Now call the express sack handling */
5247 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5248 }
5249 
5250 static void
5251 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5252     struct sctp_stream_in *strmin)
5253 {
5254 	struct sctp_queued_to_read *control, *ncontrol;
5255 	struct sctp_association *asoc;
5256 	uint32_t mid;
5257 	int need_reasm_check = 0;
5258 
5259 	asoc = &stcb->asoc;
5260 	mid = strmin->last_mid_delivered;
5261 	/*
5262 	 * First deliver anything prior to and including the message ID
5263 	 * (MID) that came in.
5264 	 */
5265 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5266 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5267 			/* this is deliverable now */
5268 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5269 				if (control->on_strm_q) {
5270 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5271 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5272 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5273 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5274 #ifdef INVARIANTS
5275 					} else {
5276 						panic("strmin: %p ctl: %p unknown %d",
5277 						    strmin, control, control->on_strm_q);
5278 #endif
5279 					}
5280 					control->on_strm_q = 0;
5281 				}
5282 				/* subtract pending on streams */
5283 				if (asoc->size_on_all_streams >= control->length) {
5284 					asoc->size_on_all_streams -= control->length;
5285 				} else {
5286 #ifdef INVARIANTS
5287 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5288 #else
5289 					asoc->size_on_all_streams = 0;
5290 #endif
5291 				}
5292 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5293 				/* deliver it to at least the delivery-q */
5294 				if (stcb->sctp_socket) {
5295 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5296 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5297 					    control,
5298 					    &stcb->sctp_socket->so_rcv,
5299 					    1, SCTP_READ_LOCK_HELD,
5300 					    SCTP_SO_NOT_LOCKED);
5301 				}
5302 			} else {
5303 				/* It's a fragmented message */
5304 				if (control->first_frag_seen) {
5305 					/*
5306 					 * Make it so this is next to
5307 					 * deliver; we restore it later.
5308 					 */
5309 					strmin->last_mid_delivered = control->mid - 1;
5310 					need_reasm_check = 1;
5311 					break;
5312 				}
5313 			}
5314 		} else {
5315 			/* no more delivery now. */
5316 			break;
5317 		}
5318 	}
5319 	if (need_reasm_check) {
5320 		int ret;
5321 
5322 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5323 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5324 			/* Restore the next to deliver unless we are ahead */
5325 			strmin->last_mid_delivered = mid;
5326 		}
5327 		if (ret == 0) {
5328 			/* Left the partial message at the front of the queue */
5329 			return;
5330 		}
5331 		need_reasm_check = 0;
5332 	}
5333 	/*
5334 	 * Now we must deliver things in the queue the normal way, if any
5335 	 * are now ready.
5336 	 */
5337 	mid = strmin->last_mid_delivered + 1;
5338 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5339 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5340 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5341 				/* this is deliverable now */
5342 				if (control->on_strm_q) {
5343 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5344 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5345 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5346 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5347 #ifdef INVARIANTS
5348 					} else {
5349 						panic("strmin: %p ctl: %p unknown %d",
5350 						    strmin, control, control->on_strm_q);
5351 #endif
5352 					}
5353 					control->on_strm_q = 0;
5354 				}
5355 				/* subtract pending on streams */
5356 				if (asoc->size_on_all_streams >= control->length) {
5357 					asoc->size_on_all_streams -= control->length;
5358 				} else {
5359 #ifdef INVARIANTS
5360 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5361 #else
5362 					asoc->size_on_all_streams = 0;
5363 #endif
5364 				}
5365 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5366 				/* deliver it to at least the delivery-q */
5367 				strmin->last_mid_delivered = control->mid;
5368 				if (stcb->sctp_socket) {
5369 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5370 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5371 					    control,
5372 					    &stcb->sctp_socket->so_rcv, 1,
5373 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5374 				}
5375 				mid = strmin->last_mid_delivered + 1;
5376 			} else {
5377 				/* It's a fragmented message */
5378 				if (control->first_frag_seen) {
5379 					/*
5380 					 * Make it so this is next to
5381 					 * deliver
5382 					 */
5383 					strmin->last_mid_delivered = control->mid - 1;
5384 					need_reasm_check = 1;
5385 					break;
5386 				}
5387 			}
5388 		} else {
5389 			break;
5390 		}
5391 	}
5392 	if (need_reasm_check) {
5393 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5394 	}
5395 }
5396 
5397 static void
5398 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5399     struct sctp_association *asoc, struct sctp_stream_in *strm,
5400     struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5401 {
5402 	struct sctp_tmit_chunk *chk, *nchk;
5403 	int cnt_removed = 0;
5404 
5405 	/*
5406 	 * For now, large messages held on the stream reassembly queue
5407 	 * that are complete will be tossed too. We could in theory do
5408 	 * more work, spinning through and stopping after dumping one
5409 	 * message, i.e. on seeing the start of a new message at the head,
5410 	 * and calling the delivery function to see if it can be
5411 	 * delivered. But for now we just dump everything on the queue.
5412 	 */
5413 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5414 		return;
5415 	}
5416 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5417 		/* Purge hanging chunks */
5418 		if (!asoc->idata_supported && (ordered == 0)) {
5419 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5420 				break;
5421 			}
5422 		}
5423 		cnt_removed++;
5424 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5425 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5426 			asoc->size_on_reasm_queue -= chk->send_size;
5427 		} else {
5428 #ifdef INVARIANTS
5429 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5430 #else
5431 			asoc->size_on_reasm_queue = 0;
5432 #endif
5433 		}
5434 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5435 		if (chk->data) {
5436 			sctp_m_freem(chk->data);
5437 			chk->data = NULL;
5438 		}
5439 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5440 	}
5441 	if (!TAILQ_EMPTY(&control->reasm)) {
5442 		/* This has to be old data, unordered */
5443 		if (control->data) {
5444 			sctp_m_freem(control->data);
5445 			control->data = NULL;
5446 		}
5447 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5448 		chk = TAILQ_FIRST(&control->reasm);
5449 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5450 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5451 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5452 			    chk, SCTP_READ_LOCK_HELD);
5453 		}
5454 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5455 		return;
5456 	}
5457 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5458 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5459 		if (asoc->size_on_all_streams >= control->length) {
5460 			asoc->size_on_all_streams -= control->length;
5461 		} else {
5462 #ifdef INVARIANTS
5463 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5464 #else
5465 			asoc->size_on_all_streams = 0;
5466 #endif
5467 		}
5468 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5469 		control->on_strm_q = 0;
5470 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5471 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5472 		control->on_strm_q = 0;
5473 #ifdef INVARIANTS
5474 	} else if (control->on_strm_q) {
5475 		panic("strm: %p ctl: %p unknown %d",
5476 		    strm, control, control->on_strm_q);
5477 #endif
5478 	}
5479 	control->on_strm_q = 0;
5480 	if (control->on_read_q == 0) {
5481 		sctp_free_remote_addr(control->whoFrom);
5482 		if (control->data) {
5483 			sctp_m_freem(control->data);
5484 			control->data = NULL;
5485 		}
5486 		sctp_free_a_readq(stcb, control);
5487 	}
5488 }
5489 
5490 void
5491 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5492     struct sctp_forward_tsn_chunk *fwd,
5493     int *abort_flag, struct mbuf *m, int offset)
5494 {
5495 	/* The pr-sctp fwd tsn */
5496 	/*
5497 	 * Here we will perform all the data receiver side steps for
5498 	 * processing FwdTSN, as required by the PR-SCTP draft.
5499 	 *
5500 	 * Assume we get FwdTSN(x):
5501 	 * 1) update local cumTSN to x
5502 	 * 2) try to further advance cumTSN to x + others we have
5503 	 * 3) examine and update the re-ordering queue on pr-in-streams
5504 	 * 4) clean up the re-assembly queue
5505 	 * 5) send a SACK to report where we are.
5506 	 */
5507 	struct sctp_association *asoc;
5508 	uint32_t new_cum_tsn, gap;
5509 	unsigned int i, fwd_sz, m_size;
5510 	uint32_t str_seq;
5511 	struct sctp_stream_in *strm;
5512 	struct sctp_queued_to_read *control, *sv;
5513 
5514 	asoc = &stcb->asoc;
5515 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5516 		SCTPDBG(SCTP_DEBUG_INDATA1,
5517 		    "Bad size too small/big fwd-tsn\n");
5518 		return;
5519 	}
5520 	m_size = (stcb->asoc.mapping_array_size << 3);
5521 	/*************************************************************/
5522 	/* 1. Here we update local cumTSN and shift the bitmap array */
5523 	/*************************************************************/
5524 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5525 
5526 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5527 		/* Already got there ... */
5528 		return;
5529 	}
5530 	/*
5531 	 * Now we know the new TSN is more advanced; let's find the
5532 	 * actual gap.
5533 	 */
5534 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
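	/*
	 * SCTP_CALC_TSN_TO_GAP computes the bitmap slot for new_cum_tsn,
	 * in effect a modular subtraction:
	 *
	 *	gap = new_cum_tsn - mapping_array_base_tsn;	(mod 2^32)
	 *
	 * so gap indexes the bit for new_cum_tsn relative to the base of
	 * the mapping array.
	 */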
5535 	asoc->cumulative_tsn = new_cum_tsn;
5536 	if (gap >= m_size) {
5537 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5538 			struct mbuf *op_err;
5539 			char msg[SCTP_DIAG_INFO_LEN];
5540 
5541 			/*
5542 			 * Out of range (even for single-byte chunks in the
5543 			 * rwnd I give out). This must be an attacker.
5544 			 */
5545 			*abort_flag = 1;
5546 			SCTP_SNPRINTF(msg, sizeof(msg),
5547 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5548 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5549 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5550 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5551 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5552 			return;
5553 		}
5554 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5555 
5556 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5557 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5558 		asoc->highest_tsn_inside_map = new_cum_tsn;
5559 
5560 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5561 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5562 
5563 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5564 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5565 		}
5566 	} else {
5567 		SCTP_TCB_LOCK_ASSERT(stcb);
5568 		for (i = 0; i <= gap; i++) {
5569 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5570 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5571 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5572 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5573 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5574 				}
5575 			}
5576 		}
5577 	}
5578 	/*************************************************************/
5579 	/* 2. Clear up re-assembly queue                             */
5580 	/*************************************************************/
5581 
5582 	/* This is now done as part of clearing up the stream/seq */
5583 	if (asoc->idata_supported == 0) {
5584 		uint16_t sid;
5585 
5586 		/* Flush all the un-ordered data based on cum-tsn */
5587 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5588 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5589 			strm = &asoc->strmin[sid];
5590 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5591 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5592 			}
5593 		}
5594 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5595 	}
5596 	/*******************************************************/
5597 	/* 3. Update the PR-stream re-ordering queues and fix  */
5598 	/*    delivery issues as needed.                       */
5599 	/*******************************************************/
5600 	fwd_sz -= sizeof(*fwd);
5601 	if (m && fwd_sz) {
5602 		/* New method. */
5603 		unsigned int num_str;
5604 		uint32_t mid;
5605 		uint16_t sid;
5606 		uint16_t ordered, flags;
5607 		struct sctp_strseq *stseq, strseqbuf;
5608 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5609 
5610 		offset += sizeof(*fwd);
5611 
5612 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5613 		if (asoc->idata_supported) {
5614 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5615 		} else {
5616 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5617 		}
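		/*
		 * Wire layout walked below: after the new cumulative TSN,
		 * the FORWARD-TSN chunk carries one entry per stream to be
		 * resynchronized; a (sid, ssn) pair in the classic format
		 * (struct sctp_strseq), or (sid, flags, mid) when I-DATA is
		 * in use (struct sctp_strseq_mid).
		 */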
5618 		for (i = 0; i < num_str; i++) {
5619 			if (asoc->idata_supported) {
5620 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5621 				    sizeof(struct sctp_strseq_mid),
5622 				    (uint8_t *)&strseqbuf_m);
5623 				offset += sizeof(struct sctp_strseq_mid);
5624 				if (stseq_m == NULL) {
5625 					break;
5626 				}
5627 				sid = ntohs(stseq_m->sid);
5628 				mid = ntohl(stseq_m->mid);
5629 				flags = ntohs(stseq_m->flags);
5630 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5631 					ordered = 0;
5632 				} else {
5633 					ordered = 1;
5634 				}
5635 			} else {
5636 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5637 				    sizeof(struct sctp_strseq),
5638 				    (uint8_t *)&strseqbuf);
5639 				offset += sizeof(struct sctp_strseq);
5640 				if (stseq == NULL) {
5641 					break;
5642 				}
5643 				sid = ntohs(stseq->sid);
5644 				mid = (uint32_t)ntohs(stseq->ssn);
5645 				ordered = 1;
5646 			}
5647 			/* conversion to host byte order was done above */
5648 
5649 			/* now process */
5650 
5651 			/*
5652 			 * OK, we now look for the stream/seq on the read
5653 			 * queue where it's not all delivered. If we find it,
5654 			 * we transmute the read entry into a PDI_ABORTED.
5655 			 */
5656 			if (sid >= asoc->streamincnt) {
5657 				/* screwed up streams, stop!  */
5658 				break;
5659 			}
5660 			if ((asoc->str_of_pdapi == sid) &&
5661 			    (asoc->ssn_of_pdapi == mid)) {
5662 				/*
5663 				 * If this is the one we were partially
5664 				 * delivering, then we no longer are.
5665 				 * Note this will change with the
5666 				 * reassembly re-write.
5667 				 */
5668 				asoc->fragmented_delivery_inprogress = 0;
5669 			}
5670 			strm = &asoc->strmin[sid];
5671 			if (ordered) {
5672 				TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5673 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5674 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5675 					}
5676 				}
5677 			} else {
5678 				if (asoc->idata_supported) {
5679 					TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5680 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5681 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5682 						}
5683 					}
5684 				} else {
5685 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5686 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5687 					}
5688 				}
5689 			}
5690 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5691 				if ((control->sinfo_stream == sid) &&
5692 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5693 					str_seq = (sid << 16) | (0x0000ffff & mid);
5694 					control->pdapi_aborted = 1;
5695 					sv = stcb->asoc.control_pdapi;
5696 					control->end_added = 1;
5697 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5698 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5699 						if (asoc->size_on_all_streams >= control->length) {
5700 							asoc->size_on_all_streams -= control->length;
5701 						} else {
5702 #ifdef INVARIANTS
5703 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5704 #else
5705 							asoc->size_on_all_streams = 0;
5706 #endif
5707 						}
5708 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5709 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5710 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5711 #ifdef INVARIANTS
5712 					} else if (control->on_strm_q) {
5713 						panic("strm: %p ctl: %p unknown %d",
5714 						    strm, control, control->on_strm_q);
5715 #endif
5716 					}
5717 					control->on_strm_q = 0;
5718 					stcb->asoc.control_pdapi = control;
5719 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5720 					    stcb,
5721 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5722 					    (void *)&str_seq,
5723 					    SCTP_SO_NOT_LOCKED);
5724 					stcb->asoc.control_pdapi = sv;
5725 					break;
5726 				} else if ((control->sinfo_stream == sid) &&
5727 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5728 					/* We are past our victim SSN */
5729 					break;
5730 				}
5731 			}
5732 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5733 				/* Update the sequence number */
5734 				strm->last_mid_delivered = mid;
5735 			}
5736 			/* now kick the stream the new way */
5737 			/* sa_ignore NO_NULL_CHK */
5738 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5739 		}
5740 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5741 	}
5742 	/*
5743 	 * Now slide things forward.
5744 	 */
5745 	sctp_slide_mapping_arrays(stcb);
5746 }
5747