xref: /freebsd/sys/netinet/sctp_indata.c (revision e796cc77c586c2955b2f3940dbf4991b31e8d289)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is), since it will be sent with the data for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and what we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS (silly window syndrome) avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
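
/*
 * Worked example of the arithmetic above (illustrative numbers only, not
 * taken from a real trace): with SCTP_SB_LIMIT_RCV() == 64000 and 2000
 * bytes waiting in 4 chunks on the reassembly queue, the grant is roughly
 *
 *	calc = sbspace(so_rcv)			(e.g. 62000)
 *	calc -= 2000 + 4 * MSIZE		(data plus per-mbuf overhead)
 *	calc -= my_rwnd_control_len		(ancillary-data overhead)
 *
 * clamped to a floor of 1 rather than 0 so the peer keeps probing (SWS
 * avoidance).
 */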


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
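
/*
 * The mbuf built above is handed to the application as msg_control. A
 * minimal userland sketch of consuming it (hypothetical receiver code,
 * not part of this file; assumes SCTP_RECVRCVINFO was enabled via
 * setsockopt() and that recvmsg() filled in a struct msghdr msg):
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo rcv;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *			// rcv.rcv_sid, rcv.rcv_tsn, ... now usable
 *		}
 *	}
 */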


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
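
/*
 * Example of the gap arithmetic above (illustrative values): with
 * mapping_array_base_tsn = 0x1000 and tsn = 0x1005,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 5, i.e. bit 5 of the mapping
 * arrays.  Moving that bit from mapping_array to nr_mapping_array marks
 * the TSN non-renegable: it will still be gap-acked, but will never be
 * revoked if the kernel later drains the reassembly queue.
 */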

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in queue is bigger than the new
				 * one, insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?! Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
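
/*
 * The loop above is an insertion sort keyed on the (possibly wrapping)
 * MID.  For example, with an ordered queue already holding MIDs 3 and 7,
 * a new control with MID 5 is linked in front of 7; a second arrival of
 * MID 5 returns -1 and the caller aborts the association.
 */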

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next
 * one to go, OR put it in the correct place in the delivery queue.  If
 * we do append to the so_buf, keep doing so until we are out of order,
 * as long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
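
/*
 * Delivery example for the ordered path above (hypothetical MIDs): with
 * last_mid_delivered == 41, an arriving complete message with MID 42
 * goes straight to the read queue, and the in-queue walk then drains 43,
 * 44, ... as long as they are complete and consecutive.  A message with
 * MID 45 arriving first would instead be insertion-sorted onto the
 * stream queue by sctp_place_control_in_stream().
 */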


static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->mid = control->mid;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
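
/*
 * Illustration of the old-unordered quirk handled above (hypothetical
 * TSNs): without I-DATA every unordered fragment lands on MID 0 and its
 * FSN is simply its TSN.  A message fragmented as TSNs 10, 11, 12 is
 * collapsed by pulling fsn_included + 1 (11, then 12) off the reasm list
 * until the E-bit chunk completes it; anything queued beyond the E-bit
 * belongs to the next message and is respun onto a fresh control.
 */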

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing, if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; must have gotten an un-ordered above
		 * being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
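
/*
 * pd_point above controls when partial delivery (PD-API) starts: it is
 * the smaller of a fixed fraction of the socket receive buffer
 * (SCTP_SB_LIMIT_RCV() >> SCTP_PARTIAL_DELIVERY_SHIFT) and the
 * endpoint's configured partial_delivery_point.  With hypothetical
 * numbers, a buffer fraction of 32000 bytes and a configured point of
 * 4096 give pd_point = 4096, so an incomplete message that has
 * accumulated 5000 bytes is pushed to the reader early rather than held
 * for full reassembly.
 */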


uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must do some locking.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
1362 static void
1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1364     struct sctp_queued_to_read *control,
1365     struct sctp_tmit_chunk *chk,
1366     int created_control,
1367     int *abort_flag, uint32_t tsn)
1368 {
1369 	uint32_t next_fsn;
1370 	struct sctp_tmit_chunk *at, *nat;
1371 	struct sctp_stream_in *strm;
1372 	int do_wakeup, unordered;
1373 	uint32_t lenadded;
1374 
1375 	strm = &asoc->strmin[control->sinfo_stream];
1376 	/*
1377 	 * For old un-ordered data chunks.
1378 	 */
1379 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1380 		unordered = 1;
1381 	} else {
1382 		unordered = 0;
1383 	}
1384 	/* Must be added to the stream-in queue */
1385 	if (created_control) {
1386 		if (unordered == 0) {
1387 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1388 		}
1389 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1390 			/* Duplicate SSN? */
1391 			sctp_abort_in_reasm(stcb, control, chk,
1392 			    abort_flag,
1393 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1394 			sctp_clean_up_control(stcb, control);
1395 			return;
1396 		}
1397 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1398 			/*
1399 			 * Ok we created this control and now lets validate
1400 			 * that its legal i.e. there is a B bit set, if not
1401 			 * and we have up to the cum-ack then its invalid.
1402 			 */
1403 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1404 				sctp_abort_in_reasm(stcb, control, chk,
1405 				    abort_flag,
1406 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1407 				return;
1408 			}
1409 		}
1410 	}
1411 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1412 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1413 		return;
1414 	}
1415 	/*
1416 	 * Ok we must queue the chunk into the reasembly portion: o if its
1417 	 * the first it goes to the control mbuf. o if its not first but the
1418 	 * next in sequence it goes to the control, and each succeeding one
1419 	 * in order also goes. o if its not in order we place it on the list
1420 	 * in its place.
1421 	 */
1422 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1423 		/* Its the very first one. */
1424 		SCTPDBG(SCTP_DEBUG_XXX,
1425 		    "chunk is a first fsn: %u becomes fsn_included\n",
1426 		    chk->rec.data.fsn);
1427 		if (control->first_frag_seen) {
1428 			/*
1429 			 * Error on senders part, they either sent us two
1430 			 * data chunks with FIRST, or they sent two
1431 			 * un-ordered chunks that were fragmented at the
1432 			 * same time in the same stream.
1433 			 */
1434 			sctp_abort_in_reasm(stcb, control, chk,
1435 			    abort_flag,
1436 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1437 			return;
1438 		}
1439 		control->first_frag_seen = 1;
1440 		control->sinfo_ppid = chk->rec.data.ppid;
1441 		control->sinfo_tsn = chk->rec.data.tsn;
1442 		control->fsn_included = chk->rec.data.fsn;
1443 		control->data = chk->data;
1444 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1445 		chk->data = NULL;
1446 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1447 		sctp_setup_tail_pointer(control);
1448 		asoc->size_on_all_streams += control->length;
1449 	} else {
1450 		/* Place the chunk in our list */
1451 		int inserted = 0;
1452 
1453 		if (control->last_frag_seen == 0) {
1454 			/* Still willing to raise highest FSN seen */
1455 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1456 				SCTPDBG(SCTP_DEBUG_XXX,
1457 				    "We have a new top_fsn: %u\n",
1458 				    chk->rec.data.fsn);
1459 				control->top_fsn = chk->rec.data.fsn;
1460 			}
1461 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1462 				SCTPDBG(SCTP_DEBUG_XXX,
1463 				    "The last fsn is now in place fsn: %u\n",
1464 				    chk->rec.data.fsn);
1465 				control->last_frag_seen = 1;
1466 			}
1467 			if (asoc->idata_supported || control->first_frag_seen) {
1468 				/*
1469 				 * For IDATA we always check since we know
1470 				 * that the first fragment is 0. For old
1471 				 * DATA we have to receive the first before
1472 				 * we know the first FSN (which is the TSN).
1473 				 */
1474 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1475 					/*
1476 					 * We have already delivered up to
1477 					 * this so its a dup
1478 					 */
1479 					sctp_abort_in_reasm(stcb, control, chk,
1480 					    abort_flag,
1481 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1482 					return;
1483 				}
1484 			}
1485 		} else {
1486 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1487 				/* Second last? huh? */
1488 				SCTPDBG(SCTP_DEBUG_XXX,
1489 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1490 				    chk->rec.data.fsn, control->top_fsn);
1491 				sctp_abort_in_reasm(stcb, control,
1492 				    chk, abort_flag,
1493 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1494 				return;
1495 			}
1496 			if (asoc->idata_supported || control->first_frag_seen) {
1497 				/*
1498 				 * For IDATA we always check since we know
1499 				 * that the first fragment is 0. For old
1500 				 * DATA we have to receive the first before
1501 				 * we know the first FSN (which is the TSN).
1502 				 */
1503 
1504 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1505 					/*
1506 					 * We have already delivered up to
1507 					 * this so its a dup
1508 					 */
1509 					SCTPDBG(SCTP_DEBUG_XXX,
1510 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1511 					    chk->rec.data.fsn, control->fsn_included);
1512 					sctp_abort_in_reasm(stcb, control, chk,
1513 					    abort_flag,
1514 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1515 					return;
1516 				}
1517 			}
1518 			/*
1519 			 * validate not beyond top FSN if we have seen last
1520 			 * one
1521 			 */
1522 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1523 				SCTPDBG(SCTP_DEBUG_XXX,
1524 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1525 				    chk->rec.data.fsn,
1526 				    control->top_fsn);
1527 				sctp_abort_in_reasm(stcb, control, chk,
1528 				    abort_flag,
1529 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1530 				return;
1531 			}
1532 		}
1533 		/*
1534 		 * If we reach here, we need to place the new chunk in the
1535 		 * reassembly for this control.
1536 		 */
1537 		SCTPDBG(SCTP_DEBUG_XXX,
1538 		    "chunk is a not first fsn: %u needs to be inserted\n",
1539 		    chk->rec.data.fsn);
1540 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1541 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1542 				/*
1543 				 * This one in queue is bigger than the new
1544 				 * one, insert the new one before at.
1545 				 */
1546 				SCTPDBG(SCTP_DEBUG_XXX,
1547 				    "Insert it before fsn: %u\n",
1548 				    at->rec.data.fsn);
1549 				asoc->size_on_reasm_queue += chk->send_size;
1550 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1551 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1552 				inserted = 1;
1553 				break;
1554 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1555 				/*
1556 				 * Gak, He sent me a duplicate str seq
1557 				 * number
1558 				 */
1559 				/*
1560 				 * foo bar, I guess I will just free this
1561 				 * new guy, should we abort too? FIX ME
1562 				 * MAYBE? Or it COULD be that the SSN's have
1563 				 * wrapped. Maybe I should compare to TSN
1564 				 * somehow... sigh for now just blow away
1565 				 * the chunk!
1566 				 */
1567 				SCTPDBG(SCTP_DEBUG_XXX,
1568 				    "Duplicate to fsn: %u -- abort\n",
1569 				    at->rec.data.fsn);
1570 				sctp_abort_in_reasm(stcb, control,
1571 				    chk, abort_flag,
1572 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1573 				return;
1574 			}
1575 		}
1576 		if (inserted == 0) {
1577 			/* Goes on the end */
1578 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1579 			    chk->rec.data.fsn);
1580 			asoc->size_on_reasm_queue += chk->send_size;
1581 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1582 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1583 		}
1584 	}
1585 	/*
1586 	 * Ok lets see if we can suck any up into the control structure that
1587 	 * are in seq if it makes sense.
1588 	 */
1589 	do_wakeup = 0;
1590 	/*
1591 	 * If the first fragment has not been seen there is no sense in
1592 	 * looking.
1593 	 */
1594 	if (control->first_frag_seen) {
1595 		next_fsn = control->fsn_included + 1;
1596 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1597 			if (at->rec.data.fsn == next_fsn) {
1598 				/* We can add this one now to the control */
1599 				SCTPDBG(SCTP_DEBUG_XXX,
1600 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1601 				    control, at,
1602 				    at->rec.data.fsn,
1603 				    next_fsn, control->fsn_included);
1604 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1605 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1606 				if (control->on_read_q) {
1607 					do_wakeup = 1;
1608 				} else {
1609 					/*
1610 					 * We only add to the
1611 					 * size-on-all-streams if it's not on
1612 					 * the read q. The read q flag will
1613 					 * cause a sballoc, so it's accounted
1614 					 * for there.
1615 					 */
1616 					asoc->size_on_all_streams += lenadded;
1617 				}
1618 				next_fsn++;
1619 				if (control->end_added && control->pdapi_started) {
1620 					if (strm->pd_api_started) {
1621 						strm->pd_api_started = 0;
1622 						control->pdapi_started = 0;
1623 					}
1624 					if (control->on_read_q == 0) {
1625 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1626 						    control,
1627 						    &stcb->sctp_socket->so_rcv, control->end_added,
1628 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1629 					}
1630 					break;
1631 				}
1632 			} else {
1633 				break;
1634 			}
1635 		}
1636 	}
1637 	if (do_wakeup) {
1638 		/* Need to wakeup the reader */
1639 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1640 	}
1641 }
1642 
1643 static struct sctp_queued_to_read *
1644 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1645 {
1646 	struct sctp_queued_to_read *control;
1647 
1648 	if (ordered) {
1649 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1650 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1651 				break;
1652 			}
1653 		}
1654 	} else {
1655 		if (idata_supported) {
1656 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1657 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1658 					break;
1659 				}
1660 			}
1661 		} else {
1662 			control = TAILQ_FIRST(&strm->uno_inqueue);
1663 		}
1664 	}
1665 	return (control);
1666 }
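/*
 * Editorial note on the lookup above: ordered messages are keyed by MID
 * on the stream's inqueue, and unordered I-DATA messages by MID on the
 * uno_inqueue. Without I-DATA support, unordered fragments carry no
 * per-message key on the wire, so at most one unordered reassembly can
 * be in progress per stream and TAILQ_FIRST() suffices.
 */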
1667 
1668 static int
1669 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1670     struct mbuf **m, int offset, int chk_length,
1671     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1672     int *break_flag, int last_chunk, uint8_t chk_type)
1673 {
1674 	/* Process a data chunk */
1676 	struct sctp_tmit_chunk *chk;
1677 	uint32_t tsn, fsn, gap, mid;
1678 	struct mbuf *dmbuf;
1679 	int the_len;
1680 	int need_reasm_check = 0;
1681 	uint16_t sid;
1682 	struct mbuf *op_err;
1683 	char msg[SCTP_DIAG_INFO_LEN];
1684 	struct sctp_queued_to_read *control, *ncontrol;
1685 	uint32_t ppid;
1686 	uint8_t chk_flags;
1687 	struct sctp_stream_reset_list *liste;
1688 	int ordered;
1689 	size_t clen;
1690 	int created_control = 0;
1691 
1692 	if (chk_type == SCTP_IDATA) {
1693 		struct sctp_idata_chunk *chunk, chunk_buf;
1694 
1695 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1696 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1697 		chk_flags = chunk->ch.chunk_flags;
1698 		clen = sizeof(struct sctp_idata_chunk);
1699 		tsn = ntohl(chunk->dp.tsn);
1700 		sid = ntohs(chunk->dp.sid);
1701 		mid = ntohl(chunk->dp.mid);
1702 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1703 			fsn = 0;
1704 			ppid = chunk->dp.ppid_fsn.ppid;
1705 		} else {
1706 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1707 			ppid = 0xffffffff;	/* Use as an invalid value. */
1708 		}
1709 	} else {
1710 		struct sctp_data_chunk *chunk, chunk_buf;
1711 
1712 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1713 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1714 		chk_flags = chunk->ch.chunk_flags;
1715 		clen = sizeof(struct sctp_data_chunk);
1716 		tsn = ntohl(chunk->dp.tsn);
1717 		sid = ntohs(chunk->dp.sid);
1718 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1719 		fsn = tsn;
1720 		ppid = chunk->dp.ppid;
1721 	}
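	/*
	 * Editorial sketch of the two wire formats parsed above (RFC 4960
	 * DATA vs. RFC 8260 I-DATA); field widths in bytes:
	 *
	 *   DATA:   TSN(4) SID(2) SSN(2)      PPID(4)
	 *   I-DATA: TSN(4) SID(2) reserved(2) MID(4) PPID-or-FSN(4)
	 *
	 * For I-DATA the last 32-bit word carries the PPID on the first
	 * fragment and the FSN on every other fragment, which is why fsn
	 * is forced to 0 when SCTP_DATA_FIRST_FRAG is set. For plain DATA
	 * the TSN doubles as the FSN and the 16-bit SSN is widened into
	 * the 32-bit mid.
	 */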
1722 	if ((size_t)chk_length == clen) {
1723 		/*
1724 		 * Need to send an abort since we had an empty data chunk.
1725 		 */
1726 		op_err = sctp_generate_no_user_data_cause(tsn);
1727 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1728 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1729 		*abort_flag = 1;
1730 		return (0);
1731 	}
1732 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1733 		asoc->send_sack = 1;
1734 	}
1735 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1736 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1737 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1738 	}
1739 	if (stcb == NULL) {
1740 		return (0);
1741 	}
1742 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1743 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1744 		/* It is a duplicate */
1745 		SCTP_STAT_INCR(sctps_recvdupdata);
1746 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1747 			/* Record a dup for the next outbound sack */
1748 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1749 			asoc->numduptsns++;
1750 		}
1751 		asoc->send_sack = 1;
1752 		return (0);
1753 	}
1754 	/* Calculate the number of TSNs between the base and this TSN */
1755 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1756 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1757 		/* Can't hold the bit in the mapping at max array, toss it */
1758 		return (0);
1759 	}
1760 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1761 		SCTP_TCB_LOCK_ASSERT(stcb);
1762 		if (sctp_expand_mapping_array(asoc, gap)) {
1763 			/* Can't expand, drop it */
1764 			return (0);
1765 		}
1766 	}
1767 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1768 		*high_tsn = tsn;
1769 	}
1770 	/* See if we have received this one already */
1771 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1772 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1773 		SCTP_STAT_INCR(sctps_recvdupdata);
1774 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 			/* Record a dup for the next outbound sack */
1776 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1777 			asoc->numduptsns++;
1778 		}
1779 		asoc->send_sack = 1;
1780 		return (0);
1781 	}
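	/*
	 * Worked example of the gap/bit bookkeeping above (assuming the
	 * usual modulo-2^32 distance for SCTP_CALC_TSN_TO_GAP): with
	 * mapping_array_base_tsn = 1000 and tsn = 1013, gap = 13, which
	 * lands on bit 5 (gap & 7) of byte 1 (gap >> 3). The TSN is a
	 * duplicate if that bit is already set in either the renegable
	 * map (mapping_array) or the non-renegable map (nr_mapping_array).
	 */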
1782 	/*
1783 	 * Check the GONE flag. Duplicates would have caused a SACK to be
1784 	 * sent up above.
1785 	 */
1786 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1787 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1788 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1789 		/*
1790 		 * Wait a minute, the socket is gone and there is no longer
1791 		 * a receiver. Send the peer an ABORT!
1792 		 */
1793 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1794 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1795 		*abort_flag = 1;
1796 		return (0);
1797 	}
1798 	/*
1799 	 * Now before going further we see if there is room. If NOT then we
1800 	 * MAY let one through only IF this TSN is the one we are waiting
1801 	 * for on a partial delivery API.
1802 	 */
1803 
1804 	/* Is the stream valid? */
1805 	if (sid >= asoc->streamincnt) {
1806 		struct sctp_error_invalid_stream *cause;
1807 
1808 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1809 		    0, M_NOWAIT, 1, MT_DATA);
1810 		if (op_err != NULL) {
1811 			/* add some space up front so prepend will work well */
1812 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1813 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1814 			/*
1815 			 * Error causes are formatted like parameters: two
1816 			 * back-to-back headers, one with the error type and
1817 			 * size, the other with the stream id and a reserved field.
1818 			 */
1819 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1820 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1821 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1822 			cause->stream_id = htons(sid);
1823 			cause->reserved = htons(0);
1824 			sctp_queue_op_err(stcb, op_err);
1825 		}
1826 		SCTP_STAT_INCR(sctps_badsid);
1827 		SCTP_TCB_LOCK_ASSERT(stcb);
1828 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1829 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1830 			asoc->highest_tsn_inside_nr_map = tsn;
1831 		}
1832 		if (tsn == (asoc->cumulative_tsn + 1)) {
1833 			/* Update cum-ack */
1834 			asoc->cumulative_tsn = tsn;
1835 		}
1836 		return (0);
1837 	}
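	/*
	 * Editorial note: although the chunk for the invalid SID is
	 * dropped, its TSN was marked present in the nr-mapping array and
	 * the cum-ack may advance, so the peer sees the TSN acknowledged
	 * and will not retransmit it forever; the queued ERROR cause
	 * reports the invalid stream instead.
	 */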
1838 	/*
1839 	 * If it's a fragmented message, let's see if we can find the control
1840 	 * on the reassembly queues.
1841 	 */
1842 	if ((chk_type == SCTP_IDATA) &&
1843 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1844 	    (fsn == 0)) {
1845 		/*
1846 		 * The first *must* be fsn 0, and other (middle/end) pieces
1847 		 * can *not* be fsn 0. XXX: This can happen in case of a
1848 		 * wrap around. Ignore this for now.
1849 		 */
1850 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1851 		    mid, chk_flags);
1852 		goto err_out;
1853 	}
1854 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1855 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1856 	    chk_flags, control);
1857 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1858 		/* See if we can find the re-assembly entity */
1859 		if (control != NULL) {
1860 			/* We found something, does it belong? */
1861 			if (ordered && (mid != control->mid)) {
1862 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1863 		err_out:
1864 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1865 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1866 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1867 				*abort_flag = 1;
1868 				return (0);
1869 			}
1870 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1871 				/*
1872 				 * We can't have a switched order with an
1873 				 * unordered chunk
1874 				 */
1875 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1876 				    tsn);
1877 				goto err_out;
1878 			}
1879 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1880 				/*
1881 				 * We can't have a switched unordered with a
1882 				 * ordered chunk
1883 				 */
1884 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1885 				    tsn);
1886 				goto err_out;
1887 			}
1888 		}
1889 	} else {
1890 		/*
1891 		 * It's a complete segment. Let's validate that we don't have
1892 		 * a re-assembly going on with the same Stream/Seq (for
1893 		 * ordered) or in the same Stream for unordered.
1894 		 */
1895 		if (control != NULL) {
1896 			if (ordered || asoc->idata_supported) {
1897 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1898 				    chk_flags, mid);
1899 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1900 				goto err_out;
1901 			} else {
1902 				if ((tsn == control->fsn_included + 1) &&
1903 				    (control->end_added == 0)) {
1904 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1905 					goto err_out;
1906 				} else {
1907 					control = NULL;
1908 				}
1909 			}
1910 		}
1911 	}
1912 	/* now do the tests */
1913 	if (((asoc->cnt_on_all_streams +
1914 	    asoc->cnt_on_reasm_queue +
1915 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1916 	    (((int)asoc->my_rwnd) <= 0)) {
1917 		/*
1918 		 * When we have NO room in the rwnd we check to make sure
1919 		 * the reader is doing its job...
1920 		 */
1921 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1922 			/* some to read, wake-up */
1923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1924 			struct socket *so;
1925 
1926 			so = SCTP_INP_SO(stcb->sctp_ep);
1927 			atomic_add_int(&stcb->asoc.refcnt, 1);
1928 			SCTP_TCB_UNLOCK(stcb);
1929 			SCTP_SOCKET_LOCK(so, 1);
1930 			SCTP_TCB_LOCK(stcb);
1931 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1932 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1933 				/* assoc was freed while we were unlocked */
1934 				SCTP_SOCKET_UNLOCK(so, 1);
1935 				return (0);
1936 			}
1937 #endif
1938 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1939 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1940 			SCTP_SOCKET_UNLOCK(so, 1);
1941 #endif
1942 		}
1943 		/* now is it in the mapping array of what we have accepted? */
1944 		if (chk_type == SCTP_DATA) {
1945 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1946 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1947 				/* Nope, not in the valid range; dump it */
1948 		dump_packet:
1949 				sctp_set_rwnd(stcb, asoc);
1950 				if ((asoc->cnt_on_all_streams +
1951 				    asoc->cnt_on_reasm_queue +
1952 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1953 					SCTP_STAT_INCR(sctps_datadropchklmt);
1954 				} else {
1955 					SCTP_STAT_INCR(sctps_datadroprwnd);
1956 				}
1957 				*break_flag = 1;
1958 				return (0);
1959 			}
1960 		} else {
1961 			if (control == NULL) {
1962 				goto dump_packet;
1963 			}
1964 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1965 				goto dump_packet;
1966 			}
1967 		}
1968 	}
1969 #ifdef SCTP_ASOCLOG_OF_TSNS
1970 	SCTP_TCB_LOCK_ASSERT(stcb);
1971 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1972 		asoc->tsn_in_at = 0;
1973 		asoc->tsn_in_wrapped = 1;
1974 	}
1975 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1976 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1977 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1978 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1979 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1980 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1981 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1982 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1983 	asoc->tsn_in_at++;
1984 #endif
1985 	/*
1986 	 * Before we continue, let's validate that we are not being fooled
1987 	 * by an evil attacker. The TSN spread the mapping array allows is
1988 	 * only N * 8 bits worth of chunks, so there is no way our stream
1989 	 * sequence numbers could have wrapped. We of course only validate
1990 	 * the FIRST fragment, so the bit must be set.
1991 	 */
1992 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1993 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1994 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1995 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1996 		/* The incoming sseq is behind where we last delivered? */
1997 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1998 		    mid, asoc->strmin[sid].last_mid_delivered);
1999 
2000 		if (asoc->idata_supported) {
2001 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2002 			    asoc->strmin[sid].last_mid_delivered,
2003 			    tsn,
2004 			    sid,
2005 			    mid);
2006 		} else {
2007 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2008 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2009 			    tsn,
2010 			    sid,
2011 			    (uint16_t)mid);
2012 		}
2013 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2014 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2015 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2016 		*abort_flag = 1;
2017 		return (0);
2018 	}
2019 	if (chk_type == SCTP_IDATA) {
2020 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2021 	} else {
2022 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2023 	}
2024 	if (last_chunk == 0) {
2025 		if (chk_type == SCTP_IDATA) {
2026 			dmbuf = SCTP_M_COPYM(*m,
2027 			    (offset + sizeof(struct sctp_idata_chunk)),
2028 			    the_len, M_NOWAIT);
2029 		} else {
2030 			dmbuf = SCTP_M_COPYM(*m,
2031 			    (offset + sizeof(struct sctp_data_chunk)),
2032 			    the_len, M_NOWAIT);
2033 		}
2034 #ifdef SCTP_MBUF_LOGGING
2035 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2036 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2037 		}
2038 #endif
2039 	} else {
2040 		/* We can steal the last chunk */
2041 		int l_len;
2042 
2043 		dmbuf = *m;
2044 		/* lop off the top part */
2045 		if (chk_type == SCTP_IDATA) {
2046 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2047 		} else {
2048 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2049 		}
2050 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2051 			l_len = SCTP_BUF_LEN(dmbuf);
2052 		} else {
2053 			/*
2054 			 * Need to count up the size; hopefully we do not hit
2055 			 * this too often.
2056 			 */
2057 			struct mbuf *lat;
2058 
2059 			l_len = 0;
2060 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2061 				l_len += SCTP_BUF_LEN(lat);
2062 			}
2063 		}
2064 		if (l_len > the_len) {
2065 			/* Trim the excess bytes off the end too (negative m_adj trims the tail) */
2066 			m_adj(dmbuf, -(l_len - the_len));
2067 		}
2068 	}
2069 	if (dmbuf == NULL) {
2070 		SCTP_STAT_INCR(sctps_nomem);
2071 		return (0);
2072 	}
2073 	/*
2074 	 * Now, no matter what, we need a control; get one if we don't have
2075 	 * one (we may have gotten it above when we found the message was
2076 	 * fragmented).
2077 	 */
2078 	if (control == NULL) {
2079 		sctp_alloc_a_readq(stcb, control);
2080 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2081 		    ppid,
2082 		    sid,
2083 		    chk_flags,
2084 		    NULL, fsn, mid);
2085 		if (control == NULL) {
2086 			SCTP_STAT_INCR(sctps_nomem);
2087 			return (0);
2088 		}
2089 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2090 			struct mbuf *mm;
2091 
2092 			control->data = dmbuf;
2093 			for (mm = control->data; mm; mm = mm->m_next) {
2094 				control->length += SCTP_BUF_LEN(mm);
2095 			}
2096 			control->tail_mbuf = NULL;
2097 			control->end_added = 1;
2098 			control->last_frag_seen = 1;
2099 			control->first_frag_seen = 1;
2100 			control->fsn_included = fsn;
2101 			control->top_fsn = fsn;
2102 		}
2103 		created_control = 1;
2104 	}
2105 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2106 	    chk_flags, ordered, mid, control);
2107 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2108 	    TAILQ_EMPTY(&asoc->resetHead) &&
2109 	    ((ordered == 0) ||
2110 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2111 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2112 		/* Candidate for express delivery */
2113 		 * It's not fragmented, no PD-API is up, nothing is in the
2114 		 * delivery queue, it's un-ordered OR ordered and the next to
2115 		 * deliver AND nothing else is stuck on the stream queue,
2116 		 * and there is room for it in the socket buffer. Let's just
2117 		 * stuff it up the buffer....
2118 		 * stuff it up the buffer....
2119 		 */
2120 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2121 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2122 			asoc->highest_tsn_inside_nr_map = tsn;
2123 		}
2124 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2125 		    control, mid);
2126 
2127 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2128 		    control, &stcb->sctp_socket->so_rcv,
2129 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2130 
2131 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2132 			/* for ordered, bump what we delivered */
2133 			asoc->strmin[sid].last_mid_delivered++;
2134 		}
2135 		SCTP_STAT_INCR(sctps_recvexpress);
2136 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2137 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2138 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2139 		}
2140 		control = NULL;
2141 		goto finish_express_del;
2142 	}
2143 	/* Now will we need a chunk too? */
2144 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2145 		sctp_alloc_a_chunk(stcb, chk);
2146 		if (chk == NULL) {
2147 			/* No memory so we drop the chunk */
2148 			SCTP_STAT_INCR(sctps_nomem);
2149 			if (last_chunk == 0) {
2150 				/* we copied it, free the copy */
2151 				sctp_m_freem(dmbuf);
2152 			}
2153 			return (0);
2154 		}
2155 		chk->rec.data.tsn = tsn;
2156 		chk->no_fr_allowed = 0;
2157 		chk->rec.data.fsn = fsn;
2158 		chk->rec.data.mid = mid;
2159 		chk->rec.data.sid = sid;
2160 		chk->rec.data.ppid = ppid;
2161 		chk->rec.data.context = stcb->asoc.context;
2162 		chk->rec.data.doing_fast_retransmit = 0;
2163 		chk->rec.data.rcv_flags = chk_flags;
2164 		chk->asoc = asoc;
2165 		chk->send_size = the_len;
2166 		chk->whoTo = net;
2167 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2168 		    chk,
2169 		    control, mid);
2170 		atomic_add_int(&net->ref_count, 1);
2171 		chk->data = dmbuf;
2172 	}
2173 	/* Set the appropriate TSN mark */
2174 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2175 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2176 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2177 			asoc->highest_tsn_inside_nr_map = tsn;
2178 		}
2179 	} else {
2180 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2181 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2182 			asoc->highest_tsn_inside_map = tsn;
2183 		}
2184 	}
2185 	/* Now is it complete (i.e. not fragmented)? */
2186 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2187 		/*
2188 		 * Special check for when streams are resetting. We could be
2189 		 * smarter about this and check the actual stream to see if
2190 		 * it is not being reset... that way we would not create
2191 		 * head-of-line blocking between streams being reset and those not being
2192 		 * reset.
2193 		 *
2194 		 */
2195 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2196 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2197 			/*
2198 			 * yep, it's past where we need to reset... go ahead
2199 			 * and queue it.
2200 			 */
2201 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2202 				/* first one on */
2203 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2204 			} else {
2205 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2206 				unsigned char inserted = 0;
2207 
2208 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2209 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2211 						continue;
2212 					} else {
2213 						/* found it */
2214 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2215 						inserted = 1;
2216 						break;
2217 					}
2218 				}
2219 				if (inserted == 0) {
2220 					/*
2221 					 * not inserted before any existing
2222 					 * entry, so it must be put at the
2223 					 * end.
2224 					 */
2225 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2226 				}
2227 			}
2228 			goto finish_express_del;
2229 		}
2230 		if (chk_flags & SCTP_DATA_UNORDERED) {
2231 			/* queue directly into socket buffer */
2232 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2233 			    control, mid);
2234 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2235 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2236 			    control,
2237 			    &stcb->sctp_socket->so_rcv, 1,
2238 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2239 
2240 		} else {
2241 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2242 			    mid);
2243 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2244 			if (*abort_flag) {
2245 				if (last_chunk) {
2246 					*m = NULL;
2247 				}
2248 				return (0);
2249 			}
2250 		}
2251 		goto finish_express_del;
2252 	}
2253 	/* If we reach here its a reassembly */
2254 	need_reasm_check = 1;
2255 	SCTPDBG(SCTP_DEBUG_XXX,
2256 	    "Queue data to stream for reasm control: %p MID: %u\n",
2257 	    control, mid);
2258 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2259 	if (*abort_flag) {
2260 		/*
2261 		 * the assoc is now gone and chk was put onto the reasm
2262 		 * queue, which has all been freed.
2263 		 */
2264 		if (last_chunk) {
2265 			*m = NULL;
2266 		}
2267 		return (0);
2268 	}
2269 finish_express_del:
2270 	/* Here we tidy up things */
2271 	if (tsn == (asoc->cumulative_tsn + 1)) {
2272 		/* Update cum-ack */
2273 		asoc->cumulative_tsn = tsn;
2274 	}
2275 	if (last_chunk) {
2276 		*m = NULL;
2277 	}
2278 	if (ordered) {
2279 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2280 	} else {
2281 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2282 	}
2283 	SCTP_STAT_INCR(sctps_recvdata);
2284 	/* Set it present please */
2285 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2286 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2287 	}
2288 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2289 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2290 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2291 	}
2292 	if (need_reasm_check) {
2293 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2294 		need_reasm_check = 0;
2295 	}
2296 	/* check the special flag for stream resets */
2297 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2298 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2299 		/*
2300 		 * We have finished working through the backlogged TSNs; now
2301 		 * it is time to reset streams. 1: call the reset function. 2:
2302 		 * free the pending_reply space. 3: distribute any chunks in
2303 		 * the pending_reply_queue.
2304 		 */
2305 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2306 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2307 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2308 		SCTP_FREE(liste, SCTP_M_STRESET);
2309 		/* sa_ignore FREED_MEMORY */
2310 		liste = TAILQ_FIRST(&asoc->resetHead);
2311 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2312 			/* All can be removed */
2313 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2314 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2315 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2316 				if (*abort_flag) {
2317 					return (0);
2318 				}
2319 				if (need_reasm_check) {
2320 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2321 					need_reasm_check = 0;
2322 				}
2323 			}
2324 		} else {
2325 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2326 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2327 					break;
2328 				}
2329 				/*
2330 				 * If control->sinfo_tsn is <= liste->tsn we
2331 				 * can process it, i.e. the negation of
2332 				 * control->sinfo_tsn > liste->tsn.
2333 				 */
2334 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2335 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2336 				if (*abort_flag) {
2337 					return (0);
2338 				}
2339 				if (need_reasm_check) {
2340 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2341 					need_reasm_check = 0;
2342 				}
2343 			}
2344 		}
2345 	}
2346 	return (1);
2347 }
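/*
 * Editorial sketch (not part of the original file): the TSN/MID
 * comparisons used throughout (SCTP_TSN_GT/GE, SCTP_MID_GE, ...) are
 * serial-number comparisons in the spirit of RFC 1982, so they remain
 * correct across 32-bit wrap-around. A minimal stand-in, assuming the
 * macros behave like a signed distance test:
 */
#if 0
#include <stdint.h>

static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* true when a is "later" than b, even across a wrap */
	return ((int32_t)(a - b) > 0);
}

static int
tsn_ge(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) >= 0);
}
/* e.g. tsn_gt(0x00000002, 0xfffffff0) is true across the wrap. */
#endif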
2348 
2349 static const int8_t sctp_map_lookup_tab[256] = {
2350 	0, 1, 0, 2, 0, 1, 0, 3,
2351 	0, 1, 0, 2, 0, 1, 0, 4,
2352 	0, 1, 0, 2, 0, 1, 0, 3,
2353 	0, 1, 0, 2, 0, 1, 0, 5,
2354 	0, 1, 0, 2, 0, 1, 0, 3,
2355 	0, 1, 0, 2, 0, 1, 0, 4,
2356 	0, 1, 0, 2, 0, 1, 0, 3,
2357 	0, 1, 0, 2, 0, 1, 0, 6,
2358 	0, 1, 0, 2, 0, 1, 0, 3,
2359 	0, 1, 0, 2, 0, 1, 0, 4,
2360 	0, 1, 0, 2, 0, 1, 0, 3,
2361 	0, 1, 0, 2, 0, 1, 0, 5,
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 4,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 7,
2366 	0, 1, 0, 2, 0, 1, 0, 3,
2367 	0, 1, 0, 2, 0, 1, 0, 4,
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 5,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 4,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 6,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 4,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 5,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 4,
2380 	0, 1, 0, 2, 0, 1, 0, 3,
2381 	0, 1, 0, 2, 0, 1, 0, 8
2382 };
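/*
 * Editorial note: sctp_map_lookup_tab[val] is the number of consecutive
 * 1-bits in val counting from bit 0, i.e. how many in-sequence TSNs a
 * partially filled map byte contributes. For example, val = 0x17
 * (binary 00010111) has three consecutive low bits set, so
 * sctp_map_lookup_tab[0x17] == 3. A fully set byte (0xff) never
 * consults the table; the scan just adds 8 and moves on.
 */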
2383 
2384 
2385 void
2386 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2387 {
2388 	/*
2389 	 * Now we also need to check the mapping array in a couple of ways.
2390 	 * 1) Did we move the cum-ack point?
2391 	 *
2392 	 * When you first glance at this you might think that all entries
2393 	 * that make up the position of the cum-ack would be in the
2394 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2395 	 * deliverable. That's true with one exception: when it's a fragmented
2396 	 * message we may not deliver the data until some threshold (or all
2397 	 * of it) is in place. So we must OR the nr_mapping_array and
2398 	 * mapping_array to get a true picture of the cum-ack.
2399 	 */
2400 	struct sctp_association *asoc;
2401 	int at;
2402 	uint8_t val;
2403 	int slide_from, slide_end, lgap, distance;
2404 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2405 
2406 	asoc = &stcb->asoc;
2407 
2408 	old_cumack = asoc->cumulative_tsn;
2409 	old_base = asoc->mapping_array_base_tsn;
2410 	old_highest = asoc->highest_tsn_inside_map;
2411 	/*
2412 	 * We could probably improve this a small bit by calculating the
2413 	 * offset of the current cum-ack as the starting point.
2414 	 */
2415 	at = 0;
2416 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2417 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2418 		if (val == 0xff) {
2419 			at += 8;
2420 		} else {
2421 			/* there is a 0 bit */
2422 			at += sctp_map_lookup_tab[val];
2423 			break;
2424 		}
2425 	}
2426 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2427 
2428 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2429 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2430 #ifdef INVARIANTS
2431 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2432 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2433 #else
2434 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2435 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2436 		sctp_print_mapping_array(asoc);
2437 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2438 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2439 		}
2440 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2441 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2442 #endif
2443 	}
2444 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2445 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2446 	} else {
2447 		highest_tsn = asoc->highest_tsn_inside_map;
2448 	}
2449 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2450 		/* The complete array was completed by a single FR */
2451 		/* highest becomes the cum-ack */
2452 		int clr;
2453 #ifdef INVARIANTS
2454 		unsigned int i;
2455 #endif
2456 
2457 		/* clear the array */
2458 		clr = ((at + 7) >> 3);
2459 		if (clr > asoc->mapping_array_size) {
2460 			clr = asoc->mapping_array_size;
2461 		}
2462 		memset(asoc->mapping_array, 0, clr);
2463 		memset(asoc->nr_mapping_array, 0, clr);
2464 #ifdef INVARIANTS
2465 		for (i = 0; i < asoc->mapping_array_size; i++) {
2466 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2467 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2468 				sctp_print_mapping_array(asoc);
2469 			}
2470 		}
2471 #endif
2472 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2473 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2474 	} else if (at >= 8) {
2475 		/* we can slide the mapping array down */
2476 		/* slide_from holds where we hit the first NON 0xff byte */
2477 
2478 		/*
2479 		 * now calculate the ceiling of the move using our highest
2480 		 * TSN value
2481 		 */
2482 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2483 		slide_end = (lgap >> 3);
2484 		if (slide_end < slide_from) {
2485 			sctp_print_mapping_array(asoc);
2486 #ifdef INVARIANTS
2487 			panic("impossible slide");
2488 #else
2489 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2490 			    lgap, slide_end, slide_from, at);
2491 			return;
2492 #endif
2493 		}
2494 		if (slide_end > asoc->mapping_array_size) {
2495 #ifdef INVARIANTS
2496 			panic("would overrun buffer");
2497 #else
2498 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2499 			    asoc->mapping_array_size, slide_end);
2500 			slide_end = asoc->mapping_array_size;
2501 #endif
2502 		}
2503 		distance = (slide_end - slide_from) + 1;
2504 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2505 			sctp_log_map(old_base, old_cumack, old_highest,
2506 			    SCTP_MAP_PREPARE_SLIDE);
2507 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2508 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2509 		}
2510 		if (distance + slide_from > asoc->mapping_array_size ||
2511 		    distance < 0) {
2512 			/*
2513 			 * Here we do NOT slide forward the array so that
2514 			 * hopefully when more data comes in to fill it up
2515 			 * we will be able to slide it forward. Really I
2516 			 * don't think this should happen :-0
2517 			 */
2518 
2519 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2521 				    (uint32_t)asoc->mapping_array_size,
2522 				    SCTP_MAP_SLIDE_NONE);
2523 			}
2524 		} else {
2525 			int ii;
2526 
2527 			for (ii = 0; ii < distance; ii++) {
2528 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2529 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2531 			}
2532 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2533 				asoc->mapping_array[ii] = 0;
2534 				asoc->nr_mapping_array[ii] = 0;
2535 			}
2536 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2537 				asoc->highest_tsn_inside_map += (slide_from << 3);
2538 			}
2539 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2540 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2541 			}
2542 			asoc->mapping_array_base_tsn += (slide_from << 3);
2543 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2544 				sctp_log_map(asoc->mapping_array_base_tsn,
2545 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2546 				    SCTP_MAP_SLIDE_RESULT);
2547 			}
2548 		}
2549 	}
2550 }
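/*
 * Worked example of the slide above (hypothetical values): with
 * mapping_array_base_tsn = 1000, bytes 0-1 of the OR'ed maps equal to
 * 0xff and byte 2 equal to 0x03, the scan yields at = 8 + 8 +
 * sctp_map_lookup_tab[0x03] = 18, so cumulative_tsn = 1000 + 17 = 1017.
 * With highest_tsn = 1030: lgap = 30, slide_end = 30 >> 3 = 3,
 * slide_from = 2 and distance = 2, so bytes 2-3 are copied down to 0-1,
 * the tail is zeroed, and mapping_array_base_tsn advances by
 * slide_from << 3 = 16 to 1016.
 */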
2551 
2552 void
2553 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2554 {
2555 	struct sctp_association *asoc;
2556 	uint32_t highest_tsn;
2557 	int is_a_gap;
2558 
2559 	sctp_slide_mapping_arrays(stcb);
2560 	asoc = &stcb->asoc;
2561 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2562 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2563 	} else {
2564 		highest_tsn = asoc->highest_tsn_inside_map;
2565 	}
2566 	/* Is there a gap now? */
2567 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2568 
2569 	/*
2570 	 * Now we need to see if we need to queue a sack or just start the
2571 	 * timer (if allowed).
2572 	 */
2573 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2574 		/*
2575 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2576 		 * sure the SACK timer is off and instead send a SHUTDOWN
2577 		 * and a SACK.
2578 		 */
2579 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2580 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2581 			    stcb->sctp_ep, stcb, NULL,
2582 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2583 		}
2584 		sctp_send_shutdown(stcb,
2585 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2586 		if (is_a_gap) {
2587 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2588 		}
2589 	} else {
2590 		/*
2591 		 * CMT DAC algorithm: increase number of packets received
2592 		 * since last ack
2593 		 */
2594 		stcb->asoc.cmt_dac_pkts_rcvd++;
2595 
2596 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2597 							 * SACK */
2598 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2599 							 * longer is one */
2600 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2601 		    (is_a_gap) ||	/* is still a gap */
2602 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2603 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2604 		    ) {
2606 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2607 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2608 			    (stcb->asoc.send_sack == 0) &&
2609 			    (stcb->asoc.numduptsns == 0) &&
2610 			    (stcb->asoc.delayed_ack) &&
2611 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2613 				/*
2614 				 * even in the face of reordering.
2615 				 * Therefore, acks that do not have to be
2616 				 * sent because of the above reasons will
2617 				 * be delayed. That is, acks
2619 				 * reasons, will be delayed. That is, acks
2620 				 * that would have been sent due to gap
2621 				 * reports will be delayed with DAC. Start
2622 				 * the delayed ack timer.
2623 				 */
2624 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2625 				    stcb->sctp_ep, stcb, NULL);
2626 			} else {
2627 				/*
2628 				 * Ok we must build a SACK since the timer
2629 				 * is pending, we got our first packet OR
2630 				 * there are gaps or duplicates.
2631 				 */
2632 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2633 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2634 			}
2635 		} else {
2636 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2637 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2638 				    stcb->sctp_ep, stcb, NULL);
2639 			}
2640 		}
2641 	}
2642 }
2643 
2644 int
2645 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2646     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2647     struct sctp_nets *net, uint32_t *high_tsn)
2648 {
2649 	struct sctp_chunkhdr *ch, chunk_buf;
2650 	struct sctp_association *asoc;
2651 	int num_chunks = 0;	/* number of control chunks processed */
2652 	int stop_proc = 0;
2653 	int break_flag, last_chunk;
2654 	int abort_flag = 0, was_a_gap;
2655 	struct mbuf *m;
2656 	uint32_t highest_tsn;
2657 	uint16_t chk_length;
2658 
2659 	/* set the rwnd */
2660 	sctp_set_rwnd(stcb, &stcb->asoc);
2661 
2662 	m = *mm;
2663 	SCTP_TCB_LOCK_ASSERT(stcb);
2664 	asoc = &stcb->asoc;
2665 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2666 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2667 	} else {
2668 		highest_tsn = asoc->highest_tsn_inside_map;
2669 	}
2670 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2671 	/*
2672 	 * Set up where we got the last DATA packet from for any SACK that
2673 	 * may need to go out. Don't bump the net. This is done ONLY when a
2674 	 * chunk is assigned.
2675 	 */
2676 	asoc->last_data_chunk_from = net;
2677 
2678 	/*-
2679 	 * Now before we proceed we must figure out if this is a wasted
2680 	 * cluster... i.e. it is a small packet sent in and yet the driver
2681 	 * underneath allocated a full cluster for it. If so we must copy it
2682 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2683 	 * with cluster starvation. Note for __Panda__ we don't do this
2684 	 * since it has clusters all the way down to 64 bytes.
2685 	 */
2686 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2687 		/* we only handle mbufs that are singletons.. not chains */
2688 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2689 		if (m) {
2690 			/* ok, let's see if we can copy the data up */
2691 			caddr_t *from, *to;
2692 
2693 			/* get the pointers and copy */
2694 			to = mtod(m, caddr_t *);
2695 			from = mtod((*mm), caddr_t *);
2696 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2697 			/* copy the length and free up the old */
2698 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2699 			sctp_m_freem(*mm);
2700 			/* success, back copy */
2701 			*mm = m;
2702 		} else {
2703 			/* We are in trouble in the mbuf world .. yikes */
2704 			m = *mm;
2705 		}
2706 	}
2707 	/* get pointer to the first chunk header */
2708 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2709 	    sizeof(struct sctp_chunkhdr),
2710 	    (uint8_t *)&chunk_buf);
2711 	if (ch == NULL) {
2712 		return (1);
2713 	}
2714 	/*
2715 	 * process all DATA chunks...
2716 	 */
2717 	*high_tsn = asoc->cumulative_tsn;
2718 	break_flag = 0;
2719 	asoc->data_pkts_seen++;
2720 	while (stop_proc == 0) {
2721 		/* validate chunk length */
2722 		chk_length = ntohs(ch->chunk_length);
2723 		if (length - *offset < chk_length) {
2724 			/* all done, mutilated chunk */
2725 			stop_proc = 1;
2726 			continue;
2727 		}
2728 		if ((asoc->idata_supported == 1) &&
2729 		    (ch->chunk_type == SCTP_DATA)) {
2730 			struct mbuf *op_err;
2731 			char msg[SCTP_DIAG_INFO_LEN];
2732 
2733 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2734 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2735 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2736 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2737 			return (2);
2738 		}
2739 		if ((asoc->idata_supported == 0) &&
2740 		    (ch->chunk_type == SCTP_IDATA)) {
2741 			struct mbuf *op_err;
2742 			char msg[SCTP_DIAG_INFO_LEN];
2743 
2744 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2745 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2746 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2747 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2748 			return (2);
2749 		}
2750 		if ((ch->chunk_type == SCTP_DATA) ||
2751 		    (ch->chunk_type == SCTP_IDATA)) {
2752 			uint16_t clen;
2753 
2754 			if (ch->chunk_type == SCTP_DATA) {
2755 				clen = sizeof(struct sctp_data_chunk);
2756 			} else {
2757 				clen = sizeof(struct sctp_idata_chunk);
2758 			}
2759 			if (chk_length < clen) {
2760 				/*
2761 				 * Need to send an abort since we had an
2762 				 * invalid data chunk.
2763 				 */
2764 				struct mbuf *op_err;
2765 				char msg[SCTP_DIAG_INFO_LEN];
2766 
2767 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2768 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2769 				    chk_length);
2770 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2771 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2772 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2773 				return (2);
2774 			}
2775 #ifdef SCTP_AUDITING_ENABLED
2776 			sctp_audit_log(0xB1, 0);
2777 #endif
2778 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2779 				last_chunk = 1;
2780 			} else {
2781 				last_chunk = 0;
2782 			}
2783 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2784 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2785 			    last_chunk, ch->chunk_type)) {
2786 				num_chunks++;
2787 			}
2788 			if (abort_flag)
2789 				return (2);
2790 
2791 			if (break_flag) {
2792 				/*
2793 				 * Set because we are out of rwnd space and
2794 				 * have no drop report space left.
2795 				 */
2796 				stop_proc = 1;
2797 				continue;
2798 			}
2799 		} else {
2800 			/* not a data chunk in the data region */
2801 			switch (ch->chunk_type) {
2802 			case SCTP_INITIATION:
2803 			case SCTP_INITIATION_ACK:
2804 			case SCTP_SELECTIVE_ACK:
2805 			case SCTP_NR_SELECTIVE_ACK:
2806 			case SCTP_HEARTBEAT_REQUEST:
2807 			case SCTP_HEARTBEAT_ACK:
2808 			case SCTP_ABORT_ASSOCIATION:
2809 			case SCTP_SHUTDOWN:
2810 			case SCTP_SHUTDOWN_ACK:
2811 			case SCTP_OPERATION_ERROR:
2812 			case SCTP_COOKIE_ECHO:
2813 			case SCTP_COOKIE_ACK:
2814 			case SCTP_ECN_ECHO:
2815 			case SCTP_ECN_CWR:
2816 			case SCTP_SHUTDOWN_COMPLETE:
2817 			case SCTP_AUTHENTICATION:
2818 			case SCTP_ASCONF_ACK:
2819 			case SCTP_PACKET_DROPPED:
2820 			case SCTP_STREAM_RESET:
2821 			case SCTP_FORWARD_CUM_TSN:
2822 			case SCTP_ASCONF:
2823 				{
2824 					/*
2825 					 * Now, what do we do with KNOWN
2826 					 * chunks that are NOT in the right
2827 					 * place?
2828 					 *
2829 					 * They are not allowed after DATA
2830 					 * chunks in a packet, so treat the
2831 					 * packet as a protocol violation
2832 					 * and abort the association, as
2833 					 * the code below does.
2834 					 */
2835 					struct mbuf *op_err;
2836 					char msg[SCTP_DIAG_INFO_LEN];
2837 
2838 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2839 					    ch->chunk_type);
2840 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2841 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2842 					return (2);
2843 				}
2844 			default:
2845 				/*
2846 				 * Unknown chunk type: use bit rules after
2847 				 * checking length
2848 				 */
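				/*
				 * Editorial note: per RFC 4960, Section
				 * 3.2, the two high-order bits of an
				 * unrecognized chunk type encode the
				 * required action. Bit 0x40 set means
				 * report the chunk in an ERROR; bit 0x80
				 * set means skip it and keep processing,
				 * while 0x80 clear means discard the rest
				 * of the packet. That is exactly what the
				 * code below implements.
				 */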
2849 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2850 					/*
2851 					 * Need to send an abort since we
2852 					 * had an invalid chunk.
2853 					 */
2854 					struct mbuf *op_err;
2855 					char msg[SCTP_DIAG_INFO_LEN];
2856 
2857 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2858 					    chk_length);
2859 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2860 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2861 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2862 					return (2);
2863 				}
2864 				if (ch->chunk_type & 0x40) {
2865 					/* Add an error report to the queue */
2866 					struct mbuf *op_err;
2867 					struct sctp_gen_error_cause *cause;
2868 
2869 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2870 					    0, M_NOWAIT, 1, MT_DATA);
2871 					if (op_err != NULL) {
2872 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2873 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2874 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2875 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2876 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2877 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2878 							sctp_queue_op_err(stcb, op_err);
2879 						} else {
2880 							sctp_m_freem(op_err);
2881 						}
2882 					}
2883 				}
2884 				if ((ch->chunk_type & 0x80) == 0) {
2885 					/* discard the rest of this packet */
2886 					stop_proc = 1;
2887 				}	/* else skip this bad chunk and
2888 					 * continue... */
2889 				break;
2890 			}	/* switch of chunk type */
2891 		}
2892 		*offset += SCTP_SIZE32(chk_length);
2893 		if ((*offset >= length) || stop_proc) {
2894 			/* no more data left in the mbuf chain */
2895 			stop_proc = 1;
2896 			continue;
2897 		}
2898 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2899 		    sizeof(struct sctp_chunkhdr),
2900 		    (uint8_t *)&chunk_buf);
2901 		if (ch == NULL) {
2902 			*offset = length;
2903 			stop_proc = 1;
2904 			continue;
2905 		}
2906 	}
2907 	if (break_flag) {
2908 		/*
2909 		 * we need to report rwnd overrun drops.
2910 		 */
2911 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2912 	}
2913 	if (num_chunks) {
2914 		/*
2915 		 * We got data: update the time for auto-close and give the
2916 		 * peer credit for being alive.
2917 		 */
2918 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2919 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2920 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2921 			    stcb->asoc.overall_error_count,
2922 			    0,
2923 			    SCTP_FROM_SCTP_INDATA,
2924 			    __LINE__);
2925 		}
2926 		stcb->asoc.overall_error_count = 0;
2927 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2928 	}
2929 	/* now service all of the reassembly queue if needed */
2930 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2931 		/* Assure that we ack right away */
2932 		stcb->asoc.send_sack = 1;
2933 	}
2934 	/* Start a sack timer or QUEUE a SACK for sending */
2935 	sctp_sack_check(stcb, was_a_gap);
2936 	return (0);
2937 }
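/*
 * Editorial sketch (assumptions: a contiguous buffer instead of an mbuf
 * chain, and SCTP_SIZE32() rounding a length up to a 4-byte boundary,
 * as used above): the TLV walk that sctp_process_data() performs over
 * the DATA region of a packet.
 */
#if 0
static int
walk_chunks(const uint8_t *pkt, int length)
{
	int offset = 0;

	while (offset + (int)sizeof(struct sctp_chunkhdr) <= length) {
		const struct sctp_chunkhdr *ch =
		    (const struct sctp_chunkhdr *)(pkt + offset);
		uint16_t chk_length = ntohs(ch->chunk_length);

		if (chk_length < sizeof(struct sctp_chunkhdr) ||
		    length - offset < chk_length) {
			break;	/* mutilated chunk, stop */
		}
		/* ... dispatch on ch->chunk_type here ... */
		/* chunks are 32-bit padded: a 17-byte chunk advances 20 */
		offset += SCTP_SIZE32(chk_length);
	}
	return (offset);
}
#endif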
2938 
2939 static int
2940 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2941     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2942     int *num_frs,
2943     uint32_t *biggest_newly_acked_tsn,
2944     uint32_t *this_sack_lowest_newack,
2945     int *rto_ok)
2946 {
2947 	struct sctp_tmit_chunk *tp1;
2948 	unsigned int theTSN;
2949 	int j, wake_him = 0, circled = 0;
2950 
2951 	/* Recover the tp1 we last saw */
2952 	tp1 = *p_tp1;
2953 	if (tp1 == NULL) {
2954 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2955 	}
2956 	for (j = frag_strt; j <= frag_end; j++) {
2957 		theTSN = j + last_tsn;
2958 		while (tp1) {
2959 			if (tp1->rec.data.doing_fast_retransmit)
2960 				(*num_frs) += 1;
2961 
2962 			/*-
2963 			 * CMT: CUCv2 algorithm. For each TSN being
2964 			 * processed from the sent queue, track the
2965 			 * next expected pseudo-cumack, or
2966 			 * rtx_pseudo_cumack, if required. Separate
2967 			 * cumack trackers for first transmissions,
2968 			 * and retransmissions.
2969 			 */
2970 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2971 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2972 			    (tp1->snd_count == 1)) {
2973 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2974 				tp1->whoTo->find_pseudo_cumack = 0;
2975 			}
2976 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2977 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2978 			    (tp1->snd_count > 1)) {
2979 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2980 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2981 			}
2982 			if (tp1->rec.data.tsn == theTSN) {
2983 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2984 					/*-
2985 					 * must be held until
2986 					 * cum-ack passes
2987 					 */
2988 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2989 						/*-
2990 						 * If it is less than RESEND, it is
2991 						 * now no-longer in flight.
2992 						 * Higher values may already be set
2993 						 * via previous Gap Ack Blocks...
2994 						 * i.e. ACKED or RESEND.
2995 						 */
2996 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2997 						    *biggest_newly_acked_tsn)) {
2998 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2999 						}
3000 						/*-
3001 						 * CMT: SFR algo (and HTNA) - set
3002 						 * saw_newack to 1 for dest being
3003 						 * newly acked. update
3004 						 * this_sack_highest_newack if
3005 						 * appropriate.
3006 						 */
3007 						if (tp1->rec.data.chunk_was_revoked == 0)
3008 							tp1->whoTo->saw_newack = 1;
3009 
3010 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3011 						    tp1->whoTo->this_sack_highest_newack)) {
3012 							tp1->whoTo->this_sack_highest_newack =
3013 							    tp1->rec.data.tsn;
3014 						}
3015 						/*-
3016 						 * CMT DAC algo: also update
3017 						 * this_sack_lowest_newack
3018 						 */
3019 						if (*this_sack_lowest_newack == 0) {
3020 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3021 								sctp_log_sack(*this_sack_lowest_newack,
3022 								    last_tsn,
3023 								    tp1->rec.data.tsn,
3024 								    0,
3025 								    0,
3026 								    SCTP_LOG_TSN_ACKED);
3027 							}
3028 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3029 						}
3030 						/*-
3031 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3032 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3033 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3034 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3035 						 * Separate pseudo_cumack trackers for first transmissions and
3036 						 * retransmissions.
3037 						 */
3038 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3039 							if (tp1->rec.data.chunk_was_revoked == 0) {
3040 								tp1->whoTo->new_pseudo_cumack = 1;
3041 							}
3042 							tp1->whoTo->find_pseudo_cumack = 1;
3043 						}
3044 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3045 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3046 						}
3047 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3048 							if (tp1->rec.data.chunk_was_revoked == 0) {
3049 								tp1->whoTo->new_pseudo_cumack = 1;
3050 							}
3051 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3052 						}
3053 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3054 							sctp_log_sack(*biggest_newly_acked_tsn,
3055 							    last_tsn,
3056 							    tp1->rec.data.tsn,
3057 							    frag_strt,
3058 							    frag_end,
3059 							    SCTP_LOG_TSN_ACKED);
3060 						}
3061 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3062 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3063 							    tp1->whoTo->flight_size,
3064 							    tp1->book_size,
3065 							    (uint32_t)(uintptr_t)tp1->whoTo,
3066 							    tp1->rec.data.tsn);
3067 						}
3068 						sctp_flight_size_decrease(tp1);
3069 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3070 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3071 							    tp1);
3072 						}
3073 						sctp_total_flight_decrease(stcb, tp1);
3074 
3075 						tp1->whoTo->net_ack += tp1->send_size;
3076 						if (tp1->snd_count < 2) {
3077 							/*-
3078 							 * True non-retransmitted chunk
3079 							 */
3080 							tp1->whoTo->net_ack2 += tp1->send_size;
3081 
3082 							/*-
3083 							 * update RTO too ?
3084 							 */
3085 							if (tp1->do_rtt) {
3086 								if (*rto_ok) {
3087 									tp1->whoTo->RTO =
3088 									    sctp_calculate_rto(stcb,
3089 									    &stcb->asoc,
3090 									    tp1->whoTo,
3091 									    &tp1->sent_rcv_time,
3092 									    SCTP_RTT_FROM_DATA);
3093 									*rto_ok = 0;
3094 								}
3095 								if (tp1->whoTo->rto_needed == 0) {
3096 									tp1->whoTo->rto_needed = 1;
3097 								}
3098 								tp1->do_rtt = 0;
3099 							}
3100 						}
3101 					}
3102 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3103 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3104 						    stcb->asoc.this_sack_highest_gap)) {
3105 							stcb->asoc.this_sack_highest_gap =
3106 							    tp1->rec.data.tsn;
3107 						}
3108 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3109 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3110 #ifdef SCTP_AUDITING_ENABLED
3111 							sctp_audit_log(0xB2,
3112 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3113 #endif
3114 						}
3115 					}
3116 					/*-
3117 					 * All chunks NOT UNSENT fall through here and are marked
3118 					 * (leave PR-SCTP ones that are to skip alone though)
3119 					 */
3120 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3121 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3122 						tp1->sent = SCTP_DATAGRAM_MARKED;
3123 					}
3124 					if (tp1->rec.data.chunk_was_revoked) {
3125 						/* deflate the cwnd */
3126 						tp1->whoTo->cwnd -= tp1->book_size;
3127 						tp1->rec.data.chunk_was_revoked = 0;
3128 					}
3129 					/* NR Sack code here */
3130 					if (nr_sacking &&
3131 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3132 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3133 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3134 #ifdef INVARIANTS
3135 						} else {
3136 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3137 #endif
3138 						}
3139 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3140 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3141 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3142 							stcb->asoc.trigger_reset = 1;
3143 						}
3144 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3145 						if (tp1->data) {
3146 							/*
3147 							 * sa_ignore
3148 							 * NO_NULL_CHK
3149 							 */
3150 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3151 							sctp_m_freem(tp1->data);
3152 							tp1->data = NULL;
3153 						}
3154 						wake_him++;
3155 					}
3156 				}
3157 				break;
3158 			}	/* if (tp1->tsn == theTSN) */
3159 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3160 				break;
3161 			}
3162 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3163 			if ((tp1 == NULL) && (circled == 0)) {
3164 				circled++;
3165 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3166 			}
3167 		}		/* end while (tp1) */
3168 		if (tp1 == NULL) {
3169 			circled = 0;
3170 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3171 		}
3172 		/* In case the fragments were not in order we must reset */
3173 	}			/* end for (j = fragStart */
3174 	*p_tp1 = tp1;
3175 	return (wake_him);	/* Return value only used for nr-sack */
3176 }
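
/*
 * Illustration (a sketch, not compiled): the loop above leans on
 * SCTP_TSN_GT()/SCTP_TSN_GE(), which must be wrap-safe because TSNs are
 * 32-bit serial numbers. A minimal stand-alone comparison, assuming
 * RFC 1982 style serial arithmetic, looks like the following; the
 * authoritative macros live in the SCTP headers.
 */
#if 0
static int
tsn_gt_sketch(uint32_t a, uint32_t b)
{
	/* "a is newer than b", tolerating 32-bit wrap-around. */
	return (((a < b) && ((uint32_t)(b - a) > (1U << 31))) ||
	    ((a > b) && ((uint32_t)(a - b) < (1U << 31))));
}
#endif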
3177 
3178 
3179 static int
3180 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3181     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3182     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3183     int num_seg, int num_nr_seg, int *rto_ok)
3184 {
3185 	struct sctp_gap_ack_block *frag, block;
3186 	struct sctp_tmit_chunk *tp1;
3187 	int i;
3188 	int num_frs = 0;
3189 	int chunk_freed;
3190 	int non_revocable;
3191 	uint16_t frag_strt, frag_end, prev_frag_end;
3192 
3193 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3194 	prev_frag_end = 0;
3195 	chunk_freed = 0;
3196 
3197 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3198 		if (i == num_seg) {
3199 			prev_frag_end = 0;
3200 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3201 		}
3202 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3203 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3204 		*offset += sizeof(block);
3205 		if (frag == NULL) {
3206 			return (chunk_freed);
3207 		}
3208 		frag_strt = ntohs(frag->start);
3209 		frag_end = ntohs(frag->end);
3210 
3211 		if (frag_strt > frag_end) {
3212 			/* This gap report is malformed, skip it. */
3213 			continue;
3214 		}
3215 		if (frag_strt <= prev_frag_end) {
3216 			/* This gap report is not in order, so restart. */
3217 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218 		}
3219 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3220 			*biggest_tsn_acked = last_tsn + frag_end;
3221 		}
3222 		if (i < num_seg) {
3223 			non_revocable = 0;
3224 		} else {
3225 			non_revocable = 1;
3226 		}
3227 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3228 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3229 		    this_sack_lowest_newack, rto_ok)) {
3230 			chunk_freed = 1;
3231 		}
3232 		prev_frag_end = frag_end;
3233 	}
3234 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3235 		if (num_frs)
3236 			sctp_log_fr(*biggest_tsn_acked,
3237 			    *biggest_newly_acked_tsn,
3238 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3239 	}
3240 	return (chunk_freed);
3241 }
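
/*
 * Worked example for the gap-ack walk above (hypothetical values): the
 * block offsets are relative to the cumulative TSN ack in the SACK, so
 * with last_tsn = 1000 and a block {start = 2, end = 4}, TSNs 1002
 * through 1004 are reported received and TSN 1001 is still missing. A
 * sketch (not compiled) of the mapping:
 */
#if 0
static void
gap_block_to_tsns(uint32_t last_tsn, uint16_t frag_strt, uint16_t frag_end,
    uint32_t *first_acked, uint32_t *last_acked)
{
	/* Unsigned wrap-around yields the right TSN even near 2^32. */
	*first_acked = last_tsn + frag_strt;
	*last_acked = last_tsn + frag_end;
}
#endif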
3242 
3243 static void
3244 sctp_check_for_revoked(struct sctp_tcb *stcb,
3245     struct sctp_association *asoc, uint32_t cumack,
3246     uint32_t biggest_tsn_acked)
3247 {
3248 	struct sctp_tmit_chunk *tp1;
3249 
3250 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3251 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3252 			/*
3253 			 * This chunk is either ACKED or MARKED. If it is
3254 			 * ACKED, it was previously acked but not by this
3255 			 * SACK, i.e. it has been revoked. If it is MARKED,
3256 			 * it was acked again.
3257 			 */
3258 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3259 				break;
3260 			}
3261 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3262 				/* it has been revoked */
3263 				tp1->sent = SCTP_DATAGRAM_SENT;
3264 				tp1->rec.data.chunk_was_revoked = 1;
3265 				/*
3266 				 * We must add this back in to ensure that
3267 				 * timers and such get restarted.
3268 				 */
3269 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3270 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3271 					    tp1->whoTo->flight_size,
3272 					    tp1->book_size,
3273 					    (uint32_t)(uintptr_t)tp1->whoTo,
3274 					    tp1->rec.data.tsn);
3275 				}
3276 				sctp_flight_size_increase(tp1);
3277 				sctp_total_flight_increase(stcb, tp1);
3278 				/*
3279 				 * We inflate the cwnd to compensate for our
3280 				 * artificial inflation of the flight_size.
3281 				 */
3282 				tp1->whoTo->cwnd += tp1->book_size;
3283 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3284 					sctp_log_sack(asoc->last_acked_seq,
3285 					    cumack,
3286 					    tp1->rec.data.tsn,
3287 					    0,
3288 					    0,
3289 					    SCTP_LOG_TSN_REVOKED);
3290 				}
3291 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3292 				/* it has been re-acked in this SACK */
3293 				tp1->sent = SCTP_DATAGRAM_ACKED;
3294 			}
3295 		}
3296 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3297 			break;
3298 	}
3299 }
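
/*
 * Summary of the revocation pass above: for each chunk above the cum-ack
 * but not above the biggest TSN acked by this SACK,
 *   ACKED  -> SENT   (acked before but missing now, i.e. revoked; flight
 *                     size and cwnd are re-inflated so timers restart)
 *   MARKED -> ACKED  (reported received again by this SACK)
 */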
3300 
3301 
3302 static void
3303 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3304     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3305 {
3306 	struct sctp_tmit_chunk *tp1;
3307 	int strike_flag = 0;
3308 	struct timeval now;
3309 	int tot_retrans = 0;
3310 	uint32_t sending_seq;
3311 	struct sctp_nets *net;
3312 	int num_dests_sacked = 0;
3313 
3314 	/*
3315 	 * Select the sending_seq: this is either the next chunk ready to be
3316 	 * sent but not yet transmitted, or the next sequence number we will assign.
3317 	 */
3318 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3319 	if (tp1 == NULL) {
3320 		sending_seq = asoc->sending_seq;
3321 	} else {
3322 		sending_seq = tp1->rec.data.tsn;
3323 	}
3324 
3325 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3326 	if ((asoc->sctp_cmt_on_off > 0) &&
3327 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3328 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3329 			if (net->saw_newack)
3330 				num_dests_sacked++;
3331 		}
3332 	}
3333 	if (stcb->asoc.prsctp_supported) {
3334 		(void)SCTP_GETTIME_TIMEVAL(&now);
3335 	}
3336 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3337 		strike_flag = 0;
3338 		if (tp1->no_fr_allowed) {
3339 			/* this one had a timeout or something */
3340 			continue;
3341 		}
3342 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3343 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3344 				sctp_log_fr(biggest_tsn_newly_acked,
3345 				    tp1->rec.data.tsn,
3346 				    tp1->sent,
3347 				    SCTP_FR_LOG_CHECK_STRIKE);
3348 		}
3349 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3350 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3351 			/* done */
3352 			break;
3353 		}
3354 		if (stcb->asoc.prsctp_supported) {
3355 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3356 				/* Is it expired? */
3357 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3358 					/* Yes so drop it */
3359 					if (tp1->data != NULL) {
3360 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3361 						    SCTP_SO_NOT_LOCKED);
3362 					}
3363 					continue;
3364 				}
3365 			}
3366 		}
3367 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3368 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3369 			/* we are beyond the tsn in the sack */
3370 			break;
3371 		}
3372 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3373 			/* either a RESEND, ACKED, or MARKED */
3374 			/* skip */
3375 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3376 				/* Continue striking FWD-TSN chunks */
3377 				tp1->rec.data.fwd_tsn_cnt++;
3378 			}
3379 			continue;
3380 		}
3381 		/*
3382 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3383 		 */
3384 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3385 			/*
3386 			 * No new acks were received for data sent to this
3387 			 * dest. Therefore, according to the SFR algo for
3388 			 * CMT, no data sent to this dest can be marked for
3389 			 * FR using this SACK.
3390 			 */
3391 			continue;
3392 		} else if (tp1->whoTo &&
3393 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3394 			    tp1->whoTo->this_sack_highest_newack) &&
3395 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3396 			/*
3397 			 * CMT: New acks were received for data sent to
3398 			 * this dest. But no new acks were seen for data
3399 			 * sent after tp1. Therefore, according to the SFR
3400 			 * algo for CMT, tp1 cannot be marked for FR using
3401 			 * this SACK. This step covers part of the DAC algo
3402 			 * and the HTNA algo as well.
3403 			 */
3404 			continue;
3405 		}
3406 		/*
3407 		 * Here we check to see if we have already done a FR
3408 		 * and if so we see if the biggest TSN we saw in the sack is
3409 		 * smaller than the recovery point. If so we don't strike
3410 		 * the tsn... otherwise we CAN strike the TSN.
3411 		 */
3412 		/*
3413 		 * @@@ JRI: Check for CMT if (accum_moved &&
3414 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3415 		 * 0)) {
3416 		 */
3417 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3418 			/*
3419 			 * Strike the TSN if in fast-recovery and cum-ack
3420 			 * moved.
3421 			 */
3422 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3423 				sctp_log_fr(biggest_tsn_newly_acked,
3424 				    tp1->rec.data.tsn,
3425 				    tp1->sent,
3426 				    SCTP_FR_LOG_STRIKE_CHUNK);
3427 			}
3428 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3429 				tp1->sent++;
3430 			}
3431 			if ((asoc->sctp_cmt_on_off > 0) &&
3432 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3433 				/*
3434 				 * CMT DAC algorithm: If the SACK flag is set
3435 				 * to 0, then the lowest_newack test will not
3436 				 * pass because it would have been set to the
3437 				 * cumack earlier. If the chunk is not already
3438 				 * marked for retransmission, the SACK is not
3439 				 * a mixed SACK, and tp1 is not between two
3440 				 * sacked TSNs, then mark it by one more.
3441 				 * NOTE that we are marking by one additional
3442 				 * time since the SACK DAC flag indicates that
3443 				 * two packets have been received after this missing TSN.
3444 				 */
3445 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3446 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3447 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3448 						sctp_log_fr(16 + num_dests_sacked,
3449 						    tp1->rec.data.tsn,
3450 						    tp1->sent,
3451 						    SCTP_FR_LOG_STRIKE_CHUNK);
3452 					}
3453 					tp1->sent++;
3454 				}
3455 			}
3456 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3457 		    (asoc->sctp_cmt_on_off == 0)) {
3458 			/*
3459 			 * For those that have done a FR we must take
3460 			 * special consideration if we strike. I.e the
3461 			 * biggest_newly_acked must be higher than the
3462 			 * sending_seq at the time we did the FR.
3463 			 */
3464 			if (
3465 #ifdef SCTP_FR_TO_ALTERNATE
3466 			/*
3467 			 * If FR's go to new networks, then we must only do
3468 			 * this for singly homed asoc's. However if the FR's
3469 			 * go to the same network (Armando's work) then it's
3470 			 * ok to FR multiple times.
3471 			 */
3472 			    (asoc->numnets < 2)
3473 #else
3474 			    (1)
3475 #endif
3476 			    ) {
3477 
3478 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3479 				    tp1->rec.data.fast_retran_tsn)) {
3480 					/*
3481 					 * Strike the TSN, since this ack is
3482 					 * beyond where things were when we
3483 					 * did a FR.
3484 					 */
3485 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3486 						sctp_log_fr(biggest_tsn_newly_acked,
3487 						    tp1->rec.data.tsn,
3488 						    tp1->sent,
3489 						    SCTP_FR_LOG_STRIKE_CHUNK);
3490 					}
3491 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3492 						tp1->sent++;
3493 					}
3494 					strike_flag = 1;
3495 					if ((asoc->sctp_cmt_on_off > 0) &&
3496 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3497 						/*
3498 						 * CMT DAC algorithm: If
3499 						 * the SACK flag is 0,
3500 						 * the lowest_newack test
3501 						 * will not pass, since
3502 						 * it would have been set
3503 						 * to the cumack earlier.
3504 						 * If the chunk is not
3505 						 * already marked for
3506 						 * rtx, the SACK is not
3507 						 * mixed, and tp1 is not
3508 						 * between two sacked
3509 						 * TSNs, mark it by one
3510 						 * more. NOTE that we mark
3511 						 * one additional time
3512 						 * since the SACK DAC flag
3513 						 * indicates two packets
3514 						 * arrived after this missing TSN.
3515 						 */
3516 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3517 						    (num_dests_sacked == 1) &&
3518 						    SCTP_TSN_GT(this_sack_lowest_newack,
3519 						    tp1->rec.data.tsn)) {
3520 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3521 								sctp_log_fr(32 + num_dests_sacked,
3522 								    tp1->rec.data.tsn,
3523 								    tp1->sent,
3524 								    SCTP_FR_LOG_STRIKE_CHUNK);
3525 							}
3526 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3527 								tp1->sent++;
3528 							}
3529 						}
3530 					}
3531 				}
3532 			}
3533 			/*
3534 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3535 			 * algo covers HTNA.
3536 			 */
3537 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3538 		    biggest_tsn_newly_acked)) {
3539 			/*
3540 			 * We don't strike these: this is the HTNA
3541 			 * algorithm, i.e. we don't strike if our TSN is
3542 			 * larger than the Highest TSN Newly Acked.
3543 			 */
3544 			;
3545 		} else {
3546 			/* Strike the TSN */
3547 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3548 				sctp_log_fr(biggest_tsn_newly_acked,
3549 				    tp1->rec.data.tsn,
3550 				    tp1->sent,
3551 				    SCTP_FR_LOG_STRIKE_CHUNK);
3552 			}
3553 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3554 				tp1->sent++;
3555 			}
3556 			if ((asoc->sctp_cmt_on_off > 0) &&
3557 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3558 				/*
3559 				 * CMT DAC algorithm: If the SACK flag is set
3560 				 * to 0, then the lowest_newack test will not
3561 				 * pass because it would have been set to the
3562 				 * cumack earlier. If the chunk is not already
3563 				 * marked for retransmission, the SACK is not
3564 				 * a mixed SACK, and tp1 is not between two
3565 				 * sacked TSNs, then mark it by one more.
3566 				 * NOTE that we are marking by one additional
3567 				 * time since the SACK DAC flag indicates that
3568 				 * two packets have been received after this missing TSN.
3569 				 */
3570 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3571 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3572 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3573 						sctp_log_fr(48 + num_dests_sacked,
3574 						    tp1->rec.data.tsn,
3575 						    tp1->sent,
3576 						    SCTP_FR_LOG_STRIKE_CHUNK);
3577 					}
3578 					tp1->sent++;
3579 				}
3580 			}
3581 		}
3582 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3583 			struct sctp_nets *alt;
3584 
3585 			/* fix counts and things */
3586 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3587 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3588 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3589 				    tp1->book_size,
3590 				    (uint32_t)(uintptr_t)tp1->whoTo,
3591 				    tp1->rec.data.tsn);
3592 			}
3593 			if (tp1->whoTo) {
3594 				tp1->whoTo->net_ack++;
3595 				sctp_flight_size_decrease(tp1);
3596 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3597 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3598 					    tp1);
3599 				}
3600 			}
3601 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3602 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3603 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3604 			}
3605 			/* add back to the rwnd */
3606 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3607 
3608 			/* remove from the total flight */
3609 			sctp_total_flight_decrease(stcb, tp1);
3610 
3611 			if ((stcb->asoc.prsctp_supported) &&
3612 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3613 				/*
3614 				 * Has it been retransmitted tv_sec times?
3615 				 * (The allowed retran count is stored there.)
3616 				 */
3617 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3618 					/* Yes, so drop it */
3619 					if (tp1->data != NULL) {
3620 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3621 						    SCTP_SO_NOT_LOCKED);
3622 					}
3623 					/* Make sure to flag we had a FR */
3624 					tp1->whoTo->net_ack++;
3625 					continue;
3626 				}
3627 			}
3628 			/*
3629 			 * SCTP_PRINTF("OK, we are now ready to FR this
3630 			 * guy\n");
3631 			 */
3632 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3633 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3634 				    0, SCTP_FR_MARKED);
3635 			}
3636 			if (strike_flag) {
3637 				/* This is a subsequent FR */
3638 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3639 			}
3640 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3641 			if (asoc->sctp_cmt_on_off > 0) {
3642 				/*
3643 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3644 				 * If CMT is being used, then pick dest with
3645 				 * largest ssthresh for any retransmission.
3646 				 */
3647 				tp1->no_fr_allowed = 1;
3648 				alt = tp1->whoTo;
3649 				/* sa_ignore NO_NULL_CHK */
3650 				if (asoc->sctp_cmt_pf > 0) {
3651 					/*
3652 					 * JRS 5/18/07 - If CMT PF is on,
3653 					 * use the PF version of
3654 					 * find_alt_net()
3655 					 */
3656 					alt = sctp_find_alternate_net(stcb, alt, 2);
3657 				} else {
3658 					/*
3659 					 * JRS 5/18/07 - If only CMT is on,
3660 					 * use the CMT version of
3661 					 * find_alt_net()
3662 					 */
3663 					/* sa_ignore NO_NULL_CHK */
3664 					alt = sctp_find_alternate_net(stcb, alt, 1);
3665 				}
3666 				if (alt == NULL) {
3667 					alt = tp1->whoTo;
3668 				}
3669 				/*
3670 				 * CUCv2: If a different dest is picked for
3671 				 * the retransmission, then new
3672 				 * (rtx-)pseudo_cumack needs to be tracked
3673 				 * for orig dest. Let CUCv2 track new (rtx-)
3674 				 * pseudo-cumack always.
3675 				 */
3676 				if (tp1->whoTo) {
3677 					tp1->whoTo->find_pseudo_cumack = 1;
3678 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3679 				}
3680 			} else {	/* CMT is OFF */
3681 
3682 #ifdef SCTP_FR_TO_ALTERNATE
3683 				/* Can we find an alternate? */
3684 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3685 #else
3686 				/*
3687 				 * default behavior is to NOT retransmit
3688 				 * FR's to an alternate. Armando Caro's
3689 				 * paper details why.
3690 				 */
3691 				alt = tp1->whoTo;
3692 #endif
3693 			}
3694 
3695 			tp1->rec.data.doing_fast_retransmit = 1;
3696 			tot_retrans++;
3697 			/* mark the sending seq for possible subsequent FR's */
3698 			/*
3699 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3700 			 * (uint32_t)tpi->rec.data.tsn);
3701 			 */
3702 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3703 				/*
3704 				 * If the send queue is empty, sending_seq is
3705 				 * the next sequence number to be assigned,
3706 				 * so record it as the point that must be
3707 				 * newly acked before we strike again.
3708 				 */
3709 				tp1->rec.data.fast_retran_tsn = sending_seq;
3710 			} else {
3711 				/*
3712 				 * If there are chunks on the send queue
3713 				 * (unsent data that has made it from the
3714 				 * stream queues but not out the door), we
3715 				 * take the first one, which will have the
3716 				 * lowest not-yet-sent TSN, and record that
3717 				 * as the threshold instead.
3718 				 */
3719 				struct sctp_tmit_chunk *ttt;
3720 
3721 				ttt = TAILQ_FIRST(&asoc->send_queue);
3722 				tp1->rec.data.fast_retran_tsn =
3723 				    ttt->rec.data.tsn;
3724 			}
3725 
3726 			if (tp1->do_rtt) {
3727 				/*
3728 				 * this guy had a RTO calculation pending on
3729 				 * it, cancel it
3730 				 */
3731 				if ((tp1->whoTo != NULL) &&
3732 				    (tp1->whoTo->rto_needed == 0)) {
3733 					tp1->whoTo->rto_needed = 1;
3734 				}
3735 				tp1->do_rtt = 0;
3736 			}
3737 			if (alt != tp1->whoTo) {
3738 				/* yes, there is an alternate. */
3739 				sctp_free_remote_addr(tp1->whoTo);
3740 				/* sa_ignore FREED_MEMORY */
3741 				tp1->whoTo = alt;
3742 				atomic_add_int(&alt->ref_count, 1);
3743 			}
3744 		}
3745 	}
3746 }
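
/*
 * Illustration (a sketch, not compiled) of the strike accounting above:
 * every SACK that implies a chunk is missing bumps tp1->sent by one
 * until it reaches SCTP_DATAGRAM_RESEND, at which point the chunk is
 * queued for fast retransmission. This assumes the usual ordering of
 * the SCTP_DATAGRAM_* constants, where values below
 * SCTP_DATAGRAM_RESEND mean "still outstanding".
 */
#if 0
static void
strike_sketch(struct sctp_tmit_chunk *chk)
{
	if (chk->sent < SCTP_DATAGRAM_RESEND) {
		chk->sent++;	/* one more SACK reported it missing */
	}
	if (chk->sent == SCTP_DATAGRAM_RESEND) {
		/* Enough strikes: mark for fast retransmit. */
	}
}
#endif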
3747 
3748 struct sctp_tmit_chunk *
3749 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3750     struct sctp_association *asoc)
3751 {
3752 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3753 	struct timeval now;
3754 	int now_filled = 0;
3755 
3756 	if (asoc->prsctp_supported == 0) {
3757 		return (NULL);
3758 	}
3759 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3760 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3761 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3762 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3763 			/* no chance to advance, out of here */
3764 			break;
3765 		}
3766 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3767 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3768 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3769 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3770 				    asoc->advanced_peer_ack_point,
3771 				    tp1->rec.data.tsn, 0, 0);
3772 			}
3773 		}
3774 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3775 			/*
3776 			 * We can't fwd-tsn past any that are reliable,
3777 			 * i.e. retransmitted until the asoc fails.
3778 			 */
3779 			break;
3780 		}
3781 		if (!now_filled) {
3782 			(void)SCTP_GETTIME_TIMEVAL(&now);
3783 			now_filled = 1;
3784 		}
3785 		/*
3786 		 * Now we have a chunk which is marked for another
3787 		 * retransmission to a PR-stream but has run out of its
3788 		 * chances already, OR has been marked to skip now. Can we
3789 		 * skip it if it's a resend?
3790 		 */
3791 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3792 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3793 			/*
3794 			 * Now is this one marked for resend and its time is
3795 			 * now up?
3796 			 */
3797 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3798 				/* Yes so drop it */
3799 				if (tp1->data) {
3800 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3801 					    1, SCTP_SO_NOT_LOCKED);
3802 				}
3803 			} else {
3804 				/*
3805 				 * No, we are done when we hit one marked for
3806 				 * resend whose time has not expired.
3807 				 */
3808 				break;
3809 			}
3810 		}
3811 		/*
3812 		 * OK, now if this chunk is marked to be dropped we can clean
3813 		 * up the chunk, advance our peer ack point, and check
3814 		 * the next chunk.
3815 		 */
3816 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3817 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3818 			/* the advancedPeerAckPoint goes forward */
3819 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3820 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3821 				a_adv = tp1;
3822 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3823 				/* No update but we do save the chk */
3824 				a_adv = tp1;
3825 			}
3826 		} else {
3827 			/*
3828 			 * If it is still in RESEND we can advance no
3829 			 * further
3830 			 */
3831 			break;
3832 		}
3833 	}
3834 	return (a_adv);
3835 }
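
/*
 * Worked example for the routine above (hypothetical TSNs, all
 * PR-SCTP enabled): with a cum-ack of 100 and a sent_queue holding 101
 * (FORWARD_TSN_SKIP), 102 (NR_ACKED) and 103 (still in flight), the
 * loop advances advanced_peer_ack_point to 102 and returns the chunk
 * for 102. The caller can then bundle a FWD-TSN telling the peer to
 * treat 102 as its new cumulative ack, skipping the abandoned TSNs.
 */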
3836 
3837 static int
3838 sctp_fs_audit(struct sctp_association *asoc)
3839 {
3840 	struct sctp_tmit_chunk *chk;
3841 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3842 	int ret;
3843 #ifndef INVARIANTS
3844 	int entry_flight, entry_cnt;
3845 #endif
3846 
3847 	ret = 0;
3848 #ifndef INVARIANTS
3849 	entry_flight = asoc->total_flight;
3850 	entry_cnt = asoc->total_flight_count;
3851 #endif
3852 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3853 		return (0);
3854 
3855 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3857 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3858 			    chk->rec.data.tsn,
3859 			    chk->send_size,
3860 			    chk->snd_count);
3861 			inflight++;
3862 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863 			resend++;
3864 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3865 			inbetween++;
3866 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3867 			above++;
3868 		} else {
3869 			acked++;
3870 		}
3871 	}
3872 
3873 	if ((inflight > 0) || (inbetween > 0)) {
3874 #ifdef INVARIANTS
3875 		panic("Flight size-express incorrect? \n");
3876 #else
3877 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3878 		    entry_flight, entry_cnt);
3879 
3880 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3881 		    inflight, inbetween, resend, above, acked);
3882 		ret = 1;
3883 #endif
3884 	}
3885 	return (ret);
3886 }
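
/*
 * A sketch (not compiled) of the invariant sctp_fs_audit() polices:
 * callers invoke it when the flight should be empty, and it flags any
 * chunk still counted as outstanding. Recomputing the flight directly
 * from the sent queue, assuming total_flight is kept in book_size bytes
 * as sctp_total_flight_increase() does, would look like:
 */
#if 0
static int
flight_matches_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	int fs = 0;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND)
			fs += chk->book_size;	/* still outstanding */
	}
	return (fs == asoc->total_flight);
}
#endif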
3887 
3888 
3889 static void
3890 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3891     struct sctp_association *asoc,
3892     struct sctp_tmit_chunk *tp1)
3893 {
3894 	tp1->window_probe = 0;
3895 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3896 		/* TSNs skipped; we do NOT move back. */
3897 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3898 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3899 		    tp1->book_size,
3900 		    (uint32_t)(uintptr_t)tp1->whoTo,
3901 		    tp1->rec.data.tsn);
3902 		return;
3903 	}
3904 	/* First setup this by shrinking flight */
3905 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3906 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3907 		    tp1);
3908 	}
3909 	sctp_flight_size_decrease(tp1);
3910 	sctp_total_flight_decrease(stcb, tp1);
3911 	/* Now mark for resend */
3912 	tp1->sent = SCTP_DATAGRAM_RESEND;
3913 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3914 
3915 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3916 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3917 		    tp1->whoTo->flight_size,
3918 		    tp1->book_size,
3919 		    (uint32_t)(uintptr_t)tp1->whoTo,
3920 		    tp1->rec.data.tsn);
3921 	}
3922 }
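
/*
 * Window-probe lifecycle, as used by the SACK handlers below: when the
 * peer's rwnd is zero, a single chunk may be sent as a probe with
 * window_probe set. If a later SACK reopens the window without acking
 * the probe, the routine above moves the probe back to the RESEND
 * state so the normal T3 timer and retransmission path cover it again.
 */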
3923 
3924 void
3925 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3926     uint32_t rwnd, int *abort_now, int ecne_seen)
3927 {
3928 	struct sctp_nets *net;
3929 	struct sctp_association *asoc;
3930 	struct sctp_tmit_chunk *tp1, *tp2;
3931 	uint32_t old_rwnd;
3932 	int win_probe_recovery = 0;
3933 	int win_probe_recovered = 0;
3934 	int j, done_once = 0;
3935 	int rto_ok = 1;
3936 	uint32_t send_s;
3937 
3938 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3939 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3940 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3941 	}
3942 	SCTP_TCB_LOCK_ASSERT(stcb);
3943 #ifdef SCTP_ASOCLOG_OF_TSNS
3944 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3945 	stcb->asoc.cumack_log_at++;
3946 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3947 		stcb->asoc.cumack_log_at = 0;
3948 	}
3949 #endif
3950 	asoc = &stcb->asoc;
3951 	old_rwnd = asoc->peers_rwnd;
3952 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3953 		/* old ack */
3954 		return;
3955 	} else if (asoc->last_acked_seq == cumack) {
3956 		/* Window update sack */
3957 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3958 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3959 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3960 			/* SWS sender side engages */
3961 			asoc->peers_rwnd = 0;
3962 		}
3963 		if (asoc->peers_rwnd > old_rwnd) {
3964 			goto again;
3965 		}
3966 		return;
3967 	}
3968 	/* First setup for CC stuff */
3969 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3970 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3971 			/* Drag along the window_tsn for cwr's */
3972 			net->cwr_window_tsn = cumack;
3973 		}
3974 		net->prev_cwnd = net->cwnd;
3975 		net->net_ack = 0;
3976 		net->net_ack2 = 0;
3977 
3978 		/*
3979 		 * CMT: Reset CUC and Fast recovery algo variables before
3980 		 * SACK processing
3981 		 */
3982 		net->new_pseudo_cumack = 0;
3983 		net->will_exit_fast_recovery = 0;
3984 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3985 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3986 		}
3987 	}
3988 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3989 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3990 		    sctpchunk_listhead);
3991 		send_s = tp1->rec.data.tsn + 1;
3992 	} else {
3993 		send_s = asoc->sending_seq;
3994 	}
3995 	if (SCTP_TSN_GE(cumack, send_s)) {
3996 		struct mbuf *op_err;
3997 		char msg[SCTP_DIAG_INFO_LEN];
3998 
3999 		*abort_now = 1;
4000 		/* XXX */
4001 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4002 		    cumack, send_s);
4003 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4004 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4005 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4006 		return;
4007 	}
4008 	asoc->this_sack_highest_gap = cumack;
4009 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4010 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4011 		    stcb->asoc.overall_error_count,
4012 		    0,
4013 		    SCTP_FROM_SCTP_INDATA,
4014 		    __LINE__);
4015 	}
4016 	stcb->asoc.overall_error_count = 0;
4017 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4018 		/* process the new consecutive TSN first */
4019 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4020 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4021 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4022 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4023 				}
4024 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4025 					/*
4026 					 * If it is less than ACKED, it is
4027 					 * now no longer in flight. Higher
4028 					 * values may occur during marking.
4029 					 */
4030 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4031 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4032 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4033 							    tp1->whoTo->flight_size,
4034 							    tp1->book_size,
4035 							    (uint32_t)(uintptr_t)tp1->whoTo,
4036 							    tp1->rec.data.tsn);
4037 						}
4038 						sctp_flight_size_decrease(tp1);
4039 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4040 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4041 							    tp1);
4042 						}
4043 						/* sa_ignore NO_NULL_CHK */
4044 						sctp_total_flight_decrease(stcb, tp1);
4045 					}
4046 					tp1->whoTo->net_ack += tp1->send_size;
4047 					if (tp1->snd_count < 2) {
4048 						/*
4049 						 * True non-retransmitted
4050 						 * chunk
4051 						 */
4052 						tp1->whoTo->net_ack2 +=
4053 						    tp1->send_size;
4054 
4055 						/* update RTO too? */
4056 						if (tp1->do_rtt) {
4057 							if (rto_ok) {
4058 								tp1->whoTo->RTO =
4059 								/*
4060 								 * sa_ignore
4061 								 * NO_NULL_CHK
4062 								 */
4063 								    sctp_calculate_rto(stcb,
4064 								    asoc, tp1->whoTo,
4065 								    &tp1->sent_rcv_time,
4066 								    SCTP_RTT_FROM_DATA);
4067 								rto_ok = 0;
4068 							}
4069 							if (tp1->whoTo->rto_needed == 0) {
4070 								tp1->whoTo->rto_needed = 1;
4071 							}
4072 							tp1->do_rtt = 0;
4073 						}
4074 					}
4075 					/*
4076 					 * CMT: CUCv2 algorithm. From the
4077 					 * cumack'd TSNs, for each TSN being
4078 					 * acked for the first time, set the
4079 					 * following variables for the
4080 					 * corresp destination.
4081 					 * new_pseudo_cumack will trigger a
4082 					 * cwnd update.
4083 					 * find_(rtx_)pseudo_cumack will
4084 					 * trigger search for the next
4085 					 * expected (rtx-)pseudo-cumack.
4086 					 */
4087 					tp1->whoTo->new_pseudo_cumack = 1;
4088 					tp1->whoTo->find_pseudo_cumack = 1;
4089 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4090 
4091 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4092 						/* sa_ignore NO_NULL_CHK */
4093 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4094 					}
4095 				}
4096 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4097 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4098 				}
4099 				if (tp1->rec.data.chunk_was_revoked) {
4100 					/* deflate the cwnd */
4101 					tp1->whoTo->cwnd -= tp1->book_size;
4102 					tp1->rec.data.chunk_was_revoked = 0;
4103 				}
4104 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4105 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4106 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4107 #ifdef INVARIANTS
4108 					} else {
4109 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4110 #endif
4111 					}
4112 				}
4113 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4114 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4115 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4116 					asoc->trigger_reset = 1;
4117 				}
4118 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4119 				if (tp1->data) {
4120 					/* sa_ignore NO_NULL_CHK */
4121 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4122 					sctp_m_freem(tp1->data);
4123 					tp1->data = NULL;
4124 				}
4125 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4126 					sctp_log_sack(asoc->last_acked_seq,
4127 					    cumack,
4128 					    tp1->rec.data.tsn,
4129 					    0,
4130 					    0,
4131 					    SCTP_LOG_FREE_SENT);
4132 				}
4133 				asoc->sent_queue_cnt--;
4134 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4135 			} else {
4136 				break;
4137 			}
4138 		}
4139 
4140 	}
4141 	/* sa_ignore NO_NULL_CHK */
4142 	if (stcb->sctp_socket) {
4143 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4144 		struct socket *so;
4145 
4146 #endif
4147 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4148 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4149 			/* sa_ignore NO_NULL_CHK */
4150 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4151 		}
4152 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4153 		so = SCTP_INP_SO(stcb->sctp_ep);
4154 		atomic_add_int(&stcb->asoc.refcnt, 1);
4155 		SCTP_TCB_UNLOCK(stcb);
4156 		SCTP_SOCKET_LOCK(so, 1);
4157 		SCTP_TCB_LOCK(stcb);
4158 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4159 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4160 			/* assoc was freed while we were unlocked */
4161 			SCTP_SOCKET_UNLOCK(so, 1);
4162 			return;
4163 		}
4164 #endif
4165 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4166 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4167 		SCTP_SOCKET_UNLOCK(so, 1);
4168 #endif
4169 	} else {
4170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4172 		}
4173 	}
4174 
4175 	/* JRS - Use the congestion control given in the CC module */
4176 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4177 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4178 			if (net->net_ack2 > 0) {
4179 				/*
4180 				 * Karn's rule applies to clearing error
4181 				 * count, this is optional.
4182 				 */
4183 				net->error_count = 0;
4184 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4185 					/* addr came good */
4186 					net->dest_state |= SCTP_ADDR_REACHABLE;
4187 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4188 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4189 				}
4190 				if (net == stcb->asoc.primary_destination) {
4191 					if (stcb->asoc.alternate) {
4192 						/*
4193 						 * release the alternate,
4194 						 * primary is good
4195 						 */
4196 						sctp_free_remote_addr(stcb->asoc.alternate);
4197 						stcb->asoc.alternate = NULL;
4198 					}
4199 				}
4200 				if (net->dest_state & SCTP_ADDR_PF) {
4201 					net->dest_state &= ~SCTP_ADDR_PF;
4202 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4203 					    stcb->sctp_ep, stcb, net,
4204 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4205 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4206 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4207 					/* Done with this net */
4208 					net->net_ack = 0;
4209 				}
4210 				/* restore any doubled timers */
4211 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4212 				if (net->RTO < stcb->asoc.minrto) {
4213 					net->RTO = stcb->asoc.minrto;
4214 				}
4215 				if (net->RTO > stcb->asoc.maxrto) {
4216 					net->RTO = stcb->asoc.maxrto;
4217 				}
4218 			}
4219 		}
4220 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4221 	}
4222 	asoc->last_acked_seq = cumack;
4223 
4224 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4225 		/* nothing left in-flight */
4226 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4227 			net->flight_size = 0;
4228 			net->partial_bytes_acked = 0;
4229 		}
4230 		asoc->total_flight = 0;
4231 		asoc->total_flight_count = 0;
4232 	}
4233 	/* RWND update */
4234 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4235 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4236 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4237 		/* SWS sender side engages */
4238 		asoc->peers_rwnd = 0;
4239 	}
4240 	if (asoc->peers_rwnd > old_rwnd) {
4241 		win_probe_recovery = 1;
4242 	}
4243 	/* Now assure a timer where data is queued at */
4244 again:
4245 	j = 0;
4246 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 		if (win_probe_recovery && (net->window_probe)) {
4248 			win_probe_recovered = 1;
4249 			/*
4250 			 * Find first chunk that was used with window probe
4251 			 * and clear the sent
4252 			 */
4253 			/* sa_ignore FREED_MEMORY */
4254 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4255 				if (tp1->window_probe) {
4256 					/* move back to data send queue */
4257 					sctp_window_probe_recovery(stcb, asoc, tp1);
4258 					break;
4259 				}
4260 			}
4261 		}
4262 		if (net->flight_size) {
4263 			j++;
4264 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4265 			if (net->window_probe) {
4266 				net->window_probe = 0;
4267 			}
4268 		} else {
4269 			if (net->window_probe) {
4270 				/*
4271 				 * In window probes we must assure a timer
4272 				 * is still running there
4273 				 */
4274 				net->window_probe = 0;
4275 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4276 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4277 				}
4278 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4279 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4280 				    stcb, net,
4281 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4282 			}
4283 		}
4284 	}
4285 	if ((j == 0) &&
4286 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4287 	    (asoc->sent_queue_retran_cnt == 0) &&
4288 	    (win_probe_recovered == 0) &&
4289 	    (done_once == 0)) {
4290 		/*
4291 		 * huh, this should not happen unless all packets are
4292 		 * PR-SCTP and marked to skip, of course.
4293 		 */
4294 		if (sctp_fs_audit(asoc)) {
4295 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4296 				net->flight_size = 0;
4297 			}
4298 			asoc->total_flight = 0;
4299 			asoc->total_flight_count = 0;
4300 			asoc->sent_queue_retran_cnt = 0;
4301 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4302 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4303 					sctp_flight_size_increase(tp1);
4304 					sctp_total_flight_increase(stcb, tp1);
4305 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4306 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4307 				}
4308 			}
4309 		}
4310 		done_once = 1;
4311 		goto again;
4312 	}
4313 	/**********************************/
4314 	/* Now what about shutdown issues */
4315 	/**********************************/
4316 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4317 		/* nothing left on sendqueue.. consider done */
4318 		/* clean up */
4319 		if ((asoc->stream_queue_cnt == 1) &&
4320 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4321 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4322 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4323 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4324 		}
4325 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4326 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4327 		    (asoc->stream_queue_cnt == 1) &&
4328 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4329 			struct mbuf *op_err;
4330 
4331 			*abort_now = 1;
4332 			/* XXX */
4333 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4334 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4335 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4336 			return;
4337 		}
4338 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4339 		    (asoc->stream_queue_cnt == 0)) {
4340 			struct sctp_nets *netp;
4341 
4342 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4343 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4344 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4345 			}
4346 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4347 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4348 			sctp_stop_timers_for_shutdown(stcb);
4349 			if (asoc->alternate) {
4350 				netp = asoc->alternate;
4351 			} else {
4352 				netp = asoc->primary_destination;
4353 			}
4354 			sctp_send_shutdown(stcb, netp);
4355 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4356 			    stcb->sctp_ep, stcb, netp);
4357 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4358 			    stcb->sctp_ep, stcb, netp);
4359 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4360 		    (asoc->stream_queue_cnt == 0)) {
4361 			struct sctp_nets *netp;
4362 
4363 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4366 			sctp_stop_timers_for_shutdown(stcb);
4367 			if (asoc->alternate) {
4368 				netp = asoc->alternate;
4369 			} else {
4370 				netp = asoc->primary_destination;
4371 			}
4372 			sctp_send_shutdown_ack(stcb, netp);
4373 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4374 			    stcb->sctp_ep, stcb, netp);
4375 		}
4376 	}
4377 	/*********************************************/
4378 	/* Here we perform PR-SCTP procedures        */
4379 	/* (section 4.2)                             */
4380 	/*********************************************/
4381 	/* C1. update advancedPeerAckPoint */
4382 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4383 		asoc->advanced_peer_ack_point = cumack;
4384 	}
4385 	/* PR-Sctp issues need to be addressed too */
4386 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4387 		struct sctp_tmit_chunk *lchk;
4388 		uint32_t old_adv_peer_ack_point;
4389 
4390 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4391 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4392 		/* C3. See if we need to send a Fwd-TSN */
4393 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4394 			/*
4395 			 * ISSUE with ECN, see FWD-TSN processing.
4396 			 */
4397 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4398 				send_forward_tsn(stcb, asoc);
4399 			} else if (lchk) {
4400 				/* try to FR fwd-tsn's that get lost too */
4401 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4402 					send_forward_tsn(stcb, asoc);
4403 				}
4404 			}
4405 		}
4406 		if (lchk) {
4407 			/* Assure a timer is up */
4408 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4409 			    stcb->sctp_ep, stcb, lchk->whoTo);
4410 		}
4411 	}
4412 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4413 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4414 		    rwnd,
4415 		    stcb->asoc.peers_rwnd,
4416 		    stcb->asoc.total_flight,
4417 		    stcb->asoc.total_output_queue_size);
4418 	}
4419 }
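
/*
 * Worked example for the peer-rwnd computation above (hypothetical
 * numbers): with rwnd = 64000 reported in the SACK, total_flight =
 * 12000, total_flight_count = 10 and sctp_peer_chunk_oh = 256,
 *
 *	peers_rwnd = 64000 - (12000 + 10 * 256) = 49440.
 *
 * If the result falls below the sender-side SWS threshold
 * (sctp_sws_sender), it is clamped to 0 so we do not dribble tiny
 * fragments into a nearly closed window.
 */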
4420 
4421 void
4422 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4423     struct sctp_tcb *stcb,
4424     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4425     int *abort_now, uint8_t flags,
4426     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4427 {
4428 	struct sctp_association *asoc;
4429 	struct sctp_tmit_chunk *tp1, *tp2;
4430 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4431 	uint16_t wake_him = 0;
4432 	uint32_t send_s = 0;
4433 	long j;
4434 	int accum_moved = 0;
4435 	int will_exit_fast_recovery = 0;
4436 	uint32_t a_rwnd, old_rwnd;
4437 	int win_probe_recovery = 0;
4438 	int win_probe_recovered = 0;
4439 	struct sctp_nets *net = NULL;
4440 	int done_once;
4441 	int rto_ok = 1;
4442 	uint8_t reneged_all = 0;
4443 	uint8_t cmt_dac_flag;
4444 
4445 	/*
4446 	 * we take any chance we can to service our queues since we cannot
4447 	 * get awoken when the socket is read from :<
4448 	 */
4449 	/*
4450 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4451 	 * old sack; if so, discard it. 2) If there is nothing left in the
4452 	 * send queue (cum-ack is equal to last acked) then you have a
4453 	 * duplicate too; update any rwnd change, verify no timers are
4454 	 * running, then return. 3) Process any new consecutive data, i.e.
4455 	 * the cum-ack moved; process these first and note that it moved.
4456 	 * 4) Process any sack blocks. 5) Drop any acked chunks from the
4457 	 * queue. 6) Check for any revoked blocks and mark them. 7) Update
4458 	 * the cwnd. 8) If nothing is left, sync up flight sizes and such,
4459 	 * stop all timers, and also check for the shutdown_pending state;
4460 	 * if so, go ahead and send off the shutdown. If in shutdown-received,
4461 	 * send off the shutdown-ack and start that timer, then return.
4462 	 * 9) Strike any non-acked chunks and do the FR procedure if
4463 	 * needed, being sure to set the FR flag. 10) Do PR-SCTP procedures.
4464 	 * 11) Apply any FR penalties. 12) Assure we will SACK if in the shutdown-received state.
4465 	 */
4466 	SCTP_TCB_LOCK_ASSERT(stcb);
4467 	/* CMT DAC algo */
4468 	this_sack_lowest_newack = 0;
4469 	SCTP_STAT_INCR(sctps_slowpath_sack);
4470 	last_tsn = cum_ack;
4471 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4472 #ifdef SCTP_ASOCLOG_OF_TSNS
4473 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4474 	stcb->asoc.cumack_log_at++;
4475 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4476 		stcb->asoc.cumack_log_at = 0;
4477 	}
4478 #endif
4479 	a_rwnd = rwnd;
4480 
4481 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4482 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4483 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4484 	}
4485 	old_rwnd = stcb->asoc.peers_rwnd;
4486 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4487 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4488 		    stcb->asoc.overall_error_count,
4489 		    0,
4490 		    SCTP_FROM_SCTP_INDATA,
4491 		    __LINE__);
4492 	}
4493 	stcb->asoc.overall_error_count = 0;
4494 	asoc = &stcb->asoc;
4495 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4496 		sctp_log_sack(asoc->last_acked_seq,
4497 		    cum_ack,
4498 		    0,
4499 		    num_seg,
4500 		    num_dup,
4501 		    SCTP_LOG_NEW_SACK);
4502 	}
4503 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4504 		uint16_t i;
4505 		uint32_t *dupdata, dblock;
4506 
4507 		for (i = 0; i < num_dup; i++) {
4508 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4509 			    sizeof(uint32_t), (uint8_t *)&dblock);
4510 			if (dupdata == NULL) {
4511 				break;
4512 			}
4513 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4514 		}
4515 	}
4516 	/* reality check */
4517 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4518 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4519 		    sctpchunk_listhead);
4520 		send_s = tp1->rec.data.tsn + 1;
4521 	} else {
4522 		tp1 = NULL;
4523 		send_s = asoc->sending_seq;
4524 	}
4525 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4526 		struct mbuf *op_err;
4527 		char msg[SCTP_DIAG_INFO_LEN];
4528 
4529 		/*
4530 		 * no way, we have not even sent this TSN out yet. Peer is
4531 		 * hopelessly messed up with us.
4532 		 */
4533 		SCTP_PRINTF("NEW cum_ack:%x is greater than or equal to send_s:%x\n",
4534 		    cum_ack, send_s);
4535 		if (tp1) {
4536 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4537 			    tp1->rec.data.tsn, (void *)tp1);
4538 		}
4539 hopeless_peer:
4540 		*abort_now = 1;
4541 		/* XXX */
4542 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4543 		    cum_ack, send_s);
4544 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4545 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4546 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4547 		return;
4548 	}
4549 	/**********************/
4550 	/* 1) check the range */
4551 	/**********************/
4552 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4553 		/* acking something behind */
4554 		return;
4555 	}
4556 	/* update the Rwnd of the peer */
4557 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4558 	    TAILQ_EMPTY(&asoc->send_queue) &&
4559 	    (asoc->stream_queue_cnt == 0)) {
4560 		/* nothing left on send/sent and strmq */
4561 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4562 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4563 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4564 		}
4565 		asoc->peers_rwnd = a_rwnd;
4566 		if (asoc->sent_queue_retran_cnt) {
4567 			asoc->sent_queue_retran_cnt = 0;
4568 		}
4569 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4570 			/* SWS sender side engages */
4571 			asoc->peers_rwnd = 0;
4572 		}
4573 		/* stop any timers */
4574 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4575 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4576 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4577 			net->partial_bytes_acked = 0;
4578 			net->flight_size = 0;
4579 		}
4580 		asoc->total_flight = 0;
4581 		asoc->total_flight_count = 0;
4582 		return;
4583 	}
4584 	/*
4585 	 * We init net_ack and net_ack2 to 0. These are used to track two
4586 	 * things. The total byte count acked is tracked in net_ack, AND
4587 	 * net_ack2 is used to track the total bytes acked that are un-
4588 	 * ambiguous and were never retransmitted. We track these on a per
4589 	 * destination address basis.
4590 	 */
4591 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4592 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4593 			/* Drag along the window_tsn for cwr's */
4594 			net->cwr_window_tsn = cum_ack;
4595 		}
4596 		net->prev_cwnd = net->cwnd;
4597 		net->net_ack = 0;
4598 		net->net_ack2 = 0;
4599 
4600 		/*
4601 		 * CMT: Reset CUC and Fast recovery algo variables before
4602 		 * SACK processing
4603 		 */
4604 		net->new_pseudo_cumack = 0;
4605 		net->will_exit_fast_recovery = 0;
4606 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4607 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4608 		}
4609 		/*
4610 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4611 		 * to be greater than the cumack. Also reset saw_newack to 0
4612 		 * for all dests.
4613 		 */
4614 		net->saw_newack = 0;
4615 		net->this_sack_highest_newack = last_tsn;
4616 	}
4617 	/* process the new consecutive TSN first */
4618 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4619 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4620 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4621 				accum_moved = 1;
4622 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4623 					/*
4624 					 * If it is less than ACKED, it is
4625 					 * now no longer in flight. Higher
4626 					 * values may occur during marking.
4627 					 */
4628 					if ((tp1->whoTo->dest_state &
4629 					    SCTP_ADDR_UNCONFIRMED) &&
4630 					    (tp1->snd_count < 2)) {
4631 						/*
4632 						 * If there was no retran
4633 						 * and the address is
4634 						 * unconfirmed, and we sent
4635 						 * there and are now
4636 						 * sacked... it's confirmed;
4637 						 * mark it so.
4638 						 */
4639 						tp1->whoTo->dest_state &=
4640 						    ~SCTP_ADDR_UNCONFIRMED;
4641 					}
4642 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4643 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4644 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4645 							    tp1->whoTo->flight_size,
4646 							    tp1->book_size,
4647 							    (uint32_t)(uintptr_t)tp1->whoTo,
4648 							    tp1->rec.data.tsn);
4649 						}
4650 						sctp_flight_size_decrease(tp1);
4651 						sctp_total_flight_decrease(stcb, tp1);
4652 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4653 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4654 							    tp1);
4655 						}
4656 					}
4657 					tp1->whoTo->net_ack += tp1->send_size;
4658 
4659 					/* CMT SFR and DAC algos */
4660 					this_sack_lowest_newack = tp1->rec.data.tsn;
4661 					tp1->whoTo->saw_newack = 1;
4662 
4663 					if (tp1->snd_count < 2) {
4664 						/*
4665 						 * True non-retransmitted
4666 						 * chunk
4667 						 */
4668 						tp1->whoTo->net_ack2 +=
4669 						    tp1->send_size;
4670 
4671 						/* update RTO too? */
4672 						if (tp1->do_rtt) {
4673 							if (rto_ok) {
4674 								tp1->whoTo->RTO =
4675 								    sctp_calculate_rto(stcb,
4676 								    asoc, tp1->whoTo,
4677 								    &tp1->sent_rcv_time,
4678 								    SCTP_RTT_FROM_DATA);
4679 								rto_ok = 0;
4680 							}
4681 							if (tp1->whoTo->rto_needed == 0) {
4682 								tp1->whoTo->rto_needed = 1;
4683 							}
4684 							tp1->do_rtt = 0;
4685 						}
4686 					}
4687 					/*
4688 					 * CMT: CUCv2 algorithm. From the
4689 					 * cumack'd TSNs, for each TSN being
4690 					 * acked for the first time, set the
4691 					 * following variables for the
4692 					 * corresp destination.
4693 					 * new_pseudo_cumack will trigger a
4694 					 * cwnd update.
4695 					 * find_(rtx_)pseudo_cumack will
4696 					 * trigger search for the next
4697 					 * expected (rtx-)pseudo-cumack.
4698 					 */
4699 					tp1->whoTo->new_pseudo_cumack = 1;
4700 					tp1->whoTo->find_pseudo_cumack = 1;
4701 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4702 
4703 
4704 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4705 						sctp_log_sack(asoc->last_acked_seq,
4706 						    cum_ack,
4707 						    tp1->rec.data.tsn,
4708 						    0,
4709 						    0,
4710 						    SCTP_LOG_TSN_ACKED);
4711 					}
4712 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4713 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4714 					}
4715 				}
4716 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4717 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4718 #ifdef SCTP_AUDITING_ENABLED
4719 					sctp_audit_log(0xB3,
4720 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4721 #endif
4722 				}
4723 				if (tp1->rec.data.chunk_was_revoked) {
4724 					/* deflate the cwnd */
4725 					tp1->whoTo->cwnd -= tp1->book_size;
4726 					tp1->rec.data.chunk_was_revoked = 0;
4727 				}
4728 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4729 					tp1->sent = SCTP_DATAGRAM_ACKED;
4730 				}
4731 			}
4732 		} else {
4733 			break;
4734 		}
4735 	}
4736 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4737 	/* always set this up to cum-ack */
4738 	asoc->this_sack_highest_gap = last_tsn;
4739 
4740 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4741 
4742 		/*
4743 		 * this_sack_highest_gap will increase while handling NEW
4744 		 * segments; this_sack_highest_newack will increase while
4745 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4746 		 * used for CMT DAC algo. saw_newack will also change.
4747 		 */
4748 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4749 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4750 		    num_seg, num_nr_seg, &rto_ok)) {
4751 			wake_him++;
4752 		}
4753 		/*
4754 		 * validate the biggest_tsn_acked in the gap acks if strict
4755 		 * adherence is wanted.
4756 		 */
4757 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4758 			/*
4759 			 * peer is either confused or we are under attack.
4760 			 * We must abort.
4761 			 */
4762 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4763 			    biggest_tsn_acked, send_s);
4764 			goto hopeless_peer;
4765 		}
4766 	}
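	/*
	 * Aside (a sketch under assumptions, not the kernel macro): TSN
	 * comparisons such as SCTP_TSN_GE() must use serial-number
	 * arithmetic because TSNs wrap at 2^32. One common formulation:
	 *
	 *	static inline int
	 *	tsn_gt(uint32_t a, uint32_t b)
	 *	{
	 *		return ((int32_t)(a - b)) > 0;	(wrap-safe compare)
	 *	}
	 *
	 * With that, TSN 0x00000001 is "greater than" 0xfffffffe even
	 * though it is numerically smaller. The actual SCTP_TSN_GT/GE
	 * macros may be written differently but encode the same ordering.
	 */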
4767 	/*******************************************/
4768 	/* cancel ALL T3-send timer if accum moved */
4769 	/*******************************************/
4770 	if (asoc->sctp_cmt_on_off > 0) {
4771 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4772 			if (net->new_pseudo_cumack)
4773 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4774 				    stcb, net,
4775 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4776 
4777 		}
4778 	} else {
4779 		if (accum_moved) {
4780 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4782 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4783 			}
4784 		}
4785 	}
4786 	/********************************************/
4787 	/* drop the acked chunks from the sentqueue */
4788 	/********************************************/
4789 	asoc->last_acked_seq = cum_ack;
4790 
4791 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4792 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4793 			break;
4794 		}
4795 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4796 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4797 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4798 #ifdef INVARIANTS
4799 			} else {
4800 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4801 #endif
4802 			}
4803 		}
4804 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4805 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4806 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4807 			asoc->trigger_reset = 1;
4808 		}
4809 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4810 		if (PR_SCTP_ENABLED(tp1->flags)) {
4811 			if (asoc->pr_sctp_cnt != 0)
4812 				asoc->pr_sctp_cnt--;
4813 		}
4814 		asoc->sent_queue_cnt--;
4815 		if (tp1->data) {
4816 			/* sa_ignore NO_NULL_CHK */
4817 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4818 			sctp_m_freem(tp1->data);
4819 			tp1->data = NULL;
4820 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4821 				asoc->sent_queue_cnt_removeable--;
4822 			}
4823 		}
4824 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4825 			sctp_log_sack(asoc->last_acked_seq,
4826 			    cum_ack,
4827 			    tp1->rec.data.tsn,
4828 			    0,
4829 			    0,
4830 			    SCTP_LOG_FREE_SENT);
4831 		}
4832 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4833 		wake_him++;
4834 	}
4835 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4836 #ifdef INVARIANTS
4837 		panic("Warning flight size is positive and should be 0");
4838 #else
4839 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4840 		    asoc->total_flight);
4841 #endif
4842 		asoc->total_flight = 0;
4843 	}
4844 	/* sa_ignore NO_NULL_CHK */
4845 	if ((wake_him) && (stcb->sctp_socket)) {
4846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4847 		struct socket *so;
4848 
4849 #endif
4850 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4851 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4852 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4853 		}
4854 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4855 		so = SCTP_INP_SO(stcb->sctp_ep);
4856 		atomic_add_int(&stcb->asoc.refcnt, 1);
4857 		SCTP_TCB_UNLOCK(stcb);
4858 		SCTP_SOCKET_LOCK(so, 1);
4859 		SCTP_TCB_LOCK(stcb);
4860 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4861 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4862 			/* assoc was freed while we were unlocked */
4863 			SCTP_SOCKET_UNLOCK(so, 1);
4864 			return;
4865 		}
4866 #endif
4867 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4869 		SCTP_SOCKET_UNLOCK(so, 1);
4870 #endif
4871 	} else {
4872 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4873 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4874 		}
4875 	}
4876 
4877 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4878 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4879 			/* Setup so we will exit RFC2582 fast recovery */
4880 			will_exit_fast_recovery = 1;
4881 		}
4882 	}
4883 	/*
4884 	 * Check for revoked fragments:
4885 	 *
4886 	 * If the previous SACK had no gap reports, nothing can have been
4887 	 * revoked. If it did, then: if this SACK has gap reports too (i.e.
4888 	 * num_seg > 0), call sctp_check_for_revoked() to tell whether the
4889 	 * peer revoked some of them; otherwise the peer revoked ALL
4890 	 * previously ACKED fragments, since we had some before and now have NONE.
4891 	 */
4892 
4893 	if (num_seg) {
4894 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4895 		asoc->saw_sack_with_frags = 1;
4896 	} else if (asoc->saw_sack_with_frags) {
4897 		int cnt_revoked = 0;
4898 
4899 		/* Peer revoked all datagrams previously marked or acked */
4900 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4901 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4902 				tp1->sent = SCTP_DATAGRAM_SENT;
4903 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4904 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4905 					    tp1->whoTo->flight_size,
4906 					    tp1->book_size,
4907 					    (uint32_t)(uintptr_t)tp1->whoTo,
4908 					    tp1->rec.data.tsn);
4909 				}
4910 				sctp_flight_size_increase(tp1);
4911 				sctp_total_flight_increase(stcb, tp1);
4912 				tp1->rec.data.chunk_was_revoked = 1;
4913 				/*
4914 				 * To ensure that this increase in
4915 				 * flightsize, which is artificial, does not
4916 				 * throttle the sender, we also increase the
4917 				 * cwnd artificially.
4918 				 */
4919 				tp1->whoTo->cwnd += tp1->book_size;
4920 				cnt_revoked++;
4921 			}
4922 		}
4923 		if (cnt_revoked) {
4924 			reneged_all = 1;
4925 		}
4926 		asoc->saw_sack_with_frags = 0;
4927 	}
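	/*
	 * Worked example of the revoke bookkeeping above (numbers purely
	 * illustrative): a chunk with book_size 1500 was acked in an
	 * earlier gap report and is now missing from this SACK, so it is
	 * put back in flight. To keep the usable window (cwnd -
	 * flight_size) unchanged by this artificial growth, cwnd is
	 * inflated by the same amount:
	 *
	 *	flight_size: 6000 -> 7500
	 *	cwnd:        8000 -> 9500
	 *
	 * The matching deflation is the chunk_was_revoked handling in the
	 * cum-ack walk earlier in this function, once the chunk is truly
	 * acked.
	 */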
4928 	if (num_nr_seg > 0)
4929 		asoc->saw_sack_with_nr_frags = 1;
4930 	else
4931 		asoc->saw_sack_with_nr_frags = 0;
4932 
4933 	/* JRS - Use the congestion control given in the CC module */
4934 	if (ecne_seen == 0) {
4935 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4936 			if (net->net_ack2 > 0) {
4937 				/*
4938 				 * Karn's rule applies to clearing error
4939 				 * count; this is optional.
4940 				 */
4941 				net->error_count = 0;
4942 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4943 					/* addr came good */
4944 					net->dest_state |= SCTP_ADDR_REACHABLE;
4945 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4946 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4947 				}
4948 				if (net == stcb->asoc.primary_destination) {
4949 					if (stcb->asoc.alternate) {
4950 						/*
4951 						 * release the alternate,
4952 						 * primary is good
4953 						 */
4954 						sctp_free_remote_addr(stcb->asoc.alternate);
4955 						stcb->asoc.alternate = NULL;
4956 					}
4957 				}
4958 				if (net->dest_state & SCTP_ADDR_PF) {
4959 					net->dest_state &= ~SCTP_ADDR_PF;
4960 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4961 					    stcb->sctp_ep, stcb, net,
4962 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4963 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4964 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4965 					/* Done with this net */
4966 					net->net_ack = 0;
4967 				}
4968 				/* restore any doubled timers */
4969 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4970 				if (net->RTO < stcb->asoc.minrto) {
4971 					net->RTO = stcb->asoc.minrto;
4972 				}
4973 				if (net->RTO > stcb->asoc.maxrto) {
4974 					net->RTO = stcb->asoc.maxrto;
4975 				}
4976 			}
4977 		}
4978 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4979 	}
4980 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4981 		/* nothing left in-flight */
4982 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4983 			/* stop all timers */
4984 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4985 			    stcb, net,
4986 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4987 			net->flight_size = 0;
4988 			net->partial_bytes_acked = 0;
4989 		}
4990 		asoc->total_flight = 0;
4991 		asoc->total_flight_count = 0;
4992 	}
4993 	/**********************************/
4994 	/* Now what about shutdown issues */
4995 	/**********************************/
4996 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4997 		/* nothing left on sendqueue.. consider done */
4998 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4999 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5000 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5001 		}
5002 		asoc->peers_rwnd = a_rwnd;
5003 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5004 			/* SWS sender side engages */
5005 			asoc->peers_rwnd = 0;
5006 		}
5007 		/* clean up */
5008 		if ((asoc->stream_queue_cnt == 1) &&
5009 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5010 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5011 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5012 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5013 		}
5014 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5015 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5016 		    (asoc->stream_queue_cnt == 1) &&
5017 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5018 			struct mbuf *op_err;
5019 
5020 			*abort_now = 1;
5021 			/* XXX */
5022 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5023 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5024 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5025 			return;
5026 		}
5027 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5028 		    (asoc->stream_queue_cnt == 0)) {
5029 			struct sctp_nets *netp;
5030 
5031 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5032 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5033 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5034 			}
5035 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5036 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5037 			sctp_stop_timers_for_shutdown(stcb);
5038 			if (asoc->alternate) {
5039 				netp = asoc->alternate;
5040 			} else {
5041 				netp = asoc->primary_destination;
5042 			}
5043 			sctp_send_shutdown(stcb, netp);
5044 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5045 			    stcb->sctp_ep, stcb, netp);
5046 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5047 			    stcb->sctp_ep, stcb, netp);
5048 			return;
5049 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5050 		    (asoc->stream_queue_cnt == 0)) {
5051 			struct sctp_nets *netp;
5052 
5053 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5054 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5055 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5056 			sctp_stop_timers_for_shutdown(stcb);
5057 			if (asoc->alternate) {
5058 				netp = asoc->alternate;
5059 			} else {
5060 				netp = asoc->primary_destination;
5061 			}
5062 			sctp_send_shutdown_ack(stcb, netp);
5063 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5064 			    stcb->sctp_ep, stcb, netp);
5065 			return;
5066 		}
5067 	}
5068 	/*
5069 	 * Now here we are going to recycle net_ack for a different use...
5070 	 * HEADS UP.
5071 	 */
5072 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5073 		net->net_ack = 0;
5074 	}
5075 
5076 	/*
5077 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5078 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5079 	 * automatically ensure that.
5080 	 */
5081 	if ((asoc->sctp_cmt_on_off > 0) &&
5082 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5083 	    (cmt_dac_flag == 0)) {
5084 		this_sack_lowest_newack = cum_ack;
5085 	}
5086 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5087 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5088 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5089 	}
5090 	/* JRS - Use the congestion control given in the CC module */
5091 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5092 
5093 	/* Now are we exiting loss recovery ? */
5094 	if (will_exit_fast_recovery) {
5095 		/* Ok, we must exit fast recovery */
5096 		asoc->fast_retran_loss_recovery = 0;
5097 	}
5098 	if ((asoc->sat_t3_loss_recovery) &&
5099 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5100 		/* end satellite t3 loss recovery */
5101 		asoc->sat_t3_loss_recovery = 0;
5102 	}
5103 	/*
5104 	 * CMT Fast recovery
5105 	 */
5106 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5107 		if (net->will_exit_fast_recovery) {
5108 			/* Ok, we must exit fast recovery */
5109 			net->fast_retran_loss_recovery = 0;
5110 		}
5111 	}
5112 
5113 	/* Adjust and set the new rwnd value */
5114 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5115 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5116 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5117 	}
5118 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5119 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5120 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5121 		/* SWS sender side engages */
5122 		asoc->peers_rwnd = 0;
5123 	}
5124 	if (asoc->peers_rwnd > old_rwnd) {
5125 		win_probe_recovery = 1;
5126 	}
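	/*
	 * A minimal sketch of the rwnd bookkeeping performed above, where
	 * sctp_peer_chunk_oh is the assumed per-chunk bookkeeping
	 * overhead:
	 *
	 *	flight = total_flight +
	 *	    total_flight_count * sctp_peer_chunk_oh;
	 *	peers_rwnd = (a_rwnd > flight) ? (a_rwnd - flight) : 0;
	 *	if (peers_rwnd < sctp_sws_sender)
	 *		peers_rwnd = 0;		(silly window avoidance)
	 *	if (peers_rwnd > old_rwnd)
	 *		win_probe_recovery = 1;	(window re-opened)
	 *
	 * The advertised window is discounted by what is already in
	 * flight, clamped by the SWS threshold, and a growing window
	 * allows any outstanding window probe to be recovered below.
	 */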
5127 	/*
5128 	 * Now we must set things up so we have a timer up for anyone
5129 	 * with outstanding data.
5130 	 */
5131 	done_once = 0;
5132 again:
5133 	j = 0;
5134 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5135 		if (win_probe_recovery && (net->window_probe)) {
5136 			win_probe_recovered = 1;
5137 			/*-
5138 			 * Find first chunk that was used with
5139 			 * window probe and clear the event. Put
5140 			 * it back into the send queue as if it had
5141 			 * not been sent.
5142 			 */
5143 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5144 				if (tp1->window_probe) {
5145 					sctp_window_probe_recovery(stcb, asoc, tp1);
5146 					break;
5147 				}
5148 			}
5149 		}
5150 		if (net->flight_size) {
5151 			j++;
5152 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5153 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5154 				    stcb->sctp_ep, stcb, net);
5155 			}
5156 			if (net->window_probe) {
5157 				net->window_probe = 0;
5158 			}
5159 		} else {
5160 			if (net->window_probe) {
5161 				/*
5162 				 * For window probes we must ensure that a
5163 				 * timer is still running there.
5164 				 */
5165 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5166 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5167 					    stcb->sctp_ep, stcb, net);
5168 
5169 				}
5170 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5171 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5172 				    stcb, net,
5173 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5174 			}
5175 		}
5176 	}
5177 	if ((j == 0) &&
5178 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5179 	    (asoc->sent_queue_retran_cnt == 0) &&
5180 	    (win_probe_recovered == 0) &&
5181 	    (done_once == 0)) {
5182 		/*
5183 		 * This should not happen unless all packets are PR-SCTP
5184 		 * and marked to be skipped, of course.
5185 		 */
5186 		if (sctp_fs_audit(asoc)) {
5187 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5188 				net->flight_size = 0;
5189 			}
5190 			asoc->total_flight = 0;
5191 			asoc->total_flight_count = 0;
5192 			asoc->sent_queue_retran_cnt = 0;
5193 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5194 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5195 					sctp_flight_size_increase(tp1);
5196 					sctp_total_flight_increase(stcb, tp1);
5197 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5198 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5199 				}
5200 			}
5201 		}
5202 		done_once = 1;
5203 		goto again;
5204 	}
5205 	/*********************************************/
5206 	/* Here we perform PR-SCTP procedures        */
5207 	/* (section 4.2)                             */
5208 	/*********************************************/
5209 	/* C1. update advancedPeerAckPoint */
5210 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5211 		asoc->advanced_peer_ack_point = cum_ack;
5212 	}
5213 	/* C2. try to further move advancedPeerAckPoint ahead */
5214 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5215 		struct sctp_tmit_chunk *lchk;
5216 		uint32_t old_adv_peer_ack_point;
5217 
5218 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5219 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5220 		/* C3. See if we need to send a Fwd-TSN */
5221 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5222 			/*
5223 			 * ISSUE with ECN, see FWD-TSN processing.
5224 			 */
5225 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5226 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5227 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5228 				    old_adv_peer_ack_point);
5229 			}
5230 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5231 				send_forward_tsn(stcb, asoc);
5232 			} else if (lchk) {
5233 				/* try to FR fwd-tsn's that get lost too */
5234 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5235 					send_forward_tsn(stcb, asoc);
5236 				}
5237 			}
5238 		}
5239 		if (lchk) {
5240 			/* Assure a timer is up */
5241 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5242 			    stcb->sctp_ep, stcb, lchk->whoTo);
5243 		}
5244 	}
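	/*
	 * Sketch of the C1-C3 decision above with illustrative TSNs:
	 * cum_ack = 100, old advancedPeerAckPoint = 100, and the next two
	 * chunks (TSNs 101, 102) are abandoned PR-SCTP chunks.
	 *
	 *	advanced_peer_ack_point: 100 -> 102	(C2)
	 *	102 > cum_ack 100			(C3 applies)
	 *	102 > old point 100			(send FORWARD-TSN)
	 *
	 * If the ack point did not move but an earlier FORWARD-TSN
	 * covering lchk looks lost (fwd_tsn_cnt >= 3), it is re-sent.
	 */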
5245 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5246 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5247 		    a_rwnd,
5248 		    stcb->asoc.peers_rwnd,
5249 		    stcb->asoc.total_flight,
5250 		    stcb->asoc.total_output_queue_size);
5251 	}
5252 }
5253 
5254 void
5255 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5256 {
5257 	/* Copy cum-ack */
5258 	uint32_t cum_ack, a_rwnd;
5259 
5260 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5261 	/* Arrange so a_rwnd does NOT change */
5262 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
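	/*
	 * Why this keeps the peer's rwnd constant: the express SACK
	 * handler recomputes, modulo the SWS clamp,
	 *
	 *	peers_rwnd = a_rwnd - total_flight
	 *	           = (peers_rwnd + total_flight) - total_flight
	 *	           = peers_rwnd
	 *
	 * so passing peers_rwnd + total_flight as a_rwnd leaves the
	 * stored window unchanged, as intended for SHUTDOWN processing.
	 */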
5263 
5264 	/* Now call the express sack handling */
5265 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5266 }
5267 
5268 static void
5269 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5270     struct sctp_stream_in *strmin)
5271 {
5272 	struct sctp_queued_to_read *control, *ncontrol;
5273 	struct sctp_association *asoc;
5274 	uint32_t mid;
5275 	int need_reasm_check = 0;
5276 
5277 	asoc = &stcb->asoc;
5278 	mid = strmin->last_mid_delivered;
5279 	/*
5280 	 * First deliver anything prior to and including the stream
5281 	 * sequence number that came in.
5282 	 */
5283 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5284 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5285 			/* this is deliverable now */
5286 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5287 				if (control->on_strm_q) {
5288 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5289 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5290 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5291 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5292 #ifdef INVARIANTS
5293 					} else {
5294 						panic("strmin: %p ctl: %p unknown %d",
5295 						    strmin, control, control->on_strm_q);
5296 #endif
5297 					}
5298 					control->on_strm_q = 0;
5299 				}
5300 				/* subtract pending on streams */
5301 				if (asoc->size_on_all_streams >= control->length) {
5302 					asoc->size_on_all_streams -= control->length;
5303 				} else {
5304 #ifdef INVARIANTS
5305 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5306 #else
5307 					asoc->size_on_all_streams = 0;
5308 #endif
5309 				}
5310 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5311 				/* deliver it to at least the delivery-q */
5312 				if (stcb->sctp_socket) {
5313 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5314 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5315 					    control,
5316 					    &stcb->sctp_socket->so_rcv,
5317 					    1, SCTP_READ_LOCK_HELD,
5318 					    SCTP_SO_NOT_LOCKED);
5319 				}
5320 			} else {
5321 				/* It's a fragmented message */
5322 				if (control->first_frag_seen) {
5323 					/*
5324 					 * Make it so this is next to
5325 					 * deliver; we restore it later
5326 					 */
5327 					strmin->last_mid_delivered = control->mid - 1;
5328 					need_reasm_check = 1;
5329 					break;
5330 				}
5331 			}
5332 		} else {
5333 			/* no more delivery now. */
5334 			break;
5335 		}
5336 	}
5337 	if (need_reasm_check) {
5338 		int ret;
5339 
5340 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5341 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5342 			/* Restore the next to deliver unless we are ahead */
5343 			strmin->last_mid_delivered = mid;
5344 		}
5345 		if (ret == 0) {
5346 			/* Left the front Partial one on */
5347 			return;
5348 		}
5349 		need_reasm_check = 0;
5350 	}
5351 	/*
5352 	 * Now we must deliver things in the queue the normal way, if any
5353 	 * are now ready.
5354 	 */
5355 	mid = strmin->last_mid_delivered + 1;
5356 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5357 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5358 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5359 				/* this is deliverable now */
5360 				if (control->on_strm_q) {
5361 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5362 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5363 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5364 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5365 #ifdef INVARIANTS
5366 					} else {
5367 						panic("strmin: %p ctl: %p unknown %d",
5368 						    strmin, control, control->on_strm_q);
5369 #endif
5370 					}
5371 					control->on_strm_q = 0;
5372 				}
5373 				/* subtract pending on streams */
5374 				if (asoc->size_on_all_streams >= control->length) {
5375 					asoc->size_on_all_streams -= control->length;
5376 				} else {
5377 #ifdef INVARIANTS
5378 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5379 #else
5380 					asoc->size_on_all_streams = 0;
5381 #endif
5382 				}
5383 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5384 				/* deliver it to at least the delivery-q */
5385 				strmin->last_mid_delivered = control->mid;
5386 				if (stcb->sctp_socket) {
5387 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5388 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5389 					    control,
5390 					    &stcb->sctp_socket->so_rcv, 1,
5391 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5392 
5393 				}
5394 				mid = strmin->last_mid_delivered + 1;
5395 			} else {
5396 				/* It's a fragmented message */
5397 				if (control->first_frag_seen) {
5398 					/*
5399 					 * Make it so this is next to
5400 					 * deliver
5401 					 */
5402 					strmin->last_mid_delivered = control->mid - 1;
5403 					need_reasm_check = 1;
5404 					break;
5405 				}
5406 			}
5407 		} else {
5408 			break;
5409 		}
5410 	}
5411 	if (need_reasm_check) {
5412 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5413 	}
5414 }
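/*
 * Note on the SCTP_MID_* comparisons used above: with I-DATA support the
 * message identifier is a full 32-bit MID, otherwise a 16-bit SSN, so the
 * macros pick the matching wrap-safe compare. A sketch with assumed helper
 * names (the real macros are defined in the SCTP headers):
 *
 *	#define MID_GE(idata, a, b) ((idata) ?			\
 *	    serial32_ge((a), (b)) :				\
 *	    serial16_ge((uint16_t)(a), (uint16_t)(b)))
 *
 * Both compares follow the same serial-number arithmetic as TSNs.
 */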
5415 
5416 
5417 
5418 static void
5419 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5420     struct sctp_association *asoc,
5421     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5422 {
5423 	struct sctp_queued_to_read *control;
5424 	struct sctp_stream_in *strm;
5425 	struct sctp_tmit_chunk *chk, *nchk;
5426 	int cnt_removed = 0;
5427 
5428 	/*
5429 	 * For now, large messages held on the stream reassembly queue that
5430 	 * are complete will be tossed too. We could in theory do more work
5431 	 * to spin through and stop after dumping one message, i.e. on
5432 	 * seeing the start of a new message at the head, and call the
5433 	 * delivery function to see if it can be delivered. But for now we
5434 	 * just dump everything on the queue.
5435 	 */
5436 	strm = &asoc->strmin[stream];
5437 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5438 	if (control == NULL) {
5439 		/* Not found */
5440 		return;
5441 	}
5442 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5443 		return;
5444 	}
5445 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5446 		/* Purge hanging chunks */
5447 		if (!asoc->idata_supported && (ordered == 0)) {
5448 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5449 				break;
5450 			}
5451 		}
5452 		cnt_removed++;
5453 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5454 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5455 			asoc->size_on_reasm_queue -= chk->send_size;
5456 		} else {
5457 #ifdef INVARIANTS
5458 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5459 #else
5460 			asoc->size_on_reasm_queue = 0;
5461 #endif
5462 		}
5463 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5464 		if (chk->data) {
5465 			sctp_m_freem(chk->data);
5466 			chk->data = NULL;
5467 		}
5468 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5469 	}
5470 	if (!TAILQ_EMPTY(&control->reasm)) {
5471 		/* This has to be old data, unordered */
5472 		if (control->data) {
5473 			sctp_m_freem(control->data);
5474 			control->data = NULL;
5475 		}
5476 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5477 		chk = TAILQ_FIRST(&control->reasm);
5478 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5479 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5480 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5481 			    chk, SCTP_READ_LOCK_HELD);
5482 		}
5483 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5484 		return;
5485 	}
5486 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5487 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5488 		if (asoc->size_on_all_streams >= control->length) {
5489 			asoc->size_on_all_streams -= control->length;
5490 		} else {
5491 #ifdef INVARIANTS
5492 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5493 #else
5494 			asoc->size_on_all_streams = 0;
5495 #endif
5496 		}
5497 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5498 		control->on_strm_q = 0;
5499 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5500 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5501 		control->on_strm_q = 0;
5502 #ifdef INVARIANTS
5503 	} else if (control->on_strm_q) {
5504 		panic("strm: %p ctl: %p unknown %d",
5505 		    strm, control, control->on_strm_q);
5506 #endif
5507 	}
5508 	control->on_strm_q = 0;
5509 	if (control->on_read_q == 0) {
5510 		sctp_free_remote_addr(control->whoFrom);
5511 		if (control->data) {
5512 			sctp_m_freem(control->data);
5513 			control->data = NULL;
5514 		}
5515 		sctp_free_a_readq(stcb, control);
5516 	}
5517 }
5518 
5519 void
5520 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5521     struct sctp_forward_tsn_chunk *fwd,
5522     int *abort_flag, struct mbuf *m, int offset)
5523 {
5524 	/* The pr-sctp fwd tsn */
5525 	/*
5526 	 * Here we will perform all the data receiver side steps for
5527 	 * processing FwdTSN, as required by the PR-SCTP draft.
5528 	 *
5529 	 * Assume we get FwdTSN(x):
5530 	 * 1) update local cumTSN to x
5531 	 * 2) try to further advance cumTSN to x + others we have
5532 	 * 3) examine and update the re-ordering queue on pr-in-streams
5533 	 * 4) clean up the re-assembly queue
5534 	 * 5) send a SACK to report where we are
5535 	 */
5536 	struct sctp_association *asoc;
5537 	uint32_t new_cum_tsn, gap;
5538 	unsigned int i, fwd_sz, m_size;
5539 	uint32_t str_seq;
5540 	struct sctp_stream_in *strm;
5541 	struct sctp_queued_to_read *control, *sv;
5542 
5543 	asoc = &stcb->asoc;
5544 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5545 		SCTPDBG(SCTP_DEBUG_INDATA1,
5546 		    "Bad size too small/big fwd-tsn\n");
5547 		return;
5548 	}
5549 	m_size = (stcb->asoc.mapping_array_size << 3);
5550 	/*************************************************************/
5551 	/* 1. Here we update local cumTSN and shift the bitmap array */
5552 	/*************************************************************/
5553 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5554 
5555 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5556 		/* Already got there ... */
5557 		return;
5558 	}
5559 	/*
5560 	 * now we know the new TSN is more advanced, let's find the actual
5561 	 * gap
5562 	 */
5563 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5564 	asoc->cumulative_tsn = new_cum_tsn;
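	/*
	 * SCTP_CALC_TSN_TO_GAP yields the bit offset of new_cum_tsn
	 * within the mapping array; conceptually (a sketch, ignoring the
	 * macro's exact form):
	 *
	 *	gap = new_cum_tsn - mapping_array_base_tsn;	(mod 2^32)
	 *
	 * e.g. base = 0xfffffff0 and new_cum_tsn = 0x00000005 give
	 * gap = 0x15 = 21, so wrap-around is handled by unsigned
	 * subtraction. A gap >= m_size means the FwdTSN jumps past the
	 * whole array, which is then simply reset below.
	 */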
5565 	if (gap >= m_size) {
5566 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5567 			struct mbuf *op_err;
5568 			char msg[SCTP_DIAG_INFO_LEN];
5569 
5570 			/*
5571 			 * Out of range (of the single-byte chunks in the
5572 			 * rwnd I give out). This must be an attacker.
5573 			 */
5574 			*abort_flag = 1;
5575 			snprintf(msg, sizeof(msg),
5576 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5577 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5578 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5579 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5580 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5581 			return;
5582 		}
5583 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5584 
5585 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5586 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5587 		asoc->highest_tsn_inside_map = new_cum_tsn;
5588 
5589 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5590 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5591 
5592 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5593 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5594 		}
5595 	} else {
5596 		SCTP_TCB_LOCK_ASSERT(stcb);
5597 		for (i = 0; i <= gap; i++) {
5598 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5599 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5600 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5601 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5602 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5603 				}
5604 			}
5605 		}
5606 	}
5607 	/*************************************************************/
5608 	/* 2. Clear up re-assembly queue                             */
5609 	/*************************************************************/
5610 
5611 	/* This is now done as part of clearing up the stream/seq */
5612 	if (asoc->idata_supported == 0) {
5613 		uint16_t sid;
5614 
5615 		/* Flush all the un-ordered data based on cum-tsn */
5616 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5617 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5618 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5619 		}
5620 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5621 	}
5622 	/*******************************************************/
5623 	/* 3. Update the PR-stream re-ordering queues and fix  */
5624 	/*    delivery issues as needed.                    */
5625 	/*******************************************************/
5626 	fwd_sz -= sizeof(*fwd);
5627 	if (m && fwd_sz) {
5628 		/* New method. */
5629 		unsigned int num_str;
5630 		uint32_t mid, cur_mid;
5631 		uint16_t sid;
5632 		uint16_t ordered, flags;
5633 		struct sctp_strseq *stseq, strseqbuf;
5634 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5635 
5636 		offset += sizeof(*fwd);
5637 
5638 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5639 		if (asoc->idata_supported) {
5640 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5641 		} else {
5642 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5643 		}
5644 		for (i = 0; i < num_str; i++) {
5645 			if (asoc->idata_supported) {
5646 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5647 				    sizeof(struct sctp_strseq_mid),
5648 				    (uint8_t *)&strseqbuf_m);
5649 				offset += sizeof(struct sctp_strseq_mid);
5650 				if (stseq_m == NULL) {
5651 					break;
5652 				}
5653 				sid = ntohs(stseq_m->sid);
5654 				mid = ntohl(stseq_m->mid);
5655 				flags = ntohs(stseq_m->flags);
5656 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5657 					ordered = 0;
5658 				} else {
5659 					ordered = 1;
5660 				}
5661 			} else {
5662 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5663 				    sizeof(struct sctp_strseq),
5664 				    (uint8_t *)&strseqbuf);
5665 				offset += sizeof(struct sctp_strseq);
5666 				if (stseq == NULL) {
5667 					break;
5668 				}
5669 				sid = ntohs(stseq->sid);
5670 				mid = (uint32_t)ntohs(stseq->ssn);
5671 				ordered = 1;
5672 			}
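			/*
			 * For reference, a sketch of the two per-stream
			 * entry layouts just parsed (the real definitions
			 * are in sctp_header.h):
			 *
			 *	struct sctp_strseq {		FORWARD-TSN
			 *		uint16_t sid;		stream id
			 *		uint16_t ssn;		stream seq number
			 *	};
			 *	struct sctp_strseq_mid {	I-FORWARD-TSN
			 *		uint16_t sid;
			 *		uint16_t flags;
			 *		uint32_t mid;		32-bit message id
			 *	};
			 *
			 * All fields arrive in network byte order, hence
			 * the ntohs()/ntohl() conversions above.
			 */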
5673 			/* Convert */
5674 
5675 			/* now process */
5676 
5677 			/*
5678 			 * Ok we now look for the stream/seq on the read
5679 			 * queue where it's not all delivered. If we find it
5680 			 * we transmute the read entry into a PDI_ABORTED.
5681 			 */
5682 			if (sid >= asoc->streamincnt) {
5683 				/* screwed up streams, stop!  */
5684 				break;
5685 			}
5686 			if ((asoc->str_of_pdapi == sid) &&
5687 			    (asoc->ssn_of_pdapi == mid)) {
5688 				/*
5689 				 * If this is the one we were partially
5690 				 * delivering now then we no longer are.
5691 				 * Note this will change with the reassembly
5692 				 * re-write.
5693 				 */
5694 				asoc->fragmented_delivery_inprogress = 0;
5695 			}
5696 			strm = &asoc->strmin[sid];
5697 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5698 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5699 			}
5700 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5701 				if ((control->sinfo_stream == sid) &&
5702 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5703 					str_seq = (sid << 16) | (0x0000ffff & mid);
5704 					control->pdapi_aborted = 1;
5705 					sv = stcb->asoc.control_pdapi;
5706 					control->end_added = 1;
5707 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5708 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5709 						if (asoc->size_on_all_streams >= control->length) {
5710 							asoc->size_on_all_streams -= control->length;
5711 						} else {
5712 #ifdef INVARIANTS
5713 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5714 #else
5715 							asoc->size_on_all_streams = 0;
5716 #endif
5717 						}
5718 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5719 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5720 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5721 #ifdef INVARIANTS
5722 					} else if (control->on_strm_q) {
5723 						panic("strm: %p ctl: %p unknown %d",
5724 						    strm, control, control->on_strm_q);
5725 #endif
5726 					}
5727 					control->on_strm_q = 0;
5728 					stcb->asoc.control_pdapi = control;
5729 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5730 					    stcb,
5731 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5732 					    (void *)&str_seq,
5733 					    SCTP_SO_NOT_LOCKED);
5734 					stcb->asoc.control_pdapi = sv;
5735 					break;
5736 				} else if ((control->sinfo_stream == sid) &&
5737 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5738 					/* We are past our victim SSN */
5739 					break;
5740 				}
5741 			}
5742 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5743 				/* Update the sequence number */
5744 				strm->last_mid_delivered = mid;
5745 			}
5746 			/* now kick the stream the new way */
5747 			/* sa_ignore NO_NULL_CHK */
5748 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5749 		}
5750 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5751 	}
5752 	/*
5753 	 * Now slide thing forward.
5754 	 * Now slide things forward.
5755 	sctp_slide_mapping_arrays(stcb);
5756 }
5757