xref: /freebsd/sys/netinet/sctp_indata.c (revision 19fe57fdb4fd2c18a37f2a972617c8769609cdb8)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send, that is, and will be sending it) for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int lock_held);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-to-many socket, since
86 	 * sb_cc is the count that everyone has put up. When we rewrite
87 	 * sctp_soreceive we will fix this so that ONLY this
88 	 * association's data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * Take out what has NOT been put on the socket queue and that we
109 	 * still hold for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even if it is 0. SWS (silly window syndrome) avoidance engaged.
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
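
/*
 * Illustrative sketch (not compiled): the receive window above is derived
 * in three steps -- start from the socket-buffer space, subtract what is
 * still held on the reassembly and stream queues (data plus MSIZE of mbuf
 * overhead per chunk), then subtract the control-chunk overhead, clamping
 * to 1 so we never advertise a silly window. The helper below restates
 * that flow with plain integers; all names here are invented for the
 * example.
 */
#if 0
static uint32_t
example_calc_rwnd(uint32_t sb_space, uint32_t reasm_bytes, uint32_t reasm_cnt,
    uint32_t strm_bytes, uint32_t strm_cnt, uint32_t ctrl_len)
{
	uint32_t calc = sb_space;
	uint32_t held;

	/* Data held for reassembly, with per-chunk mbuf overhead. */
	held = reasm_bytes + reasm_cnt * MSIZE;
	calc = (calc > held) ? calc - held : 0;
	/* Data still queued on the in-streams, same accounting. */
	held = strm_bytes + strm_cnt * MSIZE;
	calc = (calc > held) ? calc - held : 0;
	if (calc == 0)
		return (0);
	/* Control overhead; clamp to 1 byte to engage SWS avoidance. */
	calc = (calc > ctrl_len) ? calc - ctrl_len : 0;
	return ((calc < ctrl_len) ? 1 : calc);
}
#endif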
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 failed_build:
168 	return (read_queue_e);
169 }
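
/*
 * Illustrative sketch (not compiled): a caller holding a parsed DATA chunk
 * would typically turn it into a readq entry like this and then hand it to
 * the delivery path. The tsn/ppid/sid/mid/flags/net/dmbuf variables are
 * assumed to come from the caller's chunk-processing context; on failure
 * the mbuf chain still belongs to the caller and must be freed there.
 */
#if 0
	struct sctp_queued_to_read *control;

	control = sctp_build_readq_entry(stcb, net, tsn, ppid,
	    stcb->asoc.context, sid, mid, flags, dmbuf);
	if (control == NULL) {
		/* No memory for the readq entry: drop the data. */
		sctp_m_freem(dmbuf);
		return;
	}
#endif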
170 
171 struct mbuf *
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173 {
174 	struct sctp_extrcvinfo *seinfo;
175 	struct sctp_sndrcvinfo *outinfo;
176 	struct sctp_rcvinfo *rcvinfo;
177 	struct sctp_nxtinfo *nxtinfo;
178 	struct cmsghdr *cmh;
179 	struct mbuf *ret;
180 	int len;
181 	int use_extended;
182 	int provide_nxt;
183 
184 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 		/* user does not want any ancillary data */
188 		return (NULL);
189 	}
190 
191 	len = 0;
192 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 	}
195 	seinfo = (struct sctp_extrcvinfo *)sinfo;
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 		provide_nxt = 1;
199 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 	} else {
201 		provide_nxt = 0;
202 	}
203 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 			use_extended = 1;
206 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 		} else {
208 			use_extended = 0;
209 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 		}
211 	} else {
212 		use_extended = 0;
213 	}
214 
215 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 	if (ret == NULL) {
217 		/* No space */
218 		return (ret);
219 	}
220 	SCTP_BUF_LEN(ret) = 0;
221 
222 	/* We need a CMSG header followed by the struct */
223 	cmh = mtod(ret, struct cmsghdr *);
224 	/*
225 	 * Make sure that there is no un-initialized padding between the
226 	 * cmsg header and cmsg data and after the cmsg data.
227 	 */
228 	memset(cmh, 0, len);
229 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 		cmh->cmsg_level = IPPROTO_SCTP;
231 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 		cmh->cmsg_type = SCTP_RCVINFO;
233 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 		rcvinfo->rcv_context = sinfo->sinfo_context;
241 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 	}
245 	if (provide_nxt) {
246 		cmh->cmsg_level = IPPROTO_SCTP;
247 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 		cmh->cmsg_type = SCTP_NXTINFO;
249 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 		nxtinfo->nxt_flags = 0;
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 		}
261 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 	}
267 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 		cmh->cmsg_level = IPPROTO_SCTP;
269 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 		if (use_extended) {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 			cmh->cmsg_type = SCTP_EXTRCV;
273 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 		} else {
276 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 			cmh->cmsg_type = SCTP_SNDRCV;
278 			*outinfo = *sinfo;
279 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 		}
281 	}
282 	return (ret);
283 }
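
/*
 * Illustrative sketch (not compiled): on the application side, the
 * ancillary data assembled above is walked with the standard CMSG macros
 * after recvmsg(). A minimal receiver loop, assuming the SCTP_RECVRCVINFO
 * socket option was enabled and msg was filled in by recvmsg():
 */
#if 0
	struct msghdr msg;	/* set up and passed to recvmsg() earlier */
	struct cmsghdr *cmsg;
	struct sctp_rcvinfo rcv;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if ((cmsg->cmsg_level == IPPROTO_SCTP) &&
		    (cmsg->cmsg_type == SCTP_RCVINFO)) {
			/* Copy out; CMSG_DATA() may not be aligned. */
			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
			printf("sid %u tsn %u\n", rcv.rcv_sid, rcv.rcv_tsn);
		}
	}
#endif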
284 
285 
286 static void
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 {
289 	uint32_t gap, i, cumackp1;
290 	int fnd = 0;
291 	int in_r = 0, in_nr = 0;
292 
293 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
294 		return;
295 	}
296 	cumackp1 = asoc->cumulative_tsn + 1;
297 	if (SCTP_TSN_GT(cumackp1, tsn)) {
298 		/*
299 		 * this tsn is behind the cum ack and thus we don't need to
300 		 * worry about it being moved from one to the other.
301 		 */
302 		return;
303 	}
304 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 	if ((in_r == 0) && (in_nr == 0)) {
308 #ifdef INVARIANTS
309 		panic("Things are really messed up now");
310 #else
311 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 		sctp_print_mapping_array(asoc);
313 #endif
314 	}
315 	if (in_nr == 0)
316 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 	if (in_r)
318 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 		asoc->highest_tsn_inside_nr_map = tsn;
321 	}
322 	if (tsn == asoc->highest_tsn_inside_map) {
323 		/* We must back down to see what the new highest is */
324 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 				asoc->highest_tsn_inside_map = i;
328 				fnd = 1;
329 				break;
330 			}
331 		}
332 		if (!fnd) {
333 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 		}
335 	}
336 }
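
/*
 * Illustrative sketch (not compiled): the mapping arrays used above are
 * plain bitmaps indexed by the distance ("gap") of a TSN from the array
 * base TSN, computed modulo 2^32 so the sequence space can wrap. A
 * simplified version of the gap computation and bit test, with invented
 * names:
 */
#if 0
static int
example_tsn_is_present(const uint8_t *map, uint32_t tsn, uint32_t base_tsn)
{
	uint32_t gap;

	/* Unsigned subtraction gives the right answer across TSN wrap. */
	gap = tsn - base_tsn;
	return ((map[gap >> 3] >> (gap & 0x07)) & 0x01);
}
#endif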
337 
338 static int
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340     struct sctp_association *asoc,
341     struct sctp_queued_to_read *control)
342 {
343 	struct sctp_queued_to_read *at;
344 	struct sctp_readhead *q;
345 	uint8_t flags, unordered;
346 
347 	flags = (control->sinfo_flags >> 8);
348 	unordered = flags & SCTP_DATA_UNORDERED;
349 	if (unordered) {
350 		q = &strm->uno_inqueue;
351 		if (asoc->idata_supported == 0) {
352 			if (!TAILQ_EMPTY(q)) {
353 				/*
354 				 * Only one stream can be here in old style
355 				 * -- abort
356 				 */
357 				return (-1);
358 			}
359 			TAILQ_INSERT_TAIL(q, control, next_instrm);
360 			control->on_strm_q = SCTP_ON_UNORDERED;
361 			return (0);
362 		}
363 	} else {
364 		q = &strm->inqueue;
365 	}
366 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 		control->end_added = 1;
368 		control->first_frag_seen = 1;
369 		control->last_frag_seen = 1;
370 	}
371 	if (TAILQ_EMPTY(q)) {
372 		/* Empty queue */
373 		TAILQ_INSERT_HEAD(q, control, next_instrm);
374 		if (unordered) {
375 			control->on_strm_q = SCTP_ON_UNORDERED;
376 		} else {
377 			control->on_strm_q = SCTP_ON_ORDERED;
378 		}
379 		return (0);
380 	} else {
381 		TAILQ_FOREACH(at, q, next_instrm) {
382 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 				/*
384 				 * one in queue is bigger than the new one,
385 				 * insert before this one
386 				 */
387 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 				if (unordered) {
389 					control->on_strm_q = SCTP_ON_UNORDERED;
390 				} else {
391 					control->on_strm_q = SCTP_ON_ORDERED;
392 				}
393 				break;
394 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 				/*
396 				 * Gak, he sent me a duplicate msg id
397 				 * number?? Return -1 to abort.
398 				 */
399 				return (-1);
400 			} else {
401 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 					/*
403 					 * We are at the end, insert it
404 					 * after this one
405 					 */
406 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 						sctp_log_strm_del(control, at,
408 						    SCTP_STR_LOG_FROM_INSERT_TL);
409 					}
410 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 					if (unordered) {
412 						control->on_strm_q = SCTP_ON_UNORDERED;
413 					} else {
414 						control->on_strm_q = SCTP_ON_ORDERED;
415 					}
416 					break;
417 				}
418 			}
419 		}
420 	}
421 	return (0);
422 }
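
/*
 * Illustrative sketch (not compiled): the SCTP_MID_GT()/SCTP_MID_EQ()
 * comparisons above must respect sequence-number wrap, and the width of
 * the comparison depends on whether I-DATA is in use (32-bit MID) or
 * classic DATA (16-bit SSN). A hand-rolled equivalent in the style of
 * RFC 1982 serial arithmetic might look like this:
 */
#if 0
static int
example_mid_gt(int idata_supported, uint32_t a, uint32_t b)
{
	if (idata_supported) {
		/* 32-bit serial compare: a > b iff 0 < a - b < 2^31. */
		return ((int32_t)(a - b) > 0);
	} else {
		/* 16-bit serial compare on the SSN part only. */
		return ((int16_t)((uint16_t)a - (uint16_t)b) > 0);
	}
}
#endif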
423 
424 static void
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426     struct sctp_queued_to_read *control,
427     struct sctp_tmit_chunk *chk,
428     int *abort_flag, int opspot)
429 {
430 	char msg[SCTP_DIAG_INFO_LEN];
431 	struct mbuf *oper;
432 
433 	if (stcb->asoc.idata_supported) {
434 		snprintf(msg, sizeof(msg),
435 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 		    opspot,
437 		    control->fsn_included,
438 		    chk->rec.data.tsn,
439 		    chk->rec.data.sid,
440 		    chk->rec.data.fsn, chk->rec.data.mid);
441 	} else {
442 		snprintf(msg, sizeof(msg),
443 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 		    opspot,
445 		    control->fsn_included,
446 		    chk->rec.data.tsn,
447 		    chk->rec.data.sid,
448 		    chk->rec.data.fsn,
449 		    (uint16_t)chk->rec.data.mid);
450 	}
451 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 	sctp_m_freem(chk->data);
453 	chk->data = NULL;
454 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
457 	*abort_flag = 1;
458 }
459 
460 static void
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
462 {
463 	/*
464 	 * The control could not be placed and must be cleaned.
465 	 */
466 	struct sctp_tmit_chunk *chk, *nchk;
467 
468 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 		if (chk->data)
471 			sctp_m_freem(chk->data);
472 		chk->data = NULL;
473 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 	}
475 	sctp_free_remote_addr(control->whoFrom);
476 	if (control->data) {
477 		sctp_m_freem(control->data);
478 		control->data = NULL;
479 	}
480 	sctp_free_a_readq(stcb, control);
481 }
482 
483 /*
484  * Queue the chunk either right into the socket buffer if it is the next one
485  * to go OR put it in the correct place in the delivery queue.  If we do
486  * append to the so_buf, keep doing so until we are out of order, as
487  * long as the controls entered are non-fragmented.
488  */
489 static void
490 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
491     struct sctp_association *asoc,
492     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
493 {
494 	/*
495 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
496 	 * all the data in one stream this could happen quite rapidly. One
497 	 * could use the TSN to keep track of things, but this scheme breaks
498 	 * down in the other type of stream usage that could occur. Send a
499 	 * single msg to stream 0, send 4Billion messages to stream 1, now
500 	 * send a message to stream 0. You have a situation where the TSN
501 	 * has wrapped but not in the stream. Is this worth worrying about
502 	 * or should we just change our queue sort at the bottom to be by
503 	 * TSN?
504 	 *
505 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
506 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
507 	 * assignment this could happen... and I don't see how this would be
508 	 * a violation. So for now I am undecided and will leave the sort by
509 	 * SSN alone. Maybe a hybrid approach is the answer.
510 	 *
511 	 */
512 	struct sctp_queued_to_read *at;
513 	int queue_needed;
514 	uint32_t nxt_todel;
515 	struct mbuf *op_err;
516 	struct sctp_stream_in *strm;
517 	char msg[SCTP_DIAG_INFO_LEN];
518 
519 	strm = &asoc->strmin[control->sinfo_stream];
520 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
521 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
522 	}
523 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
524 		/* The incoming sseq is behind where we last delivered? */
525 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
526 		    strm->last_mid_delivered, control->mid);
527 		/*
528 		 * throw it in the stream so it gets cleaned up in
529 		 * association destruction
530 		 */
531 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
532 		if (asoc->idata_supported) {
533 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
534 			    strm->last_mid_delivered, control->sinfo_tsn,
535 			    control->sinfo_stream, control->mid);
536 		} else {
537 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
538 			    (uint16_t)strm->last_mid_delivered,
539 			    control->sinfo_tsn,
540 			    control->sinfo_stream,
541 			    (uint16_t)control->mid);
542 		}
543 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
544 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
545 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
546 		*abort_flag = 1;
547 		return;
548 
549 	}
550 	queue_needed = 1;
551 	asoc->size_on_all_streams += control->length;
552 	sctp_ucount_incr(asoc->cnt_on_all_streams);
553 	nxt_todel = strm->last_mid_delivered + 1;
554 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
555 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
556 		struct socket *so;
557 
558 		so = SCTP_INP_SO(stcb->sctp_ep);
559 		atomic_add_int(&stcb->asoc.refcnt, 1);
560 		SCTP_TCB_UNLOCK(stcb);
561 		SCTP_SOCKET_LOCK(so, 1);
562 		SCTP_TCB_LOCK(stcb);
563 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
564 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
565 			SCTP_SOCKET_UNLOCK(so, 1);
566 			return;
567 		}
568 #endif
569 		/* can be delivered right away? */
570 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
571 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
572 		}
573 		/* EY it won't be queued if it could be delivered directly */
574 		queue_needed = 0;
575 		if (asoc->size_on_all_streams >= control->length) {
576 			asoc->size_on_all_streams -= control->length;
577 		} else {
578 #ifdef INVARIANTS
579 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
580 #else
581 			asoc->size_on_all_streams = 0;
582 #endif
583 		}
584 		sctp_ucount_decr(asoc->cnt_on_all_streams);
585 		strm->last_mid_delivered++;
586 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
587 		sctp_add_to_readq(stcb->sctp_ep, stcb,
588 		    control,
589 		    &stcb->sctp_socket->so_rcv, 1,
590 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
591 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
592 			/* all delivered */
593 			nxt_todel = strm->last_mid_delivered + 1;
594 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
595 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
596 				if (control->on_strm_q == SCTP_ON_ORDERED) {
597 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
598 					if (asoc->size_on_all_streams >= control->length) {
599 						asoc->size_on_all_streams -= control->length;
600 					} else {
601 #ifdef INVARIANTS
602 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
603 #else
604 						asoc->size_on_all_streams = 0;
605 #endif
606 					}
607 					sctp_ucount_decr(asoc->cnt_on_all_streams);
608 #ifdef INVARIANTS
609 				} else {
610 					panic("Huh control: %p is on_strm_q: %d",
611 					    control, control->on_strm_q);
612 #endif
613 				}
614 				control->on_strm_q = 0;
615 				strm->last_mid_delivered++;
616 				/*
617 				 * We ignore the return of deliver_data here
618 				 * since we always can hold the chunk on the
619 				 * d-queue. And we have a finite number that
620 				 * can be delivered from the strq.
621 				 */
622 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
623 					sctp_log_strm_del(control, NULL,
624 					    SCTP_STR_LOG_FROM_IMMED_DEL);
625 				}
626 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
627 				sctp_add_to_readq(stcb->sctp_ep, stcb,
628 				    control,
629 				    &stcb->sctp_socket->so_rcv, 1,
630 				    SCTP_READ_LOCK_NOT_HELD,
631 				    SCTP_SO_LOCKED);
632 				continue;
633 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
634 				*need_reasm = 1;
635 			}
636 			break;
637 		}
638 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
639 		SCTP_SOCKET_UNLOCK(so, 1);
640 #endif
641 	}
642 	if (queue_needed) {
643 		/*
644 		 * Ok, we did not deliver this guy, find the correct place
645 		 * to put it on the queue.
646 		 */
647 		if (sctp_place_control_in_stream(strm, asoc, control)) {
648 			snprintf(msg, sizeof(msg),
649 			    "Queue to str MID: %u duplicate",
650 			    control->mid);
651 			sctp_clean_up_control(stcb, control);
652 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
653 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
654 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
655 			*abort_flag = 1;
656 		}
657 	}
658 }
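
/*
 * Illustrative sketch (not compiled): ordered delivery above hinges on one
 * test -- is this message's MID exactly one past the last MID delivered on
 * the stream? Everything else is queueing and accounting. In outline:
 */
#if 0
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
		/* In order: hand to the socket and advance the counter. */
		strm->last_mid_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
	} else {
		/* Out of order: file it by MID in the stream queue. */
		(void)sctp_place_control_in_stream(strm, asoc, control);
	}
#endif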
659 
660 
661 static void
662 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
663 {
664 	struct mbuf *m, *prev = NULL;
665 	struct sctp_tcb *stcb;
666 
667 	stcb = control->stcb;
668 	control->held_length = 0;
669 	control->length = 0;
670 	m = control->data;
671 	while (m) {
672 		if (SCTP_BUF_LEN(m) == 0) {
673 			/* Skip mbufs with NO length */
674 			if (prev == NULL) {
675 				/* First one */
676 				control->data = sctp_m_free(m);
677 				m = control->data;
678 			} else {
679 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
680 				m = SCTP_BUF_NEXT(prev);
681 			}
682 			if (m == NULL) {
683 				control->tail_mbuf = prev;
684 			}
685 			continue;
686 		}
687 		prev = m;
688 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
689 		if (control->on_read_q) {
690 			/*
691 			 * On the read queue, so we must increment the SB
692 			 * counters; we assume the caller holds any SB locks.
693 			 */
694 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
695 		}
696 		m = SCTP_BUF_NEXT(m);
697 	}
698 	if (prev) {
699 		control->tail_mbuf = prev;
700 	}
701 }
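
/*
 * Illustrative sketch (not compiled): the loop above is the classic
 * singly-linked-list walk that frees nodes in place -- keep a "prev"
 * pointer, re-read the link that was just rewritten, and only advance
 * "prev" past mbufs that are kept. In generic form:
 */
#if 0
	struct mbuf *m, *prev = NULL;

	for (m = head; m != NULL; ) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Unlink and free; do not advance prev. */
			if (prev == NULL)
				m = head = sctp_m_free(m);
			else
				m = SCTP_BUF_NEXT(prev) = sctp_m_free(m);
			continue;
		}
		prev = m;
		m = SCTP_BUF_NEXT(m);
	}
	/* prev now points at the last kept mbuf (the new tail), if any. */
#endif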
702 
703 static void
704 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
705 {
706 	struct mbuf *prev = NULL;
707 	struct sctp_tcb *stcb;
708 
709 	stcb = control->stcb;
710 	if (stcb == NULL) {
711 #ifdef INVARIANTS
712 		panic("Control broken");
713 #else
714 		return;
715 #endif
716 	}
717 	if (control->tail_mbuf == NULL) {
718 		/* TSNH */
719 		sctp_m_freem(control->data);
720 		control->data = m;
721 		sctp_setup_tail_pointer(control);
722 		return;
723 	}
724 	control->tail_mbuf->m_next = m;
725 	while (m) {
726 		if (SCTP_BUF_LEN(m) == 0) {
727 			/* Skip mbufs with NO length */
728 			if (prev == NULL) {
729 				/* First one */
730 				control->tail_mbuf->m_next = sctp_m_free(m);
731 				m = control->tail_mbuf->m_next;
732 			} else {
733 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
734 				m = SCTP_BUF_NEXT(prev);
735 			}
736 			if (m == NULL) {
737 				control->tail_mbuf = prev;
738 			}
739 			continue;
740 		}
741 		prev = m;
742 		if (control->on_read_q) {
743 			/*
744 			 * On the read queue, so we must increment the SB
745 			 * counters; we assume the caller holds any SB locks.
746 			 */
747 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
748 		}
749 		*added += SCTP_BUF_LEN(m);
750 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
751 		m = SCTP_BUF_NEXT(m);
752 	}
753 	if (prev) {
754 		control->tail_mbuf = prev;
755 	}
756 }
757 
758 static void
759 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
760 {
761 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
762 	nc->sinfo_stream = control->sinfo_stream;
763 	nc->mid = control->mid;
764 	TAILQ_INIT(&nc->reasm);
765 	nc->top_fsn = control->top_fsn;
766 	nc->mid = control->mid;
767 	nc->sinfo_flags = control->sinfo_flags;
768 	nc->sinfo_ppid = control->sinfo_ppid;
769 	nc->sinfo_context = control->sinfo_context;
770 	nc->fsn_included = 0xffffffff;
771 	nc->sinfo_tsn = control->sinfo_tsn;
772 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
773 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
774 	nc->whoFrom = control->whoFrom;
775 	atomic_add_int(&nc->whoFrom->ref_count, 1);
776 	nc->stcb = control->stcb;
777 	nc->port_from = control->port_from;
778 }
779 
780 static void
781 sctp_reset_a_control(struct sctp_queued_to_read *control,
782     struct sctp_inpcb *inp, uint32_t tsn)
783 {
784 	control->fsn_included = tsn;
785 	if (control->on_read_q) {
786 		/*
787 		 * We have to purge it from there, hopefully this will work
788 		 * :-)
789 		 */
790 		TAILQ_REMOVE(&inp->read_queue, control, next);
791 		control->on_read_q = 0;
792 	}
793 }
794 
795 static int
796 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
797     struct sctp_association *asoc,
798     struct sctp_stream_in *strm,
799     struct sctp_queued_to_read *control,
800     uint32_t pd_point,
801     int inp_read_lock_held)
802 {
803 	/*
804 	 * Special handling for the old un-ordered data chunk. All the
805 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
806 	 * to see if we have it all. If we return one, no other control
807 	 * entries on the un-ordered queue will be looked at. In theory
808 	 * there should be no other entries in reality, unless the peer is
809 	 * sending both unordered NDATA and unordered DATA...
810 	 */
811 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
812 	uint32_t fsn;
813 	struct sctp_queued_to_read *nc;
814 	int cnt_added;
815 
816 	if (control->first_frag_seen == 0) {
817 		/* Nothing we can do, we have not seen the first piece yet */
818 		return (1);
819 	}
820 	/* Collapse any we can */
821 	cnt_added = 0;
822 restart:
823 	fsn = control->fsn_included + 1;
824 	/* Now what can we add? */
825 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
826 		if (chk->rec.data.fsn == fsn) {
827 			/* Ok lets add it */
828 			sctp_alloc_a_readq(stcb, nc);
829 			if (nc == NULL) {
830 				break;
831 			}
832 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
833 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
834 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
835 			fsn++;
836 			cnt_added++;
837 			chk = NULL;
838 			if (control->end_added) {
839 				/* We are done */
840 				if (!TAILQ_EMPTY(&control->reasm)) {
841 					/*
842 					 * Ok we have to move anything left
843 					 * on the control queue to a new
844 					 * control.
845 					 */
846 					sctp_build_readq_entry_from_ctl(nc, control);
847 					tchk = TAILQ_FIRST(&control->reasm);
848 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
849 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
850 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
851 							asoc->size_on_reasm_queue -= tchk->send_size;
852 						} else {
853 #ifdef INVARIANTS
854 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
855 #else
856 							asoc->size_on_reasm_queue = 0;
857 #endif
858 						}
859 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
860 						nc->first_frag_seen = 1;
861 						nc->fsn_included = tchk->rec.data.fsn;
862 						nc->data = tchk->data;
863 						nc->sinfo_ppid = tchk->rec.data.ppid;
864 						nc->sinfo_tsn = tchk->rec.data.tsn;
865 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
866 						tchk->data = NULL;
867 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
868 						sctp_setup_tail_pointer(nc);
869 						tchk = TAILQ_FIRST(&control->reasm);
870 					}
871 					/* Spin the rest onto the queue */
872 					while (tchk) {
873 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
874 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
875 						tchk = TAILQ_FIRST(&control->reasm);
876 					}
877 					/*
878 					 * Now lets add it to the queue
879 					 * after removing control
880 					 */
881 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
882 					nc->on_strm_q = SCTP_ON_UNORDERED;
883 					if (control->on_strm_q) {
884 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
885 						control->on_strm_q = 0;
886 					}
887 				}
888 				if (control->pdapi_started) {
889 					strm->pd_api_started = 0;
890 					control->pdapi_started = 0;
891 				}
892 				if (control->on_strm_q) {
893 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
894 					control->on_strm_q = 0;
895 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
896 				}
897 				if (control->on_read_q == 0) {
898 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
899 					    &stcb->sctp_socket->so_rcv, control->end_added,
900 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
901 				}
902 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
903 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
904 					/*
905 					 * Switch to the new guy and
906 					 * continue
907 					 */
908 					control = nc;
909 					goto restart;
910 				} else {
911 					if (nc->on_strm_q == 0) {
912 						sctp_free_a_readq(stcb, nc);
913 					}
914 				}
915 				return (1);
916 			} else {
917 				sctp_free_a_readq(stcb, nc);
918 			}
919 		} else {
920 			/* Can't add more */
921 			break;
922 		}
923 	}
924 	if (cnt_added && strm->pd_api_started) {
925 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
926 	}
927 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
928 		strm->pd_api_started = 1;
929 		control->pdapi_started = 1;
930 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
931 		    &stcb->sctp_socket->so_rcv, control->end_added,
932 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
933 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
934 		return (0);
935 	} else {
936 		return (1);
937 	}
938 }
939 
940 static void
941 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
942     struct sctp_association *asoc,
943     struct sctp_queued_to_read *control,
944     struct sctp_tmit_chunk *chk,
945     int *abort_flag)
946 {
947 	struct sctp_tmit_chunk *at;
948 	int inserted;
949 
950 	/*
951 	 * Here we need to place the chunk into the control structure sorted
952 	 * in the correct order.
953 	 */
954 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
955 		/* It's the very first one. */
956 		SCTPDBG(SCTP_DEBUG_XXX,
957 		    "chunk is a first fsn: %u becomes fsn_included\n",
958 		    chk->rec.data.fsn);
959 		at = TAILQ_FIRST(&control->reasm);
960 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
961 			/*
962 			 * The first chunk in the reassembly is a smaller
963 			 * TSN than this one, even though this has a first,
964 			 * it must be from a subsequent msg.
965 			 */
966 			goto place_chunk;
967 		}
968 		if (control->first_frag_seen) {
969 			/*
970 			 * In old un-ordered we can reassemble multiple
971 			 * messages on one control, as long as the next
972 			 * FIRST is greater than the old first (TSN, i.e.
973 			 * FSN, wise).
974 			 */
975 			struct mbuf *tdata;
976 			uint32_t tmp;
977 
978 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
979 				/*
980 				 * Easy way the start of a new guy beyond
981 				 * the lowest
982 				 */
983 				goto place_chunk;
984 			}
985 			if ((chk->rec.data.fsn == control->fsn_included) ||
986 			    (control->pdapi_started)) {
987 				/*
988 				 * Ok this should not happen, if it does we
989 				 * started the pd-api on the higher TSN
990 				 * (since the equals part is a TSN failure
991 				 * it must be that).
992 				 *
993 				 * We are completely hosed in that case since
994 				 * I have no way to recover. This really
995 				 * will only happen if we can get more TSN's
996 				 * higher before the pd-api-point.
997 				 */
998 				sctp_abort_in_reasm(stcb, control, chk,
999 				    abort_flag,
1000 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1001 
1002 				return;
1003 			}
1004 			/*
1005 			 * Ok we have two firsts and the one we just got is
1006 			 * smaller than the one we previously placed.. yuck!
1007 			 * We must swap them out.
1008 			 */
1009 			/* swap the mbufs */
1010 			tdata = control->data;
1011 			control->data = chk->data;
1012 			chk->data = tdata;
1013 			/* Save the lengths */
1014 			chk->send_size = control->length;
1015 			/* Recompute length of control and tail pointer */
1016 			sctp_setup_tail_pointer(control);
1017 			/* Fix the FSN included */
1018 			tmp = control->fsn_included;
1019 			control->fsn_included = chk->rec.data.fsn;
1020 			chk->rec.data.fsn = tmp;
1021 			/* Fix the TSN included */
1022 			tmp = control->sinfo_tsn;
1023 			control->sinfo_tsn = chk->rec.data.tsn;
1024 			chk->rec.data.tsn = tmp;
1025 			/* Fix the PPID included */
1026 			tmp = control->sinfo_ppid;
1027 			control->sinfo_ppid = chk->rec.data.ppid;
1028 			chk->rec.data.ppid = tmp;
1029 			/* Fix tail pointer */
1030 			goto place_chunk;
1031 		}
1032 		control->first_frag_seen = 1;
1033 		control->fsn_included = chk->rec.data.fsn;
1034 		control->top_fsn = chk->rec.data.fsn;
1035 		control->sinfo_tsn = chk->rec.data.tsn;
1036 		control->sinfo_ppid = chk->rec.data.ppid;
1037 		control->data = chk->data;
1038 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1039 		chk->data = NULL;
1040 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1041 		sctp_setup_tail_pointer(control);
1042 		return;
1043 	}
1044 place_chunk:
1045 	inserted = 0;
1046 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1047 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1048 			/*
1049 			 * This one in queue is bigger than the new one,
1050 			 * insert the new one before at.
1051 			 */
1052 			asoc->size_on_reasm_queue += chk->send_size;
1053 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1054 			inserted = 1;
1055 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1056 			break;
1057 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1058 			/*
1059 			 * They sent a duplicate fsn number. This really
1060 			 * should not happen since the FSN is a TSN and it
1061 			 * should have been dropped earlier.
1062 			 */
1063 			sctp_abort_in_reasm(stcb, control, chk,
1064 			    abort_flag,
1065 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1066 			return;
1067 		}
1068 
1069 	}
1070 	if (inserted == 0) {
1071 		/* It's at the end */
1072 		asoc->size_on_reasm_queue += chk->send_size;
1073 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1074 		control->top_fsn = chk->rec.data.fsn;
1075 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1076 	}
1077 }
1078 
1079 static int
1080 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1081     struct sctp_stream_in *strm, int inp_read_lock_held)
1082 {
1083 	/*
1084 	 * Given a stream, strm, see if any of the SSN's on it that are
1085 	 * fragmented are ready to deliver. If so go ahead and place them on
1086 	 * the read queue. In so placing, if we have hit the end, then we
1087 	 * need to remove them from the stream's queue.
1088 	 */
1089 	struct sctp_queued_to_read *control, *nctl = NULL;
1090 	uint32_t next_to_del;
1091 	uint32_t pd_point;
1092 	int ret = 0;
1093 
1094 	if (stcb->sctp_socket) {
1095 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1096 		    stcb->sctp_ep->partial_delivery_point);
1097 	} else {
1098 		pd_point = stcb->sctp_ep->partial_delivery_point;
1099 	}
1100 	control = TAILQ_FIRST(&strm->uno_inqueue);
1101 
1102 	if ((control != NULL) &&
1103 	    (asoc->idata_supported == 0)) {
1104 		/* Special handling needed for "old" data format */
1105 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1106 			goto done_un;
1107 		}
1108 	}
1109 	if (strm->pd_api_started) {
1110 		/* Can't add more */
1111 		return (0);
1112 	}
1113 	while (control) {
1114 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1115 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1116 		nctl = TAILQ_NEXT(control, next_instrm);
1117 		if (control->end_added) {
1118 			/* We just put the last bit on */
1119 			if (control->on_strm_q) {
1120 #ifdef INVARIANTS
1121 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1122 					panic("Huh control: %p on_q: %d -- not unordered?",
1123 					    control, control->on_strm_q);
1124 				}
1125 #endif
1126 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1127 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1128 				control->on_strm_q = 0;
1129 			}
1130 			if (control->on_read_q == 0) {
1131 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1132 				    control,
1133 				    &stcb->sctp_socket->so_rcv, control->end_added,
1134 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1135 			}
1136 		} else {
1137 			/* Can we do a PD-API for this un-ordered guy? */
1138 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1139 				strm->pd_api_started = 1;
1140 				control->pdapi_started = 1;
1141 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1142 				    control,
1143 				    &stcb->sctp_socket->so_rcv, control->end_added,
1144 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1145 
1146 				break;
1147 			}
1148 		}
1149 		control = nctl;
1150 	}
1151 done_un:
1152 	control = TAILQ_FIRST(&strm->inqueue);
1153 	if (strm->pd_api_started) {
1154 		/* Can't add more */
1155 		return (0);
1156 	}
1157 	if (control == NULL) {
1158 		return (ret);
1159 	}
1160 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1161 		/*
1162 		 * Ok the guy at the top was being partially delivered and
1163 		 * has completed, so we remove it. Note the pd_api flag was
1164 		 * taken off when the chunk was merged on in
1165 		 * sctp_queue_data_for_reasm below.
1166 		 */
1167 		nctl = TAILQ_NEXT(control, next_instrm);
1168 		SCTPDBG(SCTP_DEBUG_XXX,
1169 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1170 		    control, control->end_added, control->mid,
1171 		    control->top_fsn, control->fsn_included,
1172 		    strm->last_mid_delivered);
1173 		if (control->end_added) {
1174 			if (control->on_strm_q) {
1175 #ifdef INVARIANTS
1176 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1177 					panic("Huh control: %p on_q: %d -- not ordered?",
1178 					    control, control->on_strm_q);
1179 				}
1180 #endif
1181 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1182 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1183 				if (asoc->size_on_all_streams >= control->length) {
1184 					asoc->size_on_all_streams -= control->length;
1185 				} else {
1186 #ifdef INVARIANTS
1187 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1188 #else
1189 					asoc->size_on_all_streams = 0;
1190 #endif
1191 				}
1192 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1193 				control->on_strm_q = 0;
1194 			}
1195 			if (strm->pd_api_started && control->pdapi_started) {
1196 				control->pdapi_started = 0;
1197 				strm->pd_api_started = 0;
1198 			}
1199 			if (control->on_read_q == 0) {
1200 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1201 				    control,
1202 				    &stcb->sctp_socket->so_rcv, control->end_added,
1203 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1204 			}
1205 			control = nctl;
1206 		}
1207 	}
1208 	if (strm->pd_api_started) {
1209 		/*
1210 		 * Can't add more; we must have gotten an un-ordered above being
1211 		 * partially delivered.
1212 		 */
1213 		return (0);
1214 	}
1215 deliver_more:
1216 	next_to_del = strm->last_mid_delivered + 1;
1217 	if (control) {
1218 		SCTPDBG(SCTP_DEBUG_XXX,
1219 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1220 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1221 		    next_to_del);
1222 		nctl = TAILQ_NEXT(control, next_instrm);
1223 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1224 		    (control->first_frag_seen)) {
1225 			int done;
1226 
1227 			/* Ok we can deliver it onto the stream. */
1228 			if (control->end_added) {
1229 				/* We are done with it afterwards */
1230 				if (control->on_strm_q) {
1231 #ifdef INVARIANTS
1232 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1233 						panic("Huh control: %p on_q: %d -- not ordered?",
1234 						    control, control->on_strm_q);
1235 					}
1236 #endif
1237 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1238 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1239 					if (asoc->size_on_all_streams >= control->length) {
1240 						asoc->size_on_all_streams -= control->length;
1241 					} else {
1242 #ifdef INVARIANTS
1243 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1244 #else
1245 						asoc->size_on_all_streams = 0;
1246 #endif
1247 					}
1248 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1249 					control->on_strm_q = 0;
1250 				}
1251 				ret++;
1252 			}
1253 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1254 				/*
1255 				 * A singleton now slipping through - mark
1256 				 * it non-revokable too
1257 				 */
1258 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1259 			} else if (control->end_added == 0) {
1260 				/*
1261 				 * Check if we can defer adding until it's
1262 				 * all there
1263 				 */
1264 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1265 					/*
1266 					 * Don't need it or cannot add more
1267 					 * (one being delivered that way)
1268 					 */
1269 					goto out;
1270 				}
1271 			}
1272 			done = (control->end_added) && (control->last_frag_seen);
1273 			if (control->on_read_q == 0) {
1274 				if (!done) {
1275 					if (asoc->size_on_all_streams >= control->length) {
1276 						asoc->size_on_all_streams -= control->length;
1277 					} else {
1278 #ifdef INVARIANTS
1279 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1280 #else
1281 						asoc->size_on_all_streams = 0;
1282 #endif
1283 					}
1284 					strm->pd_api_started = 1;
1285 					control->pdapi_started = 1;
1286 				}
1287 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1288 				    control,
1289 				    &stcb->sctp_socket->so_rcv, control->end_added,
1290 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1291 			}
1292 			strm->last_mid_delivered = next_to_del;
1293 			if (done) {
1294 				control = nctl;
1295 				goto deliver_more;
1296 			}
1297 		}
1298 	}
1299 out:
1300 	return (ret);
1301 }
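
/*
 * Illustrative sketch (not compiled): the partial-delivery point used
 * above is the smaller of a fixed fraction of the receive buffer and the
 * endpoint's configured partial_delivery_point. Once a message still being
 * reassembled crosses it, delivery to the socket starts before the last
 * fragment arrives; so/inp/control/strm are assumed from context:
 */
#if 0
	uint32_t pd_point;

	pd_point = min(SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT,
	    inp->partial_delivery_point);
	if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
		/* Start the PD-API: queue what we have to the reader. */
	}
#endif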
1302 
1303 
1304 uint32_t
1305 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1306     struct sctp_stream_in *strm,
1307     struct sctp_tcb *stcb, struct sctp_association *asoc,
1308     struct sctp_tmit_chunk *chk, int hold_rlock)
1309 {
1310 	/*
1311 	 * Given a control and a chunk, merge the data from the chk onto the
1312 	 * control and free up the chunk resources.
1313 	 */
1314 	uint32_t added = 0;
1315 	int i_locked = 0;
1316 
1317 	if (control->on_read_q && (hold_rlock == 0)) {
1318 		/*
1319 		 * It's being pd-api'd, so we must take some locks.
1320 		 */
1321 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1322 		i_locked = 1;
1323 	}
1324 	if (control->data == NULL) {
1325 		control->data = chk->data;
1326 		sctp_setup_tail_pointer(control);
1327 	} else {
1328 		sctp_add_to_tail_pointer(control, chk->data, &added);
1329 	}
1330 	control->fsn_included = chk->rec.data.fsn;
1331 	asoc->size_on_reasm_queue -= chk->send_size;
1332 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1333 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1334 	chk->data = NULL;
1335 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1336 		control->first_frag_seen = 1;
1337 		control->sinfo_tsn = chk->rec.data.tsn;
1338 		control->sinfo_ppid = chk->rec.data.ppid;
1339 	}
1340 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1341 		/* It's complete */
1342 		if ((control->on_strm_q) && (control->on_read_q)) {
1343 			if (control->pdapi_started) {
1344 				control->pdapi_started = 0;
1345 				strm->pd_api_started = 0;
1346 			}
1347 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1348 				/* Unordered */
1349 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1350 				control->on_strm_q = 0;
1351 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1352 				/* Ordered */
1353 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1354 				/*
1355 				 * Don't need to decrement
1356 				 * size_on_all_streams, since control is on
1357 				 * the read queue.
1358 				 */
1359 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1360 				control->on_strm_q = 0;
1361 #ifdef INVARIANTS
1362 			} else if (control->on_strm_q) {
1363 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1364 				    control->on_strm_q);
1365 #endif
1366 			}
1367 		}
1368 		control->end_added = 1;
1369 		control->last_frag_seen = 1;
1370 	}
1371 	if (i_locked) {
1372 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1373 	}
1374 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1375 	return (added);
1376 }
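
/*
 * Illustrative sketch (not compiled): callers of sctp_add_chk_to_control()
 * differ only in whether they already hold the endpoint's read lock. If
 * the control sits on the socket's read queue and the caller does not
 * hold that lock, the merge takes it so a concurrent reader cannot see a
 * half-updated mbuf chain. The two calling patterns:
 */
#if 0
	/* Caller that does not hold the INP read lock: */
	added = sctp_add_chk_to_control(control, strm, stcb, asoc, chk,
	    SCTP_READ_LOCK_NOT_HELD);

	/* Caller already running under SCTP_INP_READ_LOCK(): */
	added = sctp_add_chk_to_control(control, strm, stcb, asoc, chk,
	    SCTP_READ_LOCK_HELD);
#endif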
1377 
1378 /*
1379  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1380  * queue, see if anything can be delivered. If so pull it off (or as much as
1381  * we can). If we run out of space then we must dump what we can and set the
1382  * appropriate flag to say we queued what we could.
1383  */
1384 static void
1385 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1386     struct sctp_queued_to_read *control,
1387     struct sctp_tmit_chunk *chk,
1388     int created_control,
1389     int *abort_flag, uint32_t tsn)
1390 {
1391 	uint32_t next_fsn;
1392 	struct sctp_tmit_chunk *at, *nat;
1393 	struct sctp_stream_in *strm;
1394 	int do_wakeup, unordered;
1395 	uint32_t lenadded;
1396 
1397 	strm = &asoc->strmin[control->sinfo_stream];
1398 	/*
1399 	 * For old un-ordered data chunks.
1400 	 */
1401 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1402 		unordered = 1;
1403 	} else {
1404 		unordered = 0;
1405 	}
1406 	/* Must be added to the stream-in queue */
1407 	if (created_control) {
1408 		if (unordered == 0) {
1409 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1410 		}
1411 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1412 			/* Duplicate SSN? */
1413 			sctp_abort_in_reasm(stcb, control, chk,
1414 			    abort_flag,
1415 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1416 			sctp_clean_up_control(stcb, control);
1417 			return;
1418 		}
1419 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1420 			/*
1421 			 * Ok we created this control and now let's validate
1422 			 * that it's legal, i.e. there is a B bit set; if not,
1423 			 * and we have up to the cum-ack, then it's invalid.
1424 			 */
1425 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1426 				sctp_abort_in_reasm(stcb, control, chk,
1427 				    abort_flag,
1428 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1429 				return;
1430 			}
1431 		}
1432 	}
1433 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1434 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1435 		return;
1436 	}
1437 	/*
1438 	 * Ok we must queue the chunk into the reassembly portion: o if it's
1439 	 * the first it goes to the control mbuf. o if it's not first but the
1440 	 * next in sequence it goes to the control, and each succeeding one
1441 	 * in order also goes. o if it's not in order we place it on the list
1442 	 * in its place.
1443 	 */
1444 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1445 		/* It's the very first one. */
1446 		SCTPDBG(SCTP_DEBUG_XXX,
1447 		    "chunk is a first fsn: %u becomes fsn_included\n",
1448 		    chk->rec.data.fsn);
1449 		if (control->first_frag_seen) {
1450 			/*
1451 			 * Error on the sender's part: they either sent us two
1452 			 * data chunks with FIRST, or they sent two
1453 			 * un-ordered chunks that were fragmented at the
1454 			 * same time in the same stream.
1455 			 */
1456 			sctp_abort_in_reasm(stcb, control, chk,
1457 			    abort_flag,
1458 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1459 			return;
1460 		}
1461 		control->first_frag_seen = 1;
1462 		control->sinfo_ppid = chk->rec.data.ppid;
1463 		control->sinfo_tsn = chk->rec.data.tsn;
1464 		control->fsn_included = chk->rec.data.fsn;
1465 		control->data = chk->data;
1466 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1467 		chk->data = NULL;
1468 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1469 		sctp_setup_tail_pointer(control);
1470 		asoc->size_on_all_streams += control->length;
1471 	} else {
1472 		/* Place the chunk in our list */
1473 		int inserted = 0;
1474 
1475 		if (control->last_frag_seen == 0) {
1476 			/* Still willing to raise highest FSN seen */
1477 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1478 				SCTPDBG(SCTP_DEBUG_XXX,
1479 				    "We have a new top_fsn: %u\n",
1480 				    chk->rec.data.fsn);
1481 				control->top_fsn = chk->rec.data.fsn;
1482 			}
1483 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1484 				SCTPDBG(SCTP_DEBUG_XXX,
1485 				    "The last fsn is now in place fsn: %u\n",
1486 				    chk->rec.data.fsn);
1487 				control->last_frag_seen = 1;
1488 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1489 					SCTPDBG(SCTP_DEBUG_XXX,
1490 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1491 					    chk->rec.data.fsn,
1492 					    control->top_fsn);
1493 					sctp_abort_in_reasm(stcb, control, chk,
1494 					    abort_flag,
1495 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1496 					return;
1497 				}
1498 			}
1499 			if (asoc->idata_supported || control->first_frag_seen) {
1500 				/*
1501 				 * For IDATA we always check since we know
1502 				 * that the first fragment is 0. For old
1503 				 * DATA we have to receive the first before
1504 				 * we know the first FSN (which is the TSN).
1505 				 */
1506 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1507 					/*
1508 					 * We have already delivered up to
1509 					 * this so it's a dup
1510 					 */
1511 					sctp_abort_in_reasm(stcb, control, chk,
1512 					    abort_flag,
1513 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1514 					return;
1515 				}
1516 			}
1517 		} else {
1518 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1519 				/* Second last? huh? */
1520 				SCTPDBG(SCTP_DEBUG_XXX,
1521 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1522 				    chk->rec.data.fsn, control->top_fsn);
1523 				sctp_abort_in_reasm(stcb, control,
1524 				    chk, abort_flag,
1525 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1526 				return;
1527 			}
1528 			if (asoc->idata_supported || control->first_frag_seen) {
1529 				/*
1530 				 * For IDATA we always check since we know
1531 				 * that the first fragment is 0. For old
1532 				 * DATA we have to receive the first before
1533 				 * we know the first FSN (which is the TSN).
1534 				 */
1535 
1536 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1537 					/*
1538 					 * We have already delivered up to
1539 					 * this so it's a dup
1540 					 */
1541 					SCTPDBG(SCTP_DEBUG_XXX,
1542 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1543 					    chk->rec.data.fsn, control->fsn_included);
1544 					sctp_abort_in_reasm(stcb, control, chk,
1545 					    abort_flag,
1546 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1547 					return;
1548 				}
1549 			}
1550 			/*
1551 			 * validate not beyond top FSN if we have seen last
1552 			 * one
1553 			 */
1554 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1555 				SCTPDBG(SCTP_DEBUG_XXX,
1556 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1557 				    chk->rec.data.fsn,
1558 				    control->top_fsn);
1559 				sctp_abort_in_reasm(stcb, control, chk,
1560 				    abort_flag,
1561 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1562 				return;
1563 			}
1564 		}
1565 		/*
1566 		 * If we reach here, we need to place the new chunk in the
1567 		 * reassembly for this control.
1568 		 */
1569 		SCTPDBG(SCTP_DEBUG_XXX,
1570 		    "chunk is a not first fsn: %u needs to be inserted\n",
1571 		    chk->rec.data.fsn);
1572 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1573 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1574 				/*
1575 				 * This one in queue is bigger than the new
1576 				 * one, insert the new one before at.
1577 				 */
1578 				SCTPDBG(SCTP_DEBUG_XXX,
1579 				    "Insert it before fsn: %u\n",
1580 				    at->rec.data.fsn);
1581 				asoc->size_on_reasm_queue += chk->send_size;
1582 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1583 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1584 				inserted = 1;
1585 				break;
1586 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1587 				/*
1588 				 * Gak, he sent me a duplicate str seq
1589 				 * number
1590 				 */
1591 				/*
1592 				 * foo bar, I guess I will just free this
1593 				 * new guy, should we abort too? FIX ME
1594 				 * MAYBE? Or it COULD be that the SSN's have
1595 				 * wrapped. Maybe I should compare to TSN
1596 				 * somehow... sigh for now just blow away
1597 				 * the chunk!
1598 				 */
1599 				SCTPDBG(SCTP_DEBUG_XXX,
1600 				    "Duplicate to fsn: %u -- abort\n",
1601 				    at->rec.data.fsn);
1602 				sctp_abort_in_reasm(stcb, control,
1603 				    chk, abort_flag,
1604 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1605 				return;
1606 			}
1607 		}
1608 		if (inserted == 0) {
1609 			/* Goes on the end */
1610 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1611 			    chk->rec.data.fsn);
1612 			asoc->size_on_reasm_queue += chk->send_size;
1613 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1614 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1615 		}
1616 	}
1617 	/*
1618 	 * Ok, let's see if we can pull any in-sequence chunks up into the
1619 	 * control structure, if it makes sense.
1620 	 */
1621 	do_wakeup = 0;
1622 	/*
1623 	 * If the first fragment has not been seen there is no sense in
1624 	 * looking.
1625 	 */
1626 	if (control->first_frag_seen) {
1627 		next_fsn = control->fsn_included + 1;
1628 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1629 			if (at->rec.data.fsn == next_fsn) {
1630 				/* We can add this one now to the control */
1631 				SCTPDBG(SCTP_DEBUG_XXX,
1632 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1633 				    control, at,
1634 				    at->rec.data.fsn,
1635 				    next_fsn, control->fsn_included);
1636 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1637 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1638 				if (control->on_read_q) {
1639 					do_wakeup = 1;
1640 				} else {
1641 					/*
1642 					 * We only add to the
1643 					 * size-on-all-streams if its not on
1644 					 * the read q. The read q flag will
1645 					 * cause a sballoc so its accounted
1646 					 * for there.
1647 					 */
1648 					asoc->size_on_all_streams += lenadded;
1649 				}
1650 				next_fsn++;
1651 				if (control->end_added && control->pdapi_started) {
1652 					if (strm->pd_api_started) {
1653 						strm->pd_api_started = 0;
1654 						control->pdapi_started = 0;
1655 					}
1656 					if (control->on_read_q == 0) {
1657 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1658 						    control,
1659 						    &stcb->sctp_socket->so_rcv, control->end_added,
1660 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1661 					}
1662 					break;
1663 				}
1664 			} else {
1665 				break;
1666 			}
1667 		}
1668 	}
1669 	if (do_wakeup) {
1670 		/* Need to wakeup the reader */
1671 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1672 	}
1673 }
1674 
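/*
 * Look up the in-progress reassembly for a message on a stream's inbound
 * queues. Ordered lookups match on MID in the ordered queue; with I-DATA,
 * unordered lookups match on MID in the unordered queue; for old DATA at
 * most one unordered reassembly can be in progress per stream, so the
 * first entry (if any) is the match. An illustrative caller sketch
 * (values are hypothetical):
 *
 *	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid,
 *	    ordered, asoc->idata_supported);
 *	if (control == NULL) {
 *		... no reassembly in progress for this message ...
 *	}
 */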
1675 static struct sctp_queued_to_read *
1676 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1677 {
1678 	struct sctp_queued_to_read *control;
1679 
1680 	if (ordered) {
1681 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1682 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1683 				break;
1684 			}
1685 		}
1686 	} else {
1687 		if (idata_supported) {
1688 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1689 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1690 					break;
1691 				}
1692 			}
1693 		} else {
1694 			control = TAILQ_FIRST(&strm->uno_inqueue);
1695 		}
1696 	}
1697 	return (control);
1698 }
1699 
1700 static int
1701 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1702     struct mbuf **m, int offset, int chk_length,
1703     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1704     int *break_flag, int last_chunk, uint8_t chk_type)
1705 {
1706 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1707 	uint32_t tsn, fsn, gap, mid;
1708 	struct mbuf *dmbuf;
1709 	int the_len;
1710 	int need_reasm_check = 0;
1711 	uint16_t sid;
1712 	struct mbuf *op_err;
1713 	char msg[SCTP_DIAG_INFO_LEN];
1714 	struct sctp_queued_to_read *control, *ncontrol;
1715 	uint32_t ppid;
1716 	uint8_t chk_flags;
1717 	struct sctp_stream_reset_list *liste;
1718 	int ordered;
1719 	size_t clen;
1720 	int created_control = 0;
1721 
1722 	if (chk_type == SCTP_IDATA) {
1723 		struct sctp_idata_chunk *chunk, chunk_buf;
1724 
1725 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1726 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1727 		chk_flags = chunk->ch.chunk_flags;
1728 		clen = sizeof(struct sctp_idata_chunk);
1729 		tsn = ntohl(chunk->dp.tsn);
1730 		sid = ntohs(chunk->dp.sid);
1731 		mid = ntohl(chunk->dp.mid);
1732 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1733 			fsn = 0;
1734 			ppid = chunk->dp.ppid_fsn.ppid;
1735 		} else {
1736 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1737 			ppid = 0xffffffff;	/* Use as an invalid value. */
1738 		}
1739 	} else {
1740 		struct sctp_data_chunk *chunk, chunk_buf;
1741 
1742 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1743 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1744 		chk_flags = chunk->ch.chunk_flags;
1745 		clen = sizeof(struct sctp_data_chunk);
1746 		tsn = ntohl(chunk->dp.tsn);
1747 		sid = ntohs(chunk->dp.sid);
1748 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1749 		fsn = tsn;
1750 		ppid = chunk->dp.ppid;
1751 	}
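	/*
	 * A sketch of the two wire formats parsed above (field sizes in
	 * bytes, assuming the usual packed layout):
	 *
	 *   DATA:   ch(4) tsn(4) sid(2) ssn(2) ppid(4)          16-byte header
	 *   I-DATA: ch(4) tsn(4) sid(2) rsvd(2) mid(4) u32(4)   20-byte header
	 *
	 * For DATA the MID is the 16-bit SSN widened and the FSN is
	 * implicitly the TSN. For I-DATA the trailing 32-bit field carries
	 * the PPID on a first fragment (whose FSN is implicitly 0) and the
	 * FSN on all other fragments.
	 */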
1752 	if ((size_t)chk_length == clen) {
1753 		/*
1754 		 * Need to send an abort since we had an empty data chunk.
1755 		 */
1756 		op_err = sctp_generate_no_user_data_cause(tsn);
1757 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1758 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1759 		*abort_flag = 1;
1760 		return (0);
1761 	}
1762 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1763 		asoc->send_sack = 1;
1764 	}
1765 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1766 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1767 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1768 	}
1769 	if (stcb == NULL) {
1770 		return (0);
1771 	}
1772 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1773 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1774 		/* It is a duplicate */
1775 		SCTP_STAT_INCR(sctps_recvdupdata);
1776 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1777 			/* Record a dup for the next outbound sack */
1778 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 			asoc->numduptsns++;
1780 		}
1781 		asoc->send_sack = 1;
1782 		return (0);
1783 	}
1784 	/* Calculate the number of TSN's between the base and this TSN */
1785 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
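	/*
	 * The gap is serial-number arithmetic, effectively
	 * (tsn - mapping_array_base_tsn) mod 2^32, so it stays correct
	 * across a TSN wrap. For example (illustrative values):
	 *
	 *	base_tsn = 0xfffffffa, tsn = 0x00000003
	 *	gap = (0xffffffff - 0xfffffffa) + 0x00000003 + 1 = 9
	 */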
1786 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1787 		/* Can't hold the bit in the mapping at max array, toss it */
1788 		return (0);
1789 	}
1790 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1791 		SCTP_TCB_LOCK_ASSERT(stcb);
1792 		if (sctp_expand_mapping_array(asoc, gap)) {
1793 			/* Can't expand, drop it */
1794 			return (0);
1795 		}
1796 	}
1797 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1798 		*high_tsn = tsn;
1799 	}
1800 	/* See if we have received this one already */
1801 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1802 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1803 		SCTP_STAT_INCR(sctps_recvdupdata);
1804 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1805 			/* Record a dup for the next outbound sack */
1806 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1807 			asoc->numduptsns++;
1808 		}
1809 		asoc->send_sack = 1;
1810 		return (0);
1811 	}
1812 	/*
1813 	 * Check the GONE flag; duplicates would have caused a SACK to be
1814 	 * sent up above.
1815 	 */
1816 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1817 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1818 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1819 		/*
1820 		 * wait a minute, this guy is gone, there is no longer a
1821 		 * receiver. Send peer an ABORT!
1822 		 */
1823 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1824 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1825 		*abort_flag = 1;
1826 		return (0);
1827 	}
1828 	/*
1829 	 * Now before going further we see if there is room. If NOT then we
1830 	 * MAY let one through only IF this TSN is the one we are waiting
1831 	 * for on a partial delivery API.
1832 	 */
1833 
1834 	/* Is the stream valid? */
1835 	if (sid >= asoc->streamincnt) {
1836 		struct sctp_error_invalid_stream *cause;
1837 
1838 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1839 		    0, M_NOWAIT, 1, MT_DATA);
1840 		if (op_err != NULL) {
1841 			/* add some space up front so prepend will work well */
1842 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1843 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1844 			/*
1845 			 * Error causes are just parameters, and this one has
1846 			 * two back-to-back parameter headers: error type and
1847 			 * size, then the stream id and a reserved field.
1848 			 */
1849 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1850 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1851 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1852 			cause->stream_id = htons(sid);
1853 			cause->reserved = htons(0);
1854 			sctp_queue_op_err(stcb, op_err);
1855 		}
1856 		SCTP_STAT_INCR(sctps_badsid);
1857 		SCTP_TCB_LOCK_ASSERT(stcb);
1858 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1859 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1860 			asoc->highest_tsn_inside_nr_map = tsn;
1861 		}
1862 		if (tsn == (asoc->cumulative_tsn + 1)) {
1863 			/* Update cum-ack */
1864 			asoc->cumulative_tsn = tsn;
1865 		}
1866 		return (0);
1867 	}
1868 	/*
1869 	 * If it's a fragmented message, let's see if we can find the control
1870 	 * on the reassembly queues.
1871 	 */
1872 	if ((chk_type == SCTP_IDATA) &&
1873 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1874 	    (fsn == 0)) {
1875 		/*
1876 		 * The first *must* be fsn 0, and other (middle/end) pieces
1877 		 * can *not* be fsn 0. XXX: This can happen in case of a
1878 		 * wrap around. Ignore it for now.
1879 		 */
1880 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1881 		    mid, chk_flags);
1882 		goto err_out;
1883 	}
1884 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1885 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1886 	    chk_flags, control);
1887 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1888 		/* See if we can find the re-assembly entity */
1889 		if (control != NULL) {
1890 			/* We found something, does it belong? */
1891 			if (ordered && (mid != control->mid)) {
1892 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1893 		err_out:
1894 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1896 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 				*abort_flag = 1;
1898 				return (0);
1899 			}
1900 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1901 				/*
1902 				 * We can't have a switched order with an
1903 				 * unordered chunk
1904 				 */
1905 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1906 				    tsn);
1907 				goto err_out;
1908 			}
1909 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1910 				/*
1911 				 * We can't have a switched unordered with an
1912 				 * ordered chunk
1913 				 */
1914 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1915 				    tsn);
1916 				goto err_out;
1917 			}
1918 		}
1919 	} else {
1920 		/*
1921 		 * It's a complete segment. Let's validate we don't have a
1922 		 * re-assembly going on with the same Stream/Seq (for
1923 		 * ordered) or in the same Stream for unordered.
1924 		 */
1925 		if (control != NULL) {
1926 			if (ordered || asoc->idata_supported) {
1927 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1928 				    chk_flags, mid);
1929 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1930 				goto err_out;
1931 			} else {
1932 				if ((tsn == control->fsn_included + 1) &&
1933 				    (control->end_added == 0)) {
1934 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1935 					goto err_out;
1936 				} else {
1937 					control = NULL;
1938 				}
1939 			}
1940 		}
1941 	}
1942 	/* now do the tests */
1943 	if (((asoc->cnt_on_all_streams +
1944 	    asoc->cnt_on_reasm_queue +
1945 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1946 	    (((int)asoc->my_rwnd) <= 0)) {
1947 		/*
1948 		 * When we have NO room in the rwnd we check to make sure
1949 		 * the reader is doing its job...
1950 		 */
1951 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1952 			/* some to read, wake-up */
1953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1954 			struct socket *so;
1955 
1956 			so = SCTP_INP_SO(stcb->sctp_ep);
1957 			atomic_add_int(&stcb->asoc.refcnt, 1);
1958 			SCTP_TCB_UNLOCK(stcb);
1959 			SCTP_SOCKET_LOCK(so, 1);
1960 			SCTP_TCB_LOCK(stcb);
1961 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1962 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1963 				/* assoc was freed while we were unlocked */
1964 				SCTP_SOCKET_UNLOCK(so, 1);
1965 				return (0);
1966 			}
1967 #endif
1968 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1969 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1970 			SCTP_SOCKET_UNLOCK(so, 1);
1971 #endif
1972 		}
1973 		/* now is it in the mapping array of what we have accepted? */
1974 		if (chk_type == SCTP_DATA) {
1975 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1976 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1977 				/* Nope not in the valid range dump it */
1978 		dump_packet:
1979 				sctp_set_rwnd(stcb, asoc);
1980 				if ((asoc->cnt_on_all_streams +
1981 				    asoc->cnt_on_reasm_queue +
1982 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1983 					SCTP_STAT_INCR(sctps_datadropchklmt);
1984 				} else {
1985 					SCTP_STAT_INCR(sctps_datadroprwnd);
1986 				}
1987 				*break_flag = 1;
1988 				return (0);
1989 			}
1990 		} else {
1991 			if (control == NULL) {
1992 				goto dump_packet;
1993 			}
1994 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1995 				goto dump_packet;
1996 			}
1997 		}
1998 	}
1999 #ifdef SCTP_ASOCLOG_OF_TSNS
2000 	SCTP_TCB_LOCK_ASSERT(stcb);
2001 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2002 		asoc->tsn_in_at = 0;
2003 		asoc->tsn_in_wrapped = 1;
2004 	}
2005 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2006 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2007 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2008 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2009 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
2010 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2011 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2012 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2013 	asoc->tsn_in_at++;
2014 #endif
2015 	/*
2016 	 * Before we continue, let's validate that we are not being fooled by
2017 	 * an evil attacker. We can only have Nk chunks based on our TSN
2018 	 * spread allowed by the mapping array N * 8 bits, so there is no
2019 	 * way our stream sequence numbers could have wrapped. We of course
2020 	 * only validate the FIRST fragment so the bit must be set.
2021 	 */
2022 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2023 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2024 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2025 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2026 		/* The incoming sseq is behind where we last delivered? */
2027 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2028 		    mid, asoc->strmin[sid].last_mid_delivered);
2029 
2030 		if (asoc->idata_supported) {
2031 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2032 			    asoc->strmin[sid].last_mid_delivered,
2033 			    tsn,
2034 			    sid,
2035 			    mid);
2036 		} else {
2037 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2038 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2039 			    tsn,
2040 			    sid,
2041 			    (uint16_t)mid);
2042 		}
2043 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2044 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2045 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2046 		*abort_flag = 1;
2047 		return (0);
2048 	}
2049 	if (chk_type == SCTP_IDATA) {
2050 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2051 	} else {
2052 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2053 	}
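	/*
	 * the_len is the user payload only. With the 16-byte DATA and
	 * 20-byte I-DATA headers sketched above, a DATA chunk with
	 * chk_length 1216 carries, for example, the_len = 1200 bytes of
	 * user data.
	 */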
2054 	if (last_chunk == 0) {
2055 		if (chk_type == SCTP_IDATA) {
2056 			dmbuf = SCTP_M_COPYM(*m,
2057 			    (offset + sizeof(struct sctp_idata_chunk)),
2058 			    the_len, M_NOWAIT);
2059 		} else {
2060 			dmbuf = SCTP_M_COPYM(*m,
2061 			    (offset + sizeof(struct sctp_data_chunk)),
2062 			    the_len, M_NOWAIT);
2063 		}
2064 #ifdef SCTP_MBUF_LOGGING
2065 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2066 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2067 		}
2068 #endif
2069 	} else {
2070 		/* We can steal the last chunk */
2071 		int l_len;
2072 
2073 		dmbuf = *m;
2074 		/* lop off the top part */
2075 		if (chk_type == SCTP_IDATA) {
2076 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2077 		} else {
2078 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2079 		}
2080 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2081 			l_len = SCTP_BUF_LEN(dmbuf);
2082 		} else {
2083 			/*
2084 			 * need to count up the size; hopefully we do not hit
2085 			 * this too often :-0
2086 			 */
2087 			struct mbuf *lat;
2088 
2089 			l_len = 0;
2090 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2091 				l_len += SCTP_BUF_LEN(lat);
2092 			}
2093 		}
2094 		if (l_len > the_len) {
2095 			/* Trim the excess bytes off the end too */
2096 			m_adj(dmbuf, -(l_len - the_len));
2097 		}
2098 	}
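	/*
	 * A note on the trimming above: m_adj() with a positive length
	 * strips bytes from the head of the chain, with a negative length
	 * from the tail. E.g. (illustrative values) if the chain holds
	 * l_len = 1500 bytes but the payload is the_len = 1184, then
	 * m_adj(dmbuf, -316) drops the 316 trailing bytes of padding.
	 */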
2099 	if (dmbuf == NULL) {
2100 		SCTP_STAT_INCR(sctps_nomem);
2101 		return (0);
2102 	}
2103 	/*
2104 	 * Now no matter what, we need a control, get one if we don't have
2105 	 * one (we may have gotten it above when we found the message was
2106 	 * fragmented).
2107 	 */
2108 	if (control == NULL) {
2109 		sctp_alloc_a_readq(stcb, control);
2110 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2111 		    ppid,
2112 		    sid,
2113 		    chk_flags,
2114 		    NULL, fsn, mid);
2115 		if (control == NULL) {
2116 			SCTP_STAT_INCR(sctps_nomem);
2117 			return (0);
2118 		}
2119 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2120 			struct mbuf *mm;
2121 
2122 			control->data = dmbuf;
2123 			control->tail_mbuf = NULL;
2124 			for (mm = control->data; mm; mm = mm->m_next) {
2125 				control->length += SCTP_BUF_LEN(mm);
2126 				if (SCTP_BUF_NEXT(mm) == NULL) {
2127 					control->tail_mbuf = mm;
2128 				}
2129 			}
2130 			control->end_added = 1;
2131 			control->last_frag_seen = 1;
2132 			control->first_frag_seen = 1;
2133 			control->fsn_included = fsn;
2134 			control->top_fsn = fsn;
2135 		}
2136 		created_control = 1;
2137 	}
2138 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2139 	    chk_flags, ordered, mid, control);
2140 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2141 	    TAILQ_EMPTY(&asoc->resetHead) &&
2142 	    ((ordered == 0) ||
2143 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2144 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2145 		/* Candidate for express delivery */
2146 		/*
2147 		 * It's not fragmented, no PD-API is up, nothing is in the
2148 		 * delivery queue, it's un-ordered OR ordered and the next to
2149 		 * deliver AND nothing else is stuck on the stream queue,
2150 		 * and there is room for it in the socket buffer. Let's just
2151 		 * stuff it up the buffer....
2152 		 */
2153 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2154 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2155 			asoc->highest_tsn_inside_nr_map = tsn;
2156 		}
2157 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2158 		    control, mid);
2159 
2160 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2161 		    control, &stcb->sctp_socket->so_rcv,
2162 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2163 
2164 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2165 			/* for ordered, bump what we delivered */
2166 			asoc->strmin[sid].last_mid_delivered++;
2167 		}
2168 		SCTP_STAT_INCR(sctps_recvexpress);
2169 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2170 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2171 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2172 		}
2173 		control = NULL;
2174 		goto finish_express_del;
2175 	}
2176 
2177 	/* Now will we need a chunk too? */
2178 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2179 		sctp_alloc_a_chunk(stcb, chk);
2180 		if (chk == NULL) {
2181 			/* No memory so we drop the chunk */
2182 			SCTP_STAT_INCR(sctps_nomem);
2183 			if (last_chunk == 0) {
2184 				/* we copied it, free the copy */
2185 				sctp_m_freem(dmbuf);
2186 			}
2187 			return (0);
2188 		}
2189 		chk->rec.data.tsn = tsn;
2190 		chk->no_fr_allowed = 0;
2191 		chk->rec.data.fsn = fsn;
2192 		chk->rec.data.mid = mid;
2193 		chk->rec.data.sid = sid;
2194 		chk->rec.data.ppid = ppid;
2195 		chk->rec.data.context = stcb->asoc.context;
2196 		chk->rec.data.doing_fast_retransmit = 0;
2197 		chk->rec.data.rcv_flags = chk_flags;
2198 		chk->asoc = asoc;
2199 		chk->send_size = the_len;
2200 		chk->whoTo = net;
2201 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (MID: %u)\n",
2202 		    chk,
2203 		    control, mid);
2204 		atomic_add_int(&net->ref_count, 1);
2205 		chk->data = dmbuf;
2206 	}
2207 	/* Set the appropriate TSN mark */
2208 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2209 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2210 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2211 			asoc->highest_tsn_inside_nr_map = tsn;
2212 		}
2213 	} else {
2214 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2215 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2216 			asoc->highest_tsn_inside_map = tsn;
2217 		}
2218 	}
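	/*
	 * Two maps are kept: nr_mapping_array tracks TSNs we will not
	 * renege on, mapping_array tracks renegable ones; with
	 * sctp_do_drain disabled every TSN is treated as non-renegable.
	 * Both store one bit per TSN, roughly (a sketch of the macro
	 * semantics):
	 *
	 *	array[gap >> 3] |= (1 << (gap & 0x07));
	 */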
2219 	/* Now is it complete (i.e. not fragmented)? */
2220 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2221 		/*
2222 		 * Special check for when streams are resetting. We could be
2223 		 * smarter about this and check the actual stream to see if
2224 		 * it is not being reset; that way we would not create
2225 		 * head-of-line blocking (HOLB) amongst streams being reset
2226 		 * and those not being reset.
2227 		 */
2229 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2230 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2231 			/*
2232 			 * yep, it's past where we need to reset... go ahead
2233 			 * and queue it.
2234 			 */
2235 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2236 				/* first one on */
2237 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2238 			} else {
2239 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2240 				unsigned char inserted = 0;
2241 
2242 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2243 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2244 
2245 						continue;
2246 					} else {
2247 						/* found it */
2248 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2249 						inserted = 1;
2250 						break;
2251 					}
2252 				}
2253 				if (inserted == 0) {
2254 					/*
2255 					 * Nothing in the queue has a larger
2256 					 * TSN, so it must be put at the
2257 					 * end.
2258 					 */
2259 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2260 				}
2261 			}
2262 			goto finish_express_del;
2263 		}
2264 		if (chk_flags & SCTP_DATA_UNORDERED) {
2265 			/* queue directly into socket buffer */
2266 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2267 			    control, mid);
2268 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2269 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2270 			    control,
2271 			    &stcb->sctp_socket->so_rcv, 1,
2272 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2273 
2274 		} else {
2275 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2276 			    mid);
2277 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2278 			if (*abort_flag) {
2279 				if (last_chunk) {
2280 					*m = NULL;
2281 				}
2282 				return (0);
2283 			}
2284 		}
2285 		goto finish_express_del;
2286 	}
2287 	/* If we reach here its a reassembly */
2288 	need_reasm_check = 1;
2289 	SCTPDBG(SCTP_DEBUG_XXX,
2290 	    "Queue data to stream for reasm control: %p MID: %u\n",
2291 	    control, mid);
2292 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2293 	if (*abort_flag) {
2294 		/*
2295 		 * the assoc is now gone and chk was put onto the reasm
2296 		 * queue, which has all been freed.
2297 		 */
2298 		if (last_chunk) {
2299 			*m = NULL;
2300 		}
2301 		return (0);
2302 	}
2303 finish_express_del:
2304 	/* Here we tidy up things */
2305 	if (tsn == (asoc->cumulative_tsn + 1)) {
2306 		/* Update cum-ack */
2307 		asoc->cumulative_tsn = tsn;
2308 	}
2309 	if (last_chunk) {
2310 		*m = NULL;
2311 	}
2312 	if (ordered) {
2313 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2314 	} else {
2315 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2316 	}
2317 	SCTP_STAT_INCR(sctps_recvdata);
2318 	/* Set it present please */
2319 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2320 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2321 	}
2322 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2323 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2324 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2325 	}
2326 	if (need_reasm_check) {
2327 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2328 		need_reasm_check = 0;
2329 	}
2330 	/* check the special flag for stream resets */
2331 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2332 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2333 		/*
2334 		 * We have finished working through the backlogged TSNs; now
2335 		 * it is time to reset streams. 1: call the reset function,
2336 		 * 2: free the pending_reply space, 3: distribute any chunks
2337 		 * in the pending_reply_queue.
2338 		 */
2339 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2340 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2341 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2342 		SCTP_FREE(liste, SCTP_M_STRESET);
2343 		/* sa_ignore FREED_MEMORY */
2344 		liste = TAILQ_FIRST(&asoc->resetHead);
2345 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2346 			/* All can be removed */
2347 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2348 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2349 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2350 				if (*abort_flag) {
2351 					return (0);
2352 				}
2353 				if (need_reasm_check) {
2354 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2355 					need_reasm_check = 0;
2356 				}
2357 			}
2358 		} else {
2359 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2360 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2361 					break;
2362 				}
2363 				/*
2364 				 * If control->sinfo_tsn is <= liste->tsn we
2365 				 * can process it, i.e. the negation of the
2366 				 * break condition above.
2367 				 */
2368 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2369 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2370 				if (*abort_flag) {
2371 					return (0);
2372 				}
2373 				if (need_reasm_check) {
2374 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2375 					need_reasm_check = 0;
2376 				}
2377 			}
2378 		}
2379 	}
2380 	return (1);
2381 }
2382 
2383 static const int8_t sctp_map_lookup_tab[256] = {
2384 	0, 1, 0, 2, 0, 1, 0, 3,
2385 	0, 1, 0, 2, 0, 1, 0, 4,
2386 	0, 1, 0, 2, 0, 1, 0, 3,
2387 	0, 1, 0, 2, 0, 1, 0, 5,
2388 	0, 1, 0, 2, 0, 1, 0, 3,
2389 	0, 1, 0, 2, 0, 1, 0, 4,
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 6,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 4,
2394 	0, 1, 0, 2, 0, 1, 0, 3,
2395 	0, 1, 0, 2, 0, 1, 0, 5,
2396 	0, 1, 0, 2, 0, 1, 0, 3,
2397 	0, 1, 0, 2, 0, 1, 0, 4,
2398 	0, 1, 0, 2, 0, 1, 0, 3,
2399 	0, 1, 0, 2, 0, 1, 0, 7,
2400 	0, 1, 0, 2, 0, 1, 0, 3,
2401 	0, 1, 0, 2, 0, 1, 0, 4,
2402 	0, 1, 0, 2, 0, 1, 0, 3,
2403 	0, 1, 0, 2, 0, 1, 0, 5,
2404 	0, 1, 0, 2, 0, 1, 0, 3,
2405 	0, 1, 0, 2, 0, 1, 0, 4,
2406 	0, 1, 0, 2, 0, 1, 0, 3,
2407 	0, 1, 0, 2, 0, 1, 0, 6,
2408 	0, 1, 0, 2, 0, 1, 0, 3,
2409 	0, 1, 0, 2, 0, 1, 0, 4,
2410 	0, 1, 0, 2, 0, 1, 0, 3,
2411 	0, 1, 0, 2, 0, 1, 0, 5,
2412 	0, 1, 0, 2, 0, 1, 0, 3,
2413 	0, 1, 0, 2, 0, 1, 0, 4,
2414 	0, 1, 0, 2, 0, 1, 0, 3,
2415 	0, 1, 0, 2, 0, 1, 0, 8
2416 };
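/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val
 * starting at bit 0, i.e. how many in-sequence TSNs a mapping array byte
 * contributes before its first hole. For example:
 *
 *	val = 0x17;			binary 00010111
 *	sctp_map_lookup_tab[val] == 3;	bits 0-2 set, bit 3 clear
 *
 * The slide code below special-cases val == 0xff (it adds 8 and keeps
 * scanning), so the all-ones entry is never the stopping case.
 */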
2417 
2418 
2419 void
2420 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2421 {
2422 	/*
2423 	 * Now we also need to check the mapping array in a couple of ways.
2424 	 * 1) Did we move the cum-ack point?
2425 	 *
2426 	 * When you first glance at this you might think that all entries
2427 	 * that make up the position of the cum-ack would be in the
2428 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2429 	 * deliverable. That's true with one exception: when it's a fragmented
2430 	 * message we may not deliver the data until some threshold (or all
2431 	 * of it) is in place. So we must OR the nr_mapping_array and
2432 	 * mapping_array to get a true picture of the cum-ack.
2433 	 */
2434 	struct sctp_association *asoc;
2435 	int at;
2436 	uint8_t val;
2437 	int slide_from, slide_end, lgap, distance;
2438 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2439 
2440 	asoc = &stcb->asoc;
2441 
2442 	old_cumack = asoc->cumulative_tsn;
2443 	old_base = asoc->mapping_array_base_tsn;
2444 	old_highest = asoc->highest_tsn_inside_map;
2445 	/*
2446 	 * We could probably improve this a small bit by calculating the
2447 	 * offset of the current cum-ack as the starting point.
2448 	 */
2449 	at = 0;
2450 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2451 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2452 		if (val == 0xff) {
2453 			at += 8;
2454 		} else {
2455 			/* there is a 0 bit */
2456 			at += sctp_map_lookup_tab[val];
2457 			break;
2458 		}
2459 	}
2460 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2461 
2462 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2463 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2464 #ifdef INVARIANTS
2465 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2466 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2467 #else
2468 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2469 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2470 		sctp_print_mapping_array(asoc);
2471 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2472 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2473 		}
2474 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2475 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2476 #endif
2477 	}
2478 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2479 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2480 	} else {
2481 		highest_tsn = asoc->highest_tsn_inside_map;
2482 	}
2483 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2484 		/* The complete array was completed by a single FR */
2485 		/* highest becomes the cum-ack */
2486 		int clr;
2487 #ifdef INVARIANTS
2488 		unsigned int i;
2489 #endif
2490 
2491 		/* clear the array */
2492 		clr = ((at + 7) >> 3);
2493 		if (clr > asoc->mapping_array_size) {
2494 			clr = asoc->mapping_array_size;
2495 		}
2496 		memset(asoc->mapping_array, 0, clr);
2497 		memset(asoc->nr_mapping_array, 0, clr);
2498 #ifdef INVARIANTS
2499 		for (i = 0; i < asoc->mapping_array_size; i++) {
2500 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2501 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2502 				sctp_print_mapping_array(asoc);
2503 			}
2504 		}
2505 #endif
2506 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2507 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2508 	} else if (at >= 8) {
2509 		/* we can slide the mapping array down */
2510 		/* slide_from holds where we hit the first NON 0xff byte */
2511 
2512 		/*
2513 		 * now calculate the ceiling of the move using our highest
2514 		 * TSN value
2515 		 */
2516 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2517 		slide_end = (lgap >> 3);
2518 		if (slide_end < slide_from) {
2519 			sctp_print_mapping_array(asoc);
2520 #ifdef INVARIANTS
2521 			panic("impossible slide");
2522 #else
2523 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2524 			    lgap, slide_end, slide_from, at);
2525 			return;
2526 #endif
2527 		}
2528 		if (slide_end > asoc->mapping_array_size) {
2529 #ifdef INVARIANTS
2530 			panic("would overrun buffer");
2531 #else
2532 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2533 			    asoc->mapping_array_size, slide_end);
2534 			slide_end = asoc->mapping_array_size;
2535 #endif
2536 		}
2537 		distance = (slide_end - slide_from) + 1;
2538 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2539 			sctp_log_map(old_base, old_cumack, old_highest,
2540 			    SCTP_MAP_PREPARE_SLIDE);
2541 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2542 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2543 		}
2544 		if (distance + slide_from > asoc->mapping_array_size ||
2545 		    distance < 0) {
2546 			/*
2547 			 * Here we do NOT slide forward the array so that
2548 			 * hopefully when more data comes in to fill it up
2549 			 * we will be able to slide it forward. Really I
2550 			 * don't think this should happen :-0
2551 			 */
2552 
2553 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2554 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2555 				    (uint32_t)asoc->mapping_array_size,
2556 				    SCTP_MAP_SLIDE_NONE);
2557 			}
2558 		} else {
2559 			int ii;
2560 
2561 			for (ii = 0; ii < distance; ii++) {
2562 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2563 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2564 
2565 			}
2566 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2567 				asoc->mapping_array[ii] = 0;
2568 				asoc->nr_mapping_array[ii] = 0;
2569 			}
2570 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2571 				asoc->highest_tsn_inside_map += (slide_from << 3);
2572 			}
2573 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2574 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2575 			}
2576 			asoc->mapping_array_base_tsn += (slide_from << 3);
2577 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2578 				sctp_log_map(asoc->mapping_array_base_tsn,
2579 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2580 				    SCTP_MAP_SLIDE_RESULT);
2581 			}
2582 		}
2583 	}
2584 }
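/*
 * A worked example of the slide above (illustrative numbers): if the two
 * lowest map bytes are 0xff (so slide_from = 2) and the highest TSN sits
 * in byte 5 (slide_end = 5), then distance = 4 bytes are copied down to
 * the front of both arrays, bytes 4 and up are zeroed, and
 * mapping_array_base_tsn advances by slide_from * 8 = 16 TSNs.
 */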
2585 
2586 void
2587 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2588 {
2589 	struct sctp_association *asoc;
2590 	uint32_t highest_tsn;
2591 	int is_a_gap;
2592 
2593 	sctp_slide_mapping_arrays(stcb);
2594 	asoc = &stcb->asoc;
2595 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2596 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2597 	} else {
2598 		highest_tsn = asoc->highest_tsn_inside_map;
2599 	}
2600 	/* Is there a gap now? */
2601 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2602 
2603 	/*
2604 	 * Now we need to see if we need to queue a sack or just start the
2605 	 * timer (if allowed).
2606 	 */
2607 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2608 		/*
2609 		 * Ok, special case: in the SHUTDOWN-SENT state. Here we make
2610 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2611 		 * a SACK.
2612 		 */
2613 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2614 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2615 			    stcb->sctp_ep, stcb, NULL,
2616 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2617 		}
2618 		sctp_send_shutdown(stcb,
2619 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2620 		if (is_a_gap) {
2621 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2622 		}
2623 	} else {
2624 		/*
2625 		 * CMT DAC algorithm: increase number of packets received
2626 		 * since last ack
2627 		 */
2628 		stcb->asoc.cmt_dac_pkts_rcvd++;
2629 
2630 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2631 							 * SACK */
2632 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2633 							 * longer is one */
2634 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2635 		    (is_a_gap) ||	/* is still a gap */
2636 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2637 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2638 		    ) {
2639 
2640 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2641 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2642 			    (stcb->asoc.send_sack == 0) &&
2643 			    (stcb->asoc.numduptsns == 0) &&
2644 			    (stcb->asoc.delayed_ack) &&
2645 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2646 
2647 				/*
2648 				 * CMT DAC algorithm: With CMT, delay acks
2649 				 * even in the face of reordering.
2650 				 * Therefore, acks that do not have to be
2651 				 * sent because of the above reasons will be
2652 				 * delayed. That is, acks that would have
2653 				 * been sent due to gap reports will be
2654 				 * delayed with DAC. Start the delayed ack
2655 				 * timer.
2657 				 */
2658 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2659 				    stcb->sctp_ep, stcb, NULL);
2660 			} else {
2661 				/*
2662 				 * Ok we must build a SACK since the timer
2663 				 * is pending, we got our first packet OR
2664 				 * there are gaps or duplicates.
2665 				 */
2666 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2667 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2668 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2669 			}
2670 		} else {
2671 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2672 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2673 				    stcb->sctp_ep, stcb, NULL);
2674 			}
2675 		}
2676 	}
2677 }
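/*
 * For example (assuming delayed acks are enabled, sack_freq = 2, and CMT
 * DAC is not delaying the ack): the first data packet after an ack finds
 * none of the conditions above true and just starts the delayed-ack
 * timer; the second packet makes data_pkts_seen reach sack_freq, so a
 * SACK is built and sent immediately.
 */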
2678 
2679 int
2680 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2681     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2682     struct sctp_nets *net, uint32_t *high_tsn)
2683 {
2684 	struct sctp_chunkhdr *ch, chunk_buf;
2685 	struct sctp_association *asoc;
2686 	int num_chunks = 0;	/* number of control chunks processed */
2687 	int stop_proc = 0;
2688 	int break_flag, last_chunk;
2689 	int abort_flag = 0, was_a_gap;
2690 	struct mbuf *m;
2691 	uint32_t highest_tsn;
2692 	uint16_t chk_length;
2693 
2694 	/* set the rwnd */
2695 	sctp_set_rwnd(stcb, &stcb->asoc);
2696 
2697 	m = *mm;
2698 	SCTP_TCB_LOCK_ASSERT(stcb);
2699 	asoc = &stcb->asoc;
2700 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2701 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2702 	} else {
2703 		highest_tsn = asoc->highest_tsn_inside_map;
2704 	}
2705 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2706 	/*
2707 	 * setup where we got the last DATA packet from for any SACK that
2708 	 * may need to go out. Don't bump the net. This is done ONLY when a
2709 	 * chunk is assigned.
2710 	 */
2711 	asoc->last_data_chunk_from = net;
2712 
2713 	/*-
2714 	 * Now before we proceed we must figure out if this is a wasted
2715 	 * cluster... i.e. it is a small packet sent in and yet the driver
2716 	 * underneath allocated a full cluster for it. If so we must copy it
2717 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2718 	 * with cluster starvation. Note for __Panda__ we don't do this
2719 	 * since it has clusters all the way down to 64 bytes.
2720 	 */
2721 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2722 		/* we only handle mbufs that are singletons.. not chains */
2723 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2724 		if (m) {
2725 			/* ok lets see if we can copy the data up */
2726 			caddr_t *from, *to;
2727 
2728 			/* get the pointers and copy */
2729 			to = mtod(m, caddr_t *);
2730 			from = mtod((*mm), caddr_t *);
2731 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2732 			/* copy the length and free up the old */
2733 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2734 			sctp_m_freem(*mm);
2735 			/* success, back copy */
2736 			*mm = m;
2737 		} else {
2738 			/* We are in trouble in the mbuf world .. yikes */
2739 			m = *mm;
2740 		}
2741 	}
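	/*
	 * The copy-up above only fires for single-mbuf packets shorter than
	 * MLEN (the payload that fits in a plain mbuf without a cluster),
	 * e.g. a small DATA packet that the driver nevertheless placed in a
	 * full 2 KB cluster.
	 */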
2742 	/* get pointer to the first chunk header */
2743 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2744 	    sizeof(struct sctp_chunkhdr),
2745 	    (uint8_t *)&chunk_buf);
2746 	if (ch == NULL) {
2747 		return (1);
2748 	}
2749 	/*
2750 	 * process all DATA chunks...
2751 	 */
2752 	*high_tsn = asoc->cumulative_tsn;
2753 	break_flag = 0;
2754 	asoc->data_pkts_seen++;
2755 	while (stop_proc == 0) {
2756 		/* validate chunk length */
2757 		chk_length = ntohs(ch->chunk_length);
2758 		if (length - *offset < chk_length) {
2759 			/* all done, mutilated chunk */
2760 			stop_proc = 1;
2761 			continue;
2762 		}
2763 		if ((asoc->idata_supported == 1) &&
2764 		    (ch->chunk_type == SCTP_DATA)) {
2765 			struct mbuf *op_err;
2766 			char msg[SCTP_DIAG_INFO_LEN];
2767 
2768 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2769 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2770 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2771 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2772 			return (2);
2773 		}
2774 		if ((asoc->idata_supported == 0) &&
2775 		    (ch->chunk_type == SCTP_IDATA)) {
2776 			struct mbuf *op_err;
2777 			char msg[SCTP_DIAG_INFO_LEN];
2778 
2779 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2780 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2781 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2782 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2783 			return (2);
2784 		}
2785 		if ((ch->chunk_type == SCTP_DATA) ||
2786 		    (ch->chunk_type == SCTP_IDATA)) {
2787 			uint16_t clen;
2788 
2789 			if (ch->chunk_type == SCTP_DATA) {
2790 				clen = sizeof(struct sctp_data_chunk);
2791 			} else {
2792 				clen = sizeof(struct sctp_idata_chunk);
2793 			}
2794 			if (chk_length < clen) {
2795 				/*
2796 				 * Need to send an abort since we had an
2797 				 * invalid data chunk.
2798 				 */
2799 				struct mbuf *op_err;
2800 				char msg[SCTP_DIAG_INFO_LEN];
2801 
2802 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2803 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2804 				    chk_length);
2805 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2806 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2807 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2808 				return (2);
2809 			}
2810 #ifdef SCTP_AUDITING_ENABLED
2811 			sctp_audit_log(0xB1, 0);
2812 #endif
2813 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2814 				last_chunk = 1;
2815 			} else {
2816 				last_chunk = 0;
2817 			}
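			/*
			 * Chunks are padded to a 32-bit boundary on the
			 * wire, so the walk below advances by
			 * SCTP_SIZE32(chk_length); a chunk of length 17
			 * occupies 20 bytes, for example. last_chunk is set
			 * only when the padded length consumes the rest of
			 * the packet, which lets us steal *mm instead of
			 * copying the payload.
			 */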
2818 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2819 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2820 			    last_chunk, ch->chunk_type)) {
2821 				num_chunks++;
2822 			}
2823 			if (abort_flag)
2824 				return (2);
2825 
2826 			if (break_flag) {
2827 				/*
2828 				 * Set because of out of rwnd space and no
2829 				 * drop rep space left.
2830 				 */
2831 				stop_proc = 1;
2832 				continue;
2833 			}
2834 		} else {
2835 			/* not a data chunk in the data region */
2836 			switch (ch->chunk_type) {
2837 			case SCTP_INITIATION:
2838 			case SCTP_INITIATION_ACK:
2839 			case SCTP_SELECTIVE_ACK:
2840 			case SCTP_NR_SELECTIVE_ACK:
2841 			case SCTP_HEARTBEAT_REQUEST:
2842 			case SCTP_HEARTBEAT_ACK:
2843 			case SCTP_ABORT_ASSOCIATION:
2844 			case SCTP_SHUTDOWN:
2845 			case SCTP_SHUTDOWN_ACK:
2846 			case SCTP_OPERATION_ERROR:
2847 			case SCTP_COOKIE_ECHO:
2848 			case SCTP_COOKIE_ACK:
2849 			case SCTP_ECN_ECHO:
2850 			case SCTP_ECN_CWR:
2851 			case SCTP_SHUTDOWN_COMPLETE:
2852 			case SCTP_AUTHENTICATION:
2853 			case SCTP_ASCONF_ACK:
2854 			case SCTP_PACKET_DROPPED:
2855 			case SCTP_STREAM_RESET:
2856 			case SCTP_FORWARD_CUM_TSN:
2857 			case SCTP_ASCONF:
2858 				{
2859 					/*
2860 					 * Now, what do we do with KNOWN
2861 					 * chunks that are NOT in the right
2862 					 * place?
2863 					 *
2864 					 * For now, we treat them as a
2865 					 * protocol violation and abort the
2866 					 * association. We may later want to
2867 					 * add sysctl knobs to instead ignore
2868 					 * or process them.
2869 					 */
2870 					struct mbuf *op_err;
2871 					char msg[SCTP_DIAG_INFO_LEN];
2872 
2873 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2874 					    ch->chunk_type);
2875 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2876 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2877 					return (2);
2878 				}
2879 			default:
2880 				/*
2881 				 * Unknown chunk type: use bit rules after
2882 				 * checking length
2883 				 */
2884 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2885 					/*
2886 					 * Need to send an abort since we
2887 					 * had an invalid chunk.
2888 					 */
2889 					struct mbuf *op_err;
2890 					char msg[SCTP_DIAG_INFO_LEN];
2891 
2892 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2893 					    chk_length);
2894 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2895 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2896 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2897 					return (2);
2898 				}
2899 				if (ch->chunk_type & 0x40) {
2900 					/* Add an error report to the queue */
2901 					struct mbuf *op_err;
2902 					struct sctp_gen_error_cause *cause;
2903 
2904 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2905 					    0, M_NOWAIT, 1, MT_DATA);
2906 					if (op_err != NULL) {
2907 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2908 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2909 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2910 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2911 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2912 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2913 							sctp_queue_op_err(stcb, op_err);
2914 						} else {
2915 							sctp_m_freem(op_err);
2916 						}
2917 					}
2918 				}
2919 				if ((ch->chunk_type & 0x80) == 0) {
2920 					/* discard the rest of this packet */
2921 					stop_proc = 1;
2922 				}	/* else skip this bad chunk and
2923 					 * continue... */
2924 				break;
2925 			}	/* end of switch on chunk type */
2926 		}
2927 		*offset += SCTP_SIZE32(chk_length);
2928 		if ((*offset >= length) || stop_proc) {
2929 			/* no more data left in the mbuf chain */
2930 			stop_proc = 1;
2931 			continue;
2932 		}
2933 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2934 		    sizeof(struct sctp_chunkhdr),
2935 		    (uint8_t *)&chunk_buf);
2936 		if (ch == NULL) {
2937 			*offset = length;
2938 			stop_proc = 1;
2939 			continue;
2940 		}
2941 	}
2942 	if (break_flag) {
2943 		/*
2944 		 * we need to report rwnd overrun drops.
2945 		 */
2946 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2947 	}
2948 	if (num_chunks) {
2949 		/*
2950 		 * Did we get data? If so, update the time for auto-close and
2951 		 * give the peer credit for being alive.
2952 		 */
2953 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2954 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2955 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2956 			    stcb->asoc.overall_error_count,
2957 			    0,
2958 			    SCTP_FROM_SCTP_INDATA,
2959 			    __LINE__);
2960 		}
2961 		stcb->asoc.overall_error_count = 0;
2962 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2963 	}
2964 	/* now service all of the reassm queue if needed */
2965 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2966 		/* Assure that we ack right away */
2967 		stcb->asoc.send_sack = 1;
2968 	}
2969 	/* Start a sack timer or QUEUE a SACK for sending */
2970 	sctp_sack_check(stcb, was_a_gap);
2971 	return (0);
2972 }
2973 
2974 static int
2975 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2976     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2977     int *num_frs,
2978     uint32_t *biggest_newly_acked_tsn,
2979     uint32_t *this_sack_lowest_newack,
2980     int *rto_ok)
2981 {
2982 	struct sctp_tmit_chunk *tp1;
2983 	unsigned int theTSN;
2984 	int j, wake_him = 0, circled = 0;
2985 
2986 	/* Recover the tp1 we last saw */
2987 	tp1 = *p_tp1;
2988 	if (tp1 == NULL) {
2989 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2990 	}
2991 	for (j = frag_strt; j <= frag_end; j++) {
2992 		theTSN = j + last_tsn;
2993 		while (tp1) {
2994 			if (tp1->rec.data.doing_fast_retransmit)
2995 				(*num_frs) += 1;
2996 
2997 			/*-
2998 			 * CMT: CUCv2 algorithm. For each TSN being
2999 			 * processed from the sent queue, track the
3000 			 * next expected pseudo-cumack, or
3001 			 * rtx_pseudo_cumack, if required. Separate
3002 			 * cumack trackers for first transmissions,
3003 			 * and retransmissions.
3004 			 */
3005 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3006 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
3007 			    (tp1->snd_count == 1)) {
3008 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3009 				tp1->whoTo->find_pseudo_cumack = 0;
3010 			}
3011 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3012 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3013 			    (tp1->snd_count > 1)) {
3014 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3015 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3016 			}
3017 			if (tp1->rec.data.tsn == theTSN) {
3018 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3019 					/*-
3020 					 * must be held until
3021 					 * cum-ack passes
3022 					 */
3023 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3024 						/*-
3025 						 * If it is less than RESEND, it is
3026 						 * now no-longer in flight.
3027 						 * Higher values may already be set
3028 						 * via previous Gap Ack Blocks...
3029 						 * i.e. ACKED or RESEND.
3030 						 */
3031 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3032 						    *biggest_newly_acked_tsn)) {
3033 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3034 						}
3035 						/*-
3036 						 * CMT: SFR algo (and HTNA) - set
3037 						 * saw_newack to 1 for dest being
3038 						 * newly acked. update
3039 						 * this_sack_highest_newack if
3040 						 * appropriate.
3041 						 */
3042 						if (tp1->rec.data.chunk_was_revoked == 0)
3043 							tp1->whoTo->saw_newack = 1;
3044 
3045 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3046 						    tp1->whoTo->this_sack_highest_newack)) {
3047 							tp1->whoTo->this_sack_highest_newack =
3048 							    tp1->rec.data.tsn;
3049 						}
3050 						/*-
3051 						 * CMT DAC algo: also update
3052 						 * this_sack_lowest_newack
3053 						 */
3054 						if (*this_sack_lowest_newack == 0) {
3055 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3056 								sctp_log_sack(*this_sack_lowest_newack,
3057 								    last_tsn,
3058 								    tp1->rec.data.tsn,
3059 								    0,
3060 								    0,
3061 								    SCTP_LOG_TSN_ACKED);
3062 							}
3063 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3064 						}
3065 						/*-
3066 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3067 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3068 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3069 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3070 						 * Separate pseudo_cumack trackers for first transmissions and
3071 						 * retransmissions.
3072 						 */
3073 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3074 							if (tp1->rec.data.chunk_was_revoked == 0) {
3075 								tp1->whoTo->new_pseudo_cumack = 1;
3076 							}
3077 							tp1->whoTo->find_pseudo_cumack = 1;
3078 						}
3079 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3080 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3081 						}
3082 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3083 							if (tp1->rec.data.chunk_was_revoked == 0) {
3084 								tp1->whoTo->new_pseudo_cumack = 1;
3085 							}
3086 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3087 						}
3088 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3089 							sctp_log_sack(*biggest_newly_acked_tsn,
3090 							    last_tsn,
3091 							    tp1->rec.data.tsn,
3092 							    frag_strt,
3093 							    frag_end,
3094 							    SCTP_LOG_TSN_ACKED);
3095 						}
3096 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3097 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3098 							    tp1->whoTo->flight_size,
3099 							    tp1->book_size,
3100 							    (uint32_t)(uintptr_t)tp1->whoTo,
3101 							    tp1->rec.data.tsn);
3102 						}
3103 						sctp_flight_size_decrease(tp1);
3104 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3105 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3106 							    tp1);
3107 						}
3108 						sctp_total_flight_decrease(stcb, tp1);
3109 
3110 						tp1->whoTo->net_ack += tp1->send_size;
3111 						if (tp1->snd_count < 2) {
3112 							/*-
3113 							 * True non-retransmitted chunk
3114 							 */
3115 							tp1->whoTo->net_ack2 += tp1->send_size;
3116 
3117 							/*-
3118 							 * update RTO too ?
3119 							 */
3120 							if (tp1->do_rtt) {
3121 								if (*rto_ok &&
3122 								    sctp_calculate_rto(stcb,
3123 								    &stcb->asoc,
3124 								    tp1->whoTo,
3125 								    &tp1->sent_rcv_time,
3126 								    SCTP_RTT_FROM_DATA)) {
3127 									*rto_ok = 0;
3128 								}
3129 								if (tp1->whoTo->rto_needed == 0) {
3130 									tp1->whoTo->rto_needed = 1;
3131 								}
3132 								tp1->do_rtt = 0;
3133 							}
3134 						}
3135 
3136 					}
3137 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3138 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3139 						    stcb->asoc.this_sack_highest_gap)) {
3140 							stcb->asoc.this_sack_highest_gap =
3141 							    tp1->rec.data.tsn;
3142 						}
3143 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3144 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3145 #ifdef SCTP_AUDITING_ENABLED
3146 							sctp_audit_log(0xB2,
3147 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3148 #endif
3149 						}
3150 					}
3151 					/*-
3152 					 * All chunks NOT UNSENT fall through here and are marked
3153 					 * (leave PR-SCTP ones that are to skip alone though)
3154 					 */
3155 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3156 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3157 						tp1->sent = SCTP_DATAGRAM_MARKED;
3158 					}
3159 					if (tp1->rec.data.chunk_was_revoked) {
3160 						/* deflate the cwnd */
3161 						tp1->whoTo->cwnd -= tp1->book_size;
3162 						tp1->rec.data.chunk_was_revoked = 0;
3163 					}
3164 					/* NR Sack code here */
3165 					if (nr_sacking &&
3166 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3167 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3168 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3169 #ifdef INVARIANTS
3170 						} else {
3171 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3172 #endif
3173 						}
3174 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3175 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3176 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3177 							stcb->asoc.trigger_reset = 1;
3178 						}
3179 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3180 						if (tp1->data) {
3181 							/*
3182 							 * sa_ignore
3183 							 * NO_NULL_CHK
3184 							 */
3185 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3186 							sctp_m_freem(tp1->data);
3187 							tp1->data = NULL;
3188 						}
3189 						wake_him++;
3190 					}
3191 				}
3192 				break;
3193 			}	/* if (tp1->tsn == theTSN) */
3194 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3195 				break;
3196 			}
3197 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3198 			if ((tp1 == NULL) && (circled == 0)) {
3199 				circled++;
3200 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3201 			}
3202 		}		/* end while (tp1) */
3203 		if (tp1 == NULL) {
3204 			circled = 0;
3205 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3206 		}
3207 		/* In case the fragments were not in order we must reset */
3208 	}			/* end for (j = fragStart */
3209 	*p_tp1 = tp1;
3210 	return (wake_him);	/* Return value only used for nr-sack */
3211 }
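
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * and example_cucv2_pseudo_cumack() is a hypothetical helper. It condenses
 * the CUCv2 pseudo-cumack handling above: each destination tracks the TSN
 * it expects as its next (rtx-)pseudo-cumack, and when a gap report newly
 * acks exactly that TSN, the destination is flagged for a cwnd update
 * (unless the chunk was revoked) and for a fresh search.
 */
static void
example_cucv2_pseudo_cumack(uint32_t acked_tsn, uint32_t pseudo_cumack,
    int chunk_was_revoked, int *new_pseudo_cumack, int *find_pseudo_cumack)
{
	if (acked_tsn == pseudo_cumack) {
		if (chunk_was_revoked == 0) {
			/* Allow the cwnd for this destination to be updated. */
			*new_pseudo_cumack = 1;
		}
		/* Trigger the search for the next expected pseudo-cumack. */
		*find_pseudo_cumack = 1;
	}
}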
3212 
3213 
3214 static int
3215 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3216     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3217     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3218     int num_seg, int num_nr_seg, int *rto_ok)
3219 {
3220 	struct sctp_gap_ack_block *frag, block;
3221 	struct sctp_tmit_chunk *tp1;
3222 	int i;
3223 	int num_frs = 0;
3224 	int chunk_freed;
3225 	int non_revocable;
3226 	uint16_t frag_strt, frag_end, prev_frag_end;
3227 
3228 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3229 	prev_frag_end = 0;
3230 	chunk_freed = 0;
3231 
3232 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3233 		if (i == num_seg) {
3234 			prev_frag_end = 0;
3235 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3236 		}
3237 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3238 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3239 		*offset += sizeof(block);
3240 		if (frag == NULL) {
3241 			return (chunk_freed);
3242 		}
3243 		frag_strt = ntohs(frag->start);
3244 		frag_end = ntohs(frag->end);
3245 
3246 		if (frag_strt > frag_end) {
3247 			/* This gap report is malformed, skip it. */
3248 			continue;
3249 		}
3250 		if (frag_strt <= prev_frag_end) {
3251 			/* This gap report is not in order, so restart. */
3252 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3253 		}
3254 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3255 			*biggest_tsn_acked = last_tsn + frag_end;
3256 		}
3257 		if (i < num_seg) {
3258 			non_revocable = 0;
3259 		} else {
3260 			non_revocable = 1;
3261 		}
3262 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3263 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3264 		    this_sack_lowest_newack, rto_ok)) {
3265 			chunk_freed = 1;
3266 		}
3267 		prev_frag_end = frag_end;
3268 	}
3269 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3270 		if (num_frs)
3271 			sctp_log_fr(*biggest_tsn_acked,
3272 			    *biggest_newly_acked_tsn,
3273 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3274 	}
3275 	return (chunk_freed);
3276 }
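
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * and example_gap_block_to_tsn_range() is a hypothetical helper. A gap-ack
 * block carries 16-bit offsets relative to the cumulative TSN ack, so
 * sctp_handle_segments() above treats it as covering the TSN range
 * [last_tsn + start, last_tsn + end]; unsigned 32-bit addition wraps
 * exactly like TSN serial arithmetic. A block with start > end is
 * malformed and skipped, and a block that does not start strictly after
 * the previous one forces a rescan from the head of the sent queue.
 */
static int
example_gap_block_to_tsn_range(uint32_t last_tsn, uint16_t frag_strt,
    uint16_t frag_end, uint16_t prev_frag_end,
    uint32_t *first_tsn, uint32_t *last_acked_tsn)
{
	if (frag_strt > frag_end) {
		/* Malformed report: nothing usable. */
		return (0);
	}
	*first_tsn = last_tsn + frag_strt;
	*last_acked_tsn = last_tsn + frag_end;
	/* Return whether the in-order scan may simply continue. */
	return (frag_strt > prev_frag_end);
}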
3277 
3278 static void
3279 sctp_check_for_revoked(struct sctp_tcb *stcb,
3280     struct sctp_association *asoc, uint32_t cumack,
3281     uint32_t biggest_tsn_acked)
3282 {
3283 	struct sctp_tmit_chunk *tp1;
3284 
3285 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3286 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3287 			/*
3288 			 * OK, this chunk is either ACKED or MARKED. If it
3289 			 * is ACKED it has been previously acked, but not by
3290 			 * this SACK, i.e. it has been revoked. If it is
3291 			 * MARKED it was acked again.
3292 			 */
3293 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3294 				break;
3295 			}
3296 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3297 				/* it has been revoked */
3298 				tp1->sent = SCTP_DATAGRAM_SENT;
3299 				tp1->rec.data.chunk_was_revoked = 1;
3300 				/*
3301 				 * We must add this stuff back in to assure
3302 				 * timers and such get started.
3303 				 */
3304 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3305 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3306 					    tp1->whoTo->flight_size,
3307 					    tp1->book_size,
3308 					    (uint32_t)(uintptr_t)tp1->whoTo,
3309 					    tp1->rec.data.tsn);
3310 				}
3311 				sctp_flight_size_increase(tp1);
3312 				sctp_total_flight_increase(stcb, tp1);
3313 				/*
3314 				 * We inflate the cwnd to compensate for our
3315 				 * artificial inflation of the flight_size.
3316 				 */
3317 				tp1->whoTo->cwnd += tp1->book_size;
3318 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3319 					sctp_log_sack(asoc->last_acked_seq,
3320 					    cumack,
3321 					    tp1->rec.data.tsn,
3322 					    0,
3323 					    0,
3324 					    SCTP_LOG_TSN_REVOKED);
3325 				}
3326 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3327 				/* it has been re-acked in this SACK */
3328 				tp1->sent = SCTP_DATAGRAM_ACKED;
3329 			}
3330 		}
3331 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3332 			break;
3333 	}
3334 }
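
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * and example_revoke_transition() is a hypothetical helper. It restates
 * the state transitions sctp_check_for_revoked() applies to a chunk above
 * the cumulative ack but within this SACK's reach: a chunk still at ACKED
 * was reported by an earlier SACK but not by this one, so it is revoked
 * back to SENT (the real code also re-inflates flight size and cwnd); a
 * chunk at MARKED was re-reported by this SACK and is promoted to ACKED.
 */
static int
example_revoke_transition(int sent, uint32_t tsn, uint32_t cumack,
    uint32_t biggest_tsn_acked)
{
	if (!SCTP_TSN_GT(tsn, cumack) ||
	    SCTP_TSN_GT(tsn, biggest_tsn_acked)) {
		/* Outside the range this SACK can revoke or re-ack. */
		return (sent);
	}
	if (sent == SCTP_DATAGRAM_ACKED) {
		/* Previously acked, missing now: revoked. */
		return (SCTP_DATAGRAM_SENT);
	}
	if (sent == SCTP_DATAGRAM_MARKED) {
		/* Re-acked by this SACK. */
		return (SCTP_DATAGRAM_ACKED);
	}
	return (sent);
}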
3335 
3336 
3337 static void
3338 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3339     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3340 {
3341 	struct sctp_tmit_chunk *tp1;
3342 	int strike_flag = 0;
3343 	struct timeval now;
3344 	int tot_retrans = 0;
3345 	uint32_t sending_seq;
3346 	struct sctp_nets *net;
3347 	int num_dests_sacked = 0;
3348 
3349 	/*
3350 	 * Select the sending_seq: this is either the next thing ready to be
3351 	 * sent but not yet transmitted, or the next seq we will assign.
3352 	 */
3353 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3354 	if (tp1 == NULL) {
3355 		sending_seq = asoc->sending_seq;
3356 	} else {
3357 		sending_seq = tp1->rec.data.tsn;
3358 	}
3359 
3360 	/* CMT DAC algo: determine whether this SACK is a mixed SACK */
3361 	if ((asoc->sctp_cmt_on_off > 0) &&
3362 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3363 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3364 			if (net->saw_newack)
3365 				num_dests_sacked++;
3366 		}
3367 	}
3368 	if (stcb->asoc.prsctp_supported) {
3369 		(void)SCTP_GETTIME_TIMEVAL(&now);
3370 	}
3371 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3372 		strike_flag = 0;
3373 		if (tp1->no_fr_allowed) {
3374 			/* this one had a timeout or something */
3375 			continue;
3376 		}
3377 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3378 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3379 				sctp_log_fr(biggest_tsn_newly_acked,
3380 				    tp1->rec.data.tsn,
3381 				    tp1->sent,
3382 				    SCTP_FR_LOG_CHECK_STRIKE);
3383 		}
3384 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3385 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3386 			/* done */
3387 			break;
3388 		}
3389 		if (stcb->asoc.prsctp_supported) {
3390 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3391 				/* Is it expired? */
3392 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3393 					/* Yes so drop it */
3394 					if (tp1->data != NULL) {
3395 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3396 						    SCTP_SO_NOT_LOCKED);
3397 					}
3398 					continue;
3399 				}
3400 			}
3401 
3402 		}
3403 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3404 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3405 			/* we are beyond the tsn in the sack */
3406 			break;
3407 		}
3408 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3409 			/* either a RESEND, ACKED, or MARKED */
3410 			/* skip */
3411 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3412 				/* Continue striking FWD-TSN chunks */
3413 				tp1->rec.data.fwd_tsn_cnt++;
3414 			}
3415 			continue;
3416 		}
3417 		/*
3418 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3419 		 */
3420 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3421 			/*
3422 			 * No new acks were received for data sent to this
3423 			 * dest. Therefore, according to the SFR algo for
3424 			 * CMT, no data sent to this dest can be marked for
3425 			 * FR using this SACK.
3426 			 */
3427 			continue;
3428 		} else if (tp1->whoTo &&
3429 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3430 			    tp1->whoTo->this_sack_highest_newack) &&
3431 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3432 			/*
3433 			 * CMT: New acks were received for data sent to
3434 			 * this dest. But no new acks were seen for data
3435 			 * sent after tp1. Therefore, according to the SFR
3436 			 * algo for CMT, tp1 cannot be marked for FR using
3437 			 * this SACK. This step covers part of the DAC algo
3438 			 * and the HTNA algo as well.
3439 			 */
3440 			continue;
3441 		}
3442 		/*
3443 		 * Here we check to see if we have already done a FR and, if
3444 		 * so, whether the biggest TSN we saw in the sack is smaller
3445 		 * than the recovery point. If so we don't strike the tsn...
3446 		 * otherwise we CAN strike the TSN.
3447 		 */
3448 		/*
3449 		 * @@@ JRI: Check for CMT if (accum_moved &&
3450 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3451 		 * 0)) {
3452 		 */
3453 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3454 			/*
3455 			 * Strike the TSN if in fast-recovery and cum-ack
3456 			 * moved.
3457 			 */
3458 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3459 				sctp_log_fr(biggest_tsn_newly_acked,
3460 				    tp1->rec.data.tsn,
3461 				    tp1->sent,
3462 				    SCTP_FR_LOG_STRIKE_CHUNK);
3463 			}
3464 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3465 				tp1->sent++;
3466 			}
3467 			if ((asoc->sctp_cmt_on_off > 0) &&
3468 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3469 				/*
3470 				 * CMT DAC algorithm: If the SACK flag is set
3471 				 * to 0, the lowest_newack test will not pass
3472 				 * because it would have been set to the
3473 				 * cumack earlier. If tp1 is not already to be
3474 				 * rtx'd, this is not a mixed sack, and tp1 is
3475 				 * not between two sacked TSNs, then mark it
3476 				 * by one more. NOTE that we are marking by one
3477 				 * additional time since the SACK DAC flag
3478 				 * indicates that two packets have been
3479 				 * received after this missing TSN.
3480 				 */
3481 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3482 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3483 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 						sctp_log_fr(16 + num_dests_sacked,
3485 						    tp1->rec.data.tsn,
3486 						    tp1->sent,
3487 						    SCTP_FR_LOG_STRIKE_CHUNK);
3488 					}
3489 					tp1->sent++;
3490 				}
3491 			}
3492 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3493 		    (asoc->sctp_cmt_on_off == 0)) {
3494 			/*
3495 			 * For those that have done a FR we must take
3496 			 * special consideration if we strike. I.e the
3497 			 * biggest_newly_acked must be higher than the
3498 			 * sending_seq at the time we did the FR.
3499 			 */
3500 			if (
3501 #ifdef SCTP_FR_TO_ALTERNATE
3502 			/*
3503 			 * If FR's go to new networks, then we must only do
3504 			 * this for singly homed asoc's. However if the FR's
3505 			 * go to the same network (Armando's work) then it's
3506 			 * ok to FR multiple times.
3507 			 */
3508 			    (asoc->numnets < 2)
3509 #else
3510 			    (1)
3511 #endif
3512 			    ) {
3513 
3514 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3515 				    tp1->rec.data.fast_retran_tsn)) {
3516 					/*
3517 					 * Strike the TSN, since this ack is
3518 					 * beyond where things were when we
3519 					 * did a FR.
3520 					 */
3521 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3522 						sctp_log_fr(biggest_tsn_newly_acked,
3523 						    tp1->rec.data.tsn,
3524 						    tp1->sent,
3525 						    SCTP_FR_LOG_STRIKE_CHUNK);
3526 					}
3527 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3528 						tp1->sent++;
3529 					}
3530 					strike_flag = 1;
3531 					if ((asoc->sctp_cmt_on_off > 0) &&
3532 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3533 						/*
3534 						 * CMT DAC algorithm: If
3535 						 * SACK flag is set to 0,
3536 						 * then lowest_newack test
3537 						 * will not pass because it
3538 						 * would have been set to
3539 						 * the cumack earlier. If tp1
3540 						 * is not already to be rtx'd,
3541 						 * this is not a mixed sack,
3542 						 * and tp1 is not between two
3543 						 * sacked TSNs, then mark it
3544 						 * by one more. NOTE that we
3545 						 * are marking by one
3546 						 * additional time since the
3547 						 * SACK DAC flag indicates
3548 						 * that two packets have
3549 						 * been received after this
3550 						 * missing TSN.
3551 						 */
3552 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3553 						    (num_dests_sacked == 1) &&
3554 						    SCTP_TSN_GT(this_sack_lowest_newack,
3555 						    tp1->rec.data.tsn)) {
3556 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3557 								sctp_log_fr(32 + num_dests_sacked,
3558 								    tp1->rec.data.tsn,
3559 								    tp1->sent,
3560 								    SCTP_FR_LOG_STRIKE_CHUNK);
3561 							}
3562 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3563 								tp1->sent++;
3564 							}
3565 						}
3566 					}
3567 				}
3568 			}
3569 			/*
3570 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3571 			 * algo covers HTNA.
3572 			 */
3573 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3574 		    biggest_tsn_newly_acked)) {
3575 			/*
3576 			 * We don't strike these: this is the HTNA
3577 			 * algorithm, i.e. we don't strike if our TSN is
3578 			 * larger than the Highest TSN Newly Acked.
3579 			 */
3580 			;
3581 		} else {
3582 			/* Strike the TSN */
3583 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3584 				sctp_log_fr(biggest_tsn_newly_acked,
3585 				    tp1->rec.data.tsn,
3586 				    tp1->sent,
3587 				    SCTP_FR_LOG_STRIKE_CHUNK);
3588 			}
3589 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3590 				tp1->sent++;
3591 			}
3592 			if ((asoc->sctp_cmt_on_off > 0) &&
3593 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3594 				/*
3595 				 * CMT DAC algorithm: If the SACK flag is set
3596 				 * to 0, the lowest_newack test will not pass
3597 				 * because it would have been set to the
3598 				 * cumack earlier. If tp1 is not already to be
3599 				 * rtx'd, this is not a mixed sack, and tp1 is
3600 				 * not between two sacked TSNs, then mark it
3601 				 * by one more. NOTE that we are marking by one
3602 				 * additional time since the SACK DAC flag
3603 				 * indicates that two packets have been
3604 				 * received after this missing TSN.
3605 				 */
3606 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3607 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3608 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3609 						sctp_log_fr(48 + num_dests_sacked,
3610 						    tp1->rec.data.tsn,
3611 						    tp1->sent,
3612 						    SCTP_FR_LOG_STRIKE_CHUNK);
3613 					}
3614 					tp1->sent++;
3615 				}
3616 			}
3617 		}
3618 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3619 			struct sctp_nets *alt;
3620 
3621 			/* fix counts and things */
3622 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3623 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3624 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3625 				    tp1->book_size,
3626 				    (uint32_t)(uintptr_t)tp1->whoTo,
3627 				    tp1->rec.data.tsn);
3628 			}
3629 			if (tp1->whoTo) {
3630 				tp1->whoTo->net_ack++;
3631 				sctp_flight_size_decrease(tp1);
3632 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3633 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3634 					    tp1);
3635 				}
3636 			}
3637 
3638 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3639 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3640 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3641 			}
3642 			/* add back to the rwnd */
3643 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3644 
3645 			/* remove from the total flight */
3646 			sctp_total_flight_decrease(stcb, tp1);
3647 
3648 			if ((stcb->asoc.prsctp_supported) &&
3649 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3650 				/*
3651 				 * Has it been retransmitted tv_sec times? -
3652 				 * we store the retran count there.
3653 				 */
3654 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3655 					/* Yes, so drop it */
3656 					if (tp1->data != NULL) {
3657 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3658 						    SCTP_SO_NOT_LOCKED);
3659 					}
3660 					/* Make sure to flag we had a FR */
3661 					if (tp1->whoTo != NULL) {
3662 						tp1->whoTo->net_ack++;
3663 					}
3664 					continue;
3665 				}
3666 			}
3667 			/*
3668 			 * SCTP_PRINTF("OK, we are now ready to FR this
3669 			 * guy\n");
3670 			 */
3671 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3672 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3673 				    0, SCTP_FR_MARKED);
3674 			}
3675 			if (strike_flag) {
3676 				/* This is a subsequent FR */
3677 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3678 			}
3679 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3680 			if (asoc->sctp_cmt_on_off > 0) {
3681 				/*
3682 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3683 				 * If CMT is being used, then pick dest with
3684 				 * largest ssthresh for any retransmission.
3685 				 */
3686 				tp1->no_fr_allowed = 1;
3687 				alt = tp1->whoTo;
3688 				/* sa_ignore NO_NULL_CHK */
3689 				if (asoc->sctp_cmt_pf > 0) {
3690 					/*
3691 					 * JRS 5/18/07 - If CMT PF is on,
3692 					 * use the PF version of
3693 					 * find_alt_net()
3694 					 */
3695 					alt = sctp_find_alternate_net(stcb, alt, 2);
3696 				} else {
3697 					/*
3698 					 * JRS 5/18/07 - If only CMT is on,
3699 					 * use the CMT version of
3700 					 * find_alt_net()
3701 					 */
3702 					/* sa_ignore NO_NULL_CHK */
3703 					alt = sctp_find_alternate_net(stcb, alt, 1);
3704 				}
3705 				if (alt == NULL) {
3706 					alt = tp1->whoTo;
3707 				}
3708 				/*
3709 				 * CUCv2: If a different dest is picked for
3710 				 * the retransmission, then new
3711 				 * (rtx-)pseudo_cumack needs to be tracked
3712 				 * for orig dest. Let CUCv2 track new (rtx-)
3713 				 * pseudo-cumack always.
3714 				 */
3715 				if (tp1->whoTo) {
3716 					tp1->whoTo->find_pseudo_cumack = 1;
3717 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3718 				}
3719 
3720 			} else {	/* CMT is OFF */
3721 
3722 #ifdef SCTP_FR_TO_ALTERNATE
3723 				/* Can we find an alternate? */
3724 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3725 #else
3726 				/*
3727 				 * default behavior is to NOT retransmit
3728 				 * FR's to an alternate. Armando Caro's
3729 				 * paper details why.
3730 				 */
3731 				alt = tp1->whoTo;
3732 #endif
3733 			}
3734 
3735 			tp1->rec.data.doing_fast_retransmit = 1;
3736 			tot_retrans++;
3737 			/* mark the sending seq for possible subsequent FR's */
3738 			/*
3739 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3740 			 * (uint32_t)tp1->rec.data.tsn);
3741 			 */
3742 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3743 				/*
3744 				 * If the send queue is empty then it's
3745 				 * the next sequence number that will be
3746 				 * assigned, so we subtract one from this to
3747 				 * get the one we last sent.
3748 				 */
3749 				tp1->rec.data.fast_retran_tsn = sending_seq;
3750 			} else {
3751 				/*
3752 				 * If there are chunks on the send queue
3753 				 * (unsent data that has made it from the
3754 				 * stream queues but not out the door), we
3755 				 * take the first one (which will have the
3756 				 * lowest TSN) and subtract one to get the
3757 				 * one we last sent.
3758 				 */
3759 				struct sctp_tmit_chunk *ttt;
3760 
3761 				ttt = TAILQ_FIRST(&asoc->send_queue);
3762 				tp1->rec.data.fast_retran_tsn =
3763 				    ttt->rec.data.tsn;
3764 			}
3765 
3766 			if (tp1->do_rtt) {
3767 				/*
3768 				 * this chunk had an RTO calculation pending
3769 				 * on it; cancel it
3770 				 */
3771 				if ((tp1->whoTo != NULL) &&
3772 				    (tp1->whoTo->rto_needed == 0)) {
3773 					tp1->whoTo->rto_needed = 1;
3774 				}
3775 				tp1->do_rtt = 0;
3776 			}
3777 			if (alt != tp1->whoTo) {
3778 				/* yes, there is an alternate. */
3779 				sctp_free_remote_addr(tp1->whoTo);
3780 				/* sa_ignore FREED_MEMORY */
3781 				tp1->whoTo = alt;
3782 				atomic_add_int(&alt->ref_count, 1);
3783 			}
3784 		}
3785 	}
3786 }
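
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * example_strike() and example_dac_extra_strike() are hypothetical
 * helpers. Below SCTP_DATAGRAM_RESEND the chunk's 'sent' field doubles as
 * a strike counter, so each qualifying miss report bumps it toward fast
 * retransmit. The second helper condenses the CMT DAC rule used three
 * times above: for a non-mixed SACK (exactly one destination saw new
 * acks), a hole below the lowest newly acked TSN earns one extra strike,
 * because the DAC flag implies two packets arrived after the missing TSN.
 */
static int
example_strike(int sent)
{
	if (sent < SCTP_DATAGRAM_RESEND) {
		/* One more miss report against this chunk. */
		sent++;
	}
	return (sent);
}

static int
example_dac_extra_strike(int sent, int num_dests_sacked,
    uint32_t this_sack_lowest_newack, uint32_t tsn)
{
	if ((sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
	    SCTP_TSN_GT(this_sack_lowest_newack, tsn)) {
		/* Non-mixed SACK, hole below the lowest new ack: strike. */
		sent++;
	}
	return (sent);
}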
3787 
3788 struct sctp_tmit_chunk *
3789 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3790     struct sctp_association *asoc)
3791 {
3792 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3793 	struct timeval now;
3794 	int now_filled = 0;
3795 
3796 	if (asoc->prsctp_supported == 0) {
3797 		return (NULL);
3798 	}
3799 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3800 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3801 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3802 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3803 			/* no chance to advance, out of here */
3804 			break;
3805 		}
3806 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3807 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3808 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3809 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3810 				    asoc->advanced_peer_ack_point,
3811 				    tp1->rec.data.tsn, 0, 0);
3812 			}
3813 		}
3814 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3815 			/*
3816 			 * We can't fwd-tsn past any that are reliable, i.e.
3817 			 * retransmitted until the asoc fails.
3818 			 */
3819 			break;
3820 		}
3821 		if (!now_filled) {
3822 			(void)SCTP_GETTIME_TIMEVAL(&now);
3823 			now_filled = 1;
3824 		}
3825 		/*
3826 		 * Now we have a chunk which is marked for another
3827 		 * retransmission to a PR-stream, but may have already run out
3828 		 * of its chances, OR has now been marked to skip. Can we skip
3829 		 * it if it's a resend?
3830 		 */
3831 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3832 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3833 			/*
3834 			 * Now is this one marked for resend and its time is
3835 			 * now up?
3836 			 */
3837 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3838 				/* Yes so drop it */
3839 				if (tp1->data) {
3840 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3841 					    1, SCTP_SO_NOT_LOCKED);
3842 				}
3843 			} else {
3844 				/*
3845 				 * No, we are done when we hit one marked for
3846 				 * resend whose time has not expired.
3847 				 */
3848 				break;
3849 			}
3850 		}
3851 		/*
3852 		 * Ok, now if this chunk is marked to be dropped we can clean
3853 		 * up the chunk, advance our peer ack point, and check
3854 		 * the next chunk.
3855 		 */
3856 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3857 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3858 			/* the advanced PeerAckPoint goes forward */
3859 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3860 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3861 				a_adv = tp1;
3862 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3863 				/* No update but we do save the chk */
3864 				a_adv = tp1;
3865 			}
3866 		} else {
3867 			/*
3868 			 * If it is still in RESEND we can advance no
3869 			 * further
3870 			 */
3871 			break;
3872 		}
3873 	}
3874 	return (a_adv);
3875 }
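
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * and example_advance_peer_ack_point() is a hypothetical helper. It
 * condenses how sctp_try_advance_peer_ack_point() moves the PR-SCTP
 * Advanced.Peer.Ack.Point: the advance may only cross a contiguous
 * prefix of abandoned chunks (FORWARD_TSN_SKIP or NR_ACKED) at the head
 * of the sent queue; the first chunk in any other state stops it. The
 * real code additionally abandons RESEND chunks whose PR-SCTP lifetime
 * has expired before making this decision.
 */
static uint32_t
example_advance_peer_ack_point(const int *sent, const uint32_t *tsn,
    int cnt, uint32_t adv_point)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if ((sent[i] != SCTP_FORWARD_TSN_SKIP) &&
		    (sent[i] != SCTP_DATAGRAM_NR_ACKED)) {
			/* Not abandoned: the ack point can go no further. */
			break;
		}
		if (SCTP_TSN_GT(tsn[i], adv_point)) {
			adv_point = tsn[i];
		}
	}
	return (adv_point);
}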
3876 
3877 static int
3878 sctp_fs_audit(struct sctp_association *asoc)
3879 {
3880 	struct sctp_tmit_chunk *chk;
3881 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3882 	int ret;
3883 #ifndef INVARIANTS
3884 	int entry_flight, entry_cnt;
3885 #endif
3886 
3887 	ret = 0;
3888 #ifndef INVARIANTS
3889 	entry_flight = asoc->total_flight;
3890 	entry_cnt = asoc->total_flight_count;
3891 #endif
3892 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3893 		return (0);
3894 
3895 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3896 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3897 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3898 			    chk->rec.data.tsn,
3899 			    chk->send_size,
3900 			    chk->snd_count);
3901 			inflight++;
3902 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3903 			resend++;
3904 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3905 			inbetween++;
3906 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3907 			above++;
3908 		} else {
3909 			acked++;
3910 		}
3911 	}
3912 
3913 	if ((inflight > 0) || (inbetween > 0)) {
3914 #ifdef INVARIANTS
3915 		panic("Flight size-express incorrect? \n");
3916 #else
3917 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3918 		    entry_flight, entry_cnt);
3919 
3920 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3921 		    inflight, inbetween, resend, above, acked);
3922 		ret = 1;
3923 #endif
3924 	}
3925 	return (ret);
3926 }
3927 
3928 
3929 static void
3930 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3931     struct sctp_association *asoc,
3932     struct sctp_tmit_chunk *tp1)
3933 {
3934 	tp1->window_probe = 0;
3935 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3936 		/* TSNs skipped; we do NOT move back. */
3937 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3938 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3939 		    tp1->book_size,
3940 		    (uint32_t)(uintptr_t)tp1->whoTo,
3941 		    tp1->rec.data.tsn);
3942 		return;
3943 	}
3944 	/* First setup this by shrinking flight */
3945 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3946 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3947 		    tp1);
3948 	}
3949 	sctp_flight_size_decrease(tp1);
3950 	sctp_total_flight_decrease(stcb, tp1);
3951 	/* Now mark for resend */
3952 	tp1->sent = SCTP_DATAGRAM_RESEND;
3953 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3954 
3955 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3956 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3957 		    tp1->whoTo->flight_size,
3958 		    tp1->book_size,
3959 		    (uint32_t)(uintptr_t)tp1->whoTo,
3960 		    tp1->rec.data.tsn);
3961 	}
3962 }
3963 
3964 void
3965 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3966     uint32_t rwnd, int *abort_now, int ecne_seen)
3967 {
3968 	struct sctp_nets *net;
3969 	struct sctp_association *asoc;
3970 	struct sctp_tmit_chunk *tp1, *tp2;
3971 	uint32_t old_rwnd;
3972 	int win_probe_recovery = 0;
3973 	int win_probe_recovered = 0;
3974 	int j, done_once = 0;
3975 	int rto_ok = 1;
3976 	uint32_t send_s;
3977 
3978 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3979 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3980 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3981 	}
3982 	SCTP_TCB_LOCK_ASSERT(stcb);
3983 #ifdef SCTP_ASOCLOG_OF_TSNS
3984 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3985 	stcb->asoc.cumack_log_at++;
3986 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3987 		stcb->asoc.cumack_log_at = 0;
3988 	}
3989 #endif
3990 	asoc = &stcb->asoc;
3991 	old_rwnd = asoc->peers_rwnd;
3992 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3993 		/* old ack */
3994 		return;
3995 	} else if (asoc->last_acked_seq == cumack) {
3996 		/* Window update sack */
3997 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3998 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3999 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4000 			/* SWS sender side engages */
4001 			asoc->peers_rwnd = 0;
4002 		}
4003 		if (asoc->peers_rwnd > old_rwnd) {
4004 			goto again;
4005 		}
4006 		return;
4007 	}
4008 
4009 	/* First setup for CC stuff */
4010 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4011 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4012 			/* Drag along the window_tsn for cwr's */
4013 			net->cwr_window_tsn = cumack;
4014 		}
4015 		net->prev_cwnd = net->cwnd;
4016 		net->net_ack = 0;
4017 		net->net_ack2 = 0;
4018 
4019 		/*
4020 		 * CMT: Reset CUC and Fast recovery algo variables before
4021 		 * SACK processing
4022 		 */
4023 		net->new_pseudo_cumack = 0;
4024 		net->will_exit_fast_recovery = 0;
4025 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4026 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4027 		}
4028 	}
4029 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4030 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4031 		    sctpchunk_listhead);
4032 		send_s = tp1->rec.data.tsn + 1;
4033 	} else {
4034 		send_s = asoc->sending_seq;
4035 	}
4036 	if (SCTP_TSN_GE(cumack, send_s)) {
4037 		struct mbuf *op_err;
4038 		char msg[SCTP_DIAG_INFO_LEN];
4039 
4040 		*abort_now = 1;
4041 		/* XXX */
4042 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4043 		    cumack, send_s);
4044 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4045 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4046 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4047 		return;
4048 	}
4049 	asoc->this_sack_highest_gap = cumack;
4050 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4051 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4052 		    stcb->asoc.overall_error_count,
4053 		    0,
4054 		    SCTP_FROM_SCTP_INDATA,
4055 		    __LINE__);
4056 	}
4057 	stcb->asoc.overall_error_count = 0;
4058 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4059 		/* process the new consecutive TSN first */
4060 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4061 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4062 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4063 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4064 				}
4065 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4066 					/*
4067 					 * If it is less than ACKED, it is
4068 					 * now no longer in flight. Higher
4069 					 * values may occur during marking.
4070 					 */
4071 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4072 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4073 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4074 							    tp1->whoTo->flight_size,
4075 							    tp1->book_size,
4076 							    (uint32_t)(uintptr_t)tp1->whoTo,
4077 							    tp1->rec.data.tsn);
4078 						}
4079 						sctp_flight_size_decrease(tp1);
4080 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4081 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4082 							    tp1);
4083 						}
4084 						/* sa_ignore NO_NULL_CHK */
4085 						sctp_total_flight_decrease(stcb, tp1);
4086 					}
4087 					tp1->whoTo->net_ack += tp1->send_size;
4088 					if (tp1->snd_count < 2) {
4089 						/*
4090 						 * True non-retransmitted
4091 						 * chunk
4092 						 */
4093 						tp1->whoTo->net_ack2 +=
4094 						    tp1->send_size;
4095 
4096 						/* update RTO too? */
4097 						if (tp1->do_rtt) {
4098 							if (rto_ok &&
4099 							    sctp_calculate_rto(stcb,
4100 							    &stcb->asoc,
4101 							    tp1->whoTo,
4102 							    &tp1->sent_rcv_time,
4103 							    SCTP_RTT_FROM_DATA)) {
4104 								rto_ok = 0;
4105 							}
4106 							if (tp1->whoTo->rto_needed == 0) {
4107 								tp1->whoTo->rto_needed = 1;
4108 							}
4109 							tp1->do_rtt = 0;
4110 						}
4111 					}
4112 					/*
4113 					 * CMT: CUCv2 algorithm. From the
4114 					 * cumack'd TSNs, for each TSN being
4115 					 * acked for the first time, set the
4116 					 * following variables for the
4117 					 * corresp destination.
4118 					 * new_pseudo_cumack will trigger a
4119 					 * cwnd update.
4120 					 * find_(rtx_)pseudo_cumack will
4121 					 * trigger search for the next
4122 					 * expected (rtx-)pseudo-cumack.
4123 					 */
4124 					tp1->whoTo->new_pseudo_cumack = 1;
4125 					tp1->whoTo->find_pseudo_cumack = 1;
4126 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4127 
4128 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4129 						/* sa_ignore NO_NULL_CHK */
4130 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4131 					}
4132 				}
4133 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4134 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4135 				}
4136 				if (tp1->rec.data.chunk_was_revoked) {
4137 					/* deflate the cwnd */
4138 					tp1->whoTo->cwnd -= tp1->book_size;
4139 					tp1->rec.data.chunk_was_revoked = 0;
4140 				}
4141 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4142 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4143 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4144 #ifdef INVARIANTS
4145 					} else {
4146 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4147 #endif
4148 					}
4149 				}
4150 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4151 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4152 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4153 					asoc->trigger_reset = 1;
4154 				}
4155 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4156 				if (tp1->data) {
4157 					/* sa_ignore NO_NULL_CHK */
4158 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4159 					sctp_m_freem(tp1->data);
4160 					tp1->data = NULL;
4161 				}
4162 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4163 					sctp_log_sack(asoc->last_acked_seq,
4164 					    cumack,
4165 					    tp1->rec.data.tsn,
4166 					    0,
4167 					    0,
4168 					    SCTP_LOG_FREE_SENT);
4169 				}
4170 				asoc->sent_queue_cnt--;
4171 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4172 			} else {
4173 				break;
4174 			}
4175 		}
4176 
4177 	}
4178 	/* sa_ignore NO_NULL_CHK */
4179 	if (stcb->sctp_socket) {
4180 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4181 		struct socket *so;
4182 
4183 #endif
4184 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4185 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4186 			/* sa_ignore NO_NULL_CHK */
4187 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4188 		}
4189 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4190 		so = SCTP_INP_SO(stcb->sctp_ep);
4191 		atomic_add_int(&stcb->asoc.refcnt, 1);
4192 		SCTP_TCB_UNLOCK(stcb);
4193 		SCTP_SOCKET_LOCK(so, 1);
4194 		SCTP_TCB_LOCK(stcb);
4195 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4196 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4197 			/* assoc was freed while we were unlocked */
4198 			SCTP_SOCKET_UNLOCK(so, 1);
4199 			return;
4200 		}
4201 #endif
4202 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4203 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4204 		SCTP_SOCKET_UNLOCK(so, 1);
4205 #endif
4206 	} else {
4207 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4208 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4209 		}
4210 	}
4211 
4212 	/* JRS - Use the congestion control given in the CC module */
4213 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4214 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4215 			if (net->net_ack2 > 0) {
4216 				/*
4217 				 * Karn's rule applies to clearing the error
4218 				 * count; this is optional.
4219 				 */
4220 				net->error_count = 0;
4221 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4222 					/* addr came good */
4223 					net->dest_state |= SCTP_ADDR_REACHABLE;
4224 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4225 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4226 				}
4227 				if (net == stcb->asoc.primary_destination) {
4228 					if (stcb->asoc.alternate) {
4229 						/*
4230 						 * release the alternate,
4231 						 * primary is good
4232 						 */
4233 						sctp_free_remote_addr(stcb->asoc.alternate);
4234 						stcb->asoc.alternate = NULL;
4235 					}
4236 				}
4237 				if (net->dest_state & SCTP_ADDR_PF) {
4238 					net->dest_state &= ~SCTP_ADDR_PF;
4239 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4240 					    stcb->sctp_ep, stcb, net,
4241 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4242 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4243 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4244 					/* Done with this net */
4245 					net->net_ack = 0;
4246 				}
4247 				/* restore any doubled timers */
4248 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4249 				if (net->RTO < stcb->asoc.minrto) {
4250 					net->RTO = stcb->asoc.minrto;
4251 				}
4252 				if (net->RTO > stcb->asoc.maxrto) {
4253 					net->RTO = stcb->asoc.maxrto;
4254 				}
4255 			}
4256 		}
4257 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4258 	}
4259 	asoc->last_acked_seq = cumack;
4260 
4261 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4262 		/* nothing left in-flight */
4263 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4264 			net->flight_size = 0;
4265 			net->partial_bytes_acked = 0;
4266 		}
4267 		asoc->total_flight = 0;
4268 		asoc->total_flight_count = 0;
4269 	}
4270 
4271 	/* RWND update */
4272 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4273 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4274 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4275 		/* SWS sender side engages */
4276 		asoc->peers_rwnd = 0;
4277 	}
4278 	if (asoc->peers_rwnd > old_rwnd) {
4279 		win_probe_recovery = 1;
4280 	}
4281 	/* Now assure a timer is running wherever data is queued */
4282 again:
4283 	j = 0;
4284 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4285 		if (win_probe_recovery && (net->window_probe)) {
4286 			win_probe_recovered = 1;
4287 			/*
4288 			 * Find the first chunk that was used with a window
4289 			 * probe and clear its sent state
4290 			 */
4291 			/* sa_ignore FREED_MEMORY */
4292 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4293 				if (tp1->window_probe) {
4294 					/* move back to data send queue */
4295 					sctp_window_probe_recovery(stcb, asoc, tp1);
4296 					break;
4297 				}
4298 			}
4299 		}
4300 		if (net->flight_size) {
4301 			j++;
4302 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4303 			if (net->window_probe) {
4304 				net->window_probe = 0;
4305 			}
4306 		} else {
4307 			if (net->window_probe) {
4308 				/*
4309 				 * In window probes we must assure a timer
4310 				 * is still running there
4311 				 */
4312 				net->window_probe = 0;
4313 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4314 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4315 				}
4316 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4317 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4318 				    stcb, net,
4319 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4320 			}
4321 		}
4322 	}
4323 	if ((j == 0) &&
4324 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4325 	    (asoc->sent_queue_retran_cnt == 0) &&
4326 	    (win_probe_recovered == 0) &&
4327 	    (done_once == 0)) {
4328 		/*
4329 		 * huh, this should not happen unless all packets are
4330 		 * PR-SCTP and marked to skip of course.
4331 		 */
4332 		if (sctp_fs_audit(asoc)) {
4333 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4334 				net->flight_size = 0;
4335 			}
4336 			asoc->total_flight = 0;
4337 			asoc->total_flight_count = 0;
4338 			asoc->sent_queue_retran_cnt = 0;
4339 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4340 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4341 					sctp_flight_size_increase(tp1);
4342 					sctp_total_flight_increase(stcb, tp1);
4343 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4344 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4345 				}
4346 			}
4347 		}
4348 		done_once = 1;
4349 		goto again;
4350 	}
4351 	/**********************************/
4352 	/* Now what about shutdown issues */
4353 	/**********************************/
4354 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4355 		/* nothing left on sendqueue... consider done */
4356 		/* clean up */
4357 		if ((asoc->stream_queue_cnt == 1) &&
4358 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4359 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4360 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4361 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4362 		}
4363 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4364 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4365 		    (asoc->stream_queue_cnt == 1) &&
4366 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4367 			struct mbuf *op_err;
4368 
4369 			*abort_now = 1;
4370 			/* XXX */
4371 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4372 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4373 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4374 			return;
4375 		}
4376 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4377 		    (asoc->stream_queue_cnt == 0)) {
4378 			struct sctp_nets *netp;
4379 
4380 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4381 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4382 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4383 			}
4384 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4385 			sctp_stop_timers_for_shutdown(stcb);
4386 			if (asoc->alternate) {
4387 				netp = asoc->alternate;
4388 			} else {
4389 				netp = asoc->primary_destination;
4390 			}
4391 			sctp_send_shutdown(stcb, netp);
4392 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4393 			    stcb->sctp_ep, stcb, netp);
4394 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4395 			    stcb->sctp_ep, stcb, netp);
4396 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4397 		    (asoc->stream_queue_cnt == 0)) {
4398 			struct sctp_nets *netp;
4399 
4400 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4401 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4402 			sctp_stop_timers_for_shutdown(stcb);
4403 			if (asoc->alternate) {
4404 				netp = asoc->alternate;
4405 			} else {
4406 				netp = asoc->primary_destination;
4407 			}
4408 			sctp_send_shutdown_ack(stcb, netp);
4409 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4410 			    stcb->sctp_ep, stcb, netp);
4411 		}
4412 	}
4413 	/*********************************************/
4414 	/* Here we perform PR-SCTP procedures        */
4415 	/* (section 4.2)                             */
4416 	/*********************************************/
4417 	/* C1. update advancedPeerAckPoint */
4418 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4419 		asoc->advanced_peer_ack_point = cumack;
4420 	}
4421 	/* PR-Sctp issues need to be addressed too */
4422 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4423 		struct sctp_tmit_chunk *lchk;
4424 		uint32_t old_adv_peer_ack_point;
4425 
4426 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4427 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4428 		/* C3. See if we need to send a Fwd-TSN */
4429 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4430 			/*
4431 			 * ISSUE with ECN, see FWD-TSN processing.
4432 			 */
4433 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4434 				send_forward_tsn(stcb, asoc);
4435 			} else if (lchk) {
4436 				/* try to FR fwd-tsn's that get lost too */
4437 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4438 					send_forward_tsn(stcb, asoc);
4439 				}
4440 			}
4441 		}
4442 		if (lchk) {
4443 			/* Assure a timer is up */
4444 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4445 			    stcb->sctp_ep, stcb, lchk->whoTo);
4446 		}
4447 	}
4448 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4449 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4450 		    rwnd,
4451 		    stcb->asoc.peers_rwnd,
4452 		    stcb->asoc.total_flight,
4453 		    stcb->asoc.total_output_queue_size);
4454 	}
4455 }
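
/*
 * Editor's note: an illustrative sketch, not part of the original file,
 * and example_peer_rwnd() is a hypothetical helper. It shows the
 * peer-rwnd arithmetic used above: the usable window is the advertised
 * rwnd minus everything still in flight plus a configurable per-chunk
 * overhead allowance, and sender-side silly window syndrome avoidance
 * clamps the result to zero once it falls below the threshold
 * (sctp_sws_sender in the code above).
 */
static uint32_t
example_peer_rwnd(uint32_t advertised_rwnd, uint32_t total_flight,
    uint32_t total_flight_count, uint32_t chunk_oh, uint32_t sws_threshold)
{
	uint32_t used, rwnd;

	used = total_flight + (total_flight_count * chunk_oh);
	/* Clamp at zero, matching the sctp_sbspace_sub() behavior. */
	rwnd = (advertised_rwnd > used) ? (advertised_rwnd - used) : 0;
	if (rwnd < sws_threshold) {
		/* SWS sender side engages. */
		rwnd = 0;
	}
	return (rwnd);
}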
4456 
4457 void
4458 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4459     struct sctp_tcb *stcb,
4460     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4461     int *abort_now, uint8_t flags,
4462     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4463 {
4464 	struct sctp_association *asoc;
4465 	struct sctp_tmit_chunk *tp1, *tp2;
4466 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4467 	uint16_t wake_him = 0;
4468 	uint32_t send_s = 0;
4469 	long j;
4470 	int accum_moved = 0;
4471 	int will_exit_fast_recovery = 0;
4472 	uint32_t a_rwnd, old_rwnd;
4473 	int win_probe_recovery = 0;
4474 	int win_probe_recovered = 0;
4475 	struct sctp_nets *net = NULL;
4476 	int done_once;
4477 	int rto_ok = 1;
4478 	uint8_t reneged_all = 0;
4479 	uint8_t cmt_dac_flag;
4480 
4481 	/*
4482 	 * we take any chance we can to service our queues since we cannot
4483 	 * get awoken when the socket is read from :<
4484 	 */
4485 	/*
4486 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4487 	 * old sack, if so discard. 2) If there is nothing left in the send
4488 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4489 	 * too, update any rwnd change and verify no timers are running.
4490 	 * Then return. 3) Process any new consecutive data, i.e. cum-ack
4491 	 * moved; process these first and note that it moved. 4) Process any
4492 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4493 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4494 	 * sync up flightsizes and things, stop all timers and also check
4495 	 * for shutdown_pending state. If so then go ahead and send off the
4496 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4497 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4498 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4499 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4500 	 * if in shutdown_recv state.
4501 	 */
4502 	SCTP_TCB_LOCK_ASSERT(stcb);
4503 	/* CMT DAC algo */
4504 	this_sack_lowest_newack = 0;
4505 	SCTP_STAT_INCR(sctps_slowpath_sack);
4506 	last_tsn = cum_ack;
4507 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4508 #ifdef SCTP_ASOCLOG_OF_TSNS
4509 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4510 	stcb->asoc.cumack_log_at++;
4511 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4512 		stcb->asoc.cumack_log_at = 0;
4513 	}
4514 #endif
4515 	a_rwnd = rwnd;
4516 
4517 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4518 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4519 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4520 	}
4521 
4522 	old_rwnd = stcb->asoc.peers_rwnd;
4523 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4524 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4525 		    stcb->asoc.overall_error_count,
4526 		    0,
4527 		    SCTP_FROM_SCTP_INDATA,
4528 		    __LINE__);
4529 	}
4530 	stcb->asoc.overall_error_count = 0;
4531 	asoc = &stcb->asoc;
4532 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4533 		sctp_log_sack(asoc->last_acked_seq,
4534 		    cum_ack,
4535 		    0,
4536 		    num_seg,
4537 		    num_dup,
4538 		    SCTP_LOG_NEW_SACK);
4539 	}
4540 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4541 		uint16_t i;
4542 		uint32_t *dupdata, dblock;
4543 
4544 		for (i = 0; i < num_dup; i++) {
4545 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4546 			    sizeof(uint32_t), (uint8_t *)&dblock);
4547 			if (dupdata == NULL) {
4548 				break;
4549 			}
4550 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4551 		}
4552 	}
4553 	/* reality check */
4554 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4555 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4556 		    sctpchunk_listhead);
4557 		send_s = tp1->rec.data.tsn + 1;
4558 	} else {
4559 		tp1 = NULL;
4560 		send_s = asoc->sending_seq;
4561 	}
4562 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4563 		struct mbuf *op_err;
4564 		char msg[SCTP_DIAG_INFO_LEN];
4565 
4566 		/*
4567 		 * no way, we have not even sent this TSN out yet. Peer is
4568 		 * hopelessly messed up with us.
4569 		 */
4570 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4571 		    cum_ack, send_s);
4572 		if (tp1) {
4573 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4574 			    tp1->rec.data.tsn, (void *)tp1);
4575 		}
4576 hopeless_peer:
4577 		*abort_now = 1;
4578 		/* XXX */
4579 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4580 		    cum_ack, send_s);
4581 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4582 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4583 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4584 		return;
4585 	}
4586 	/**********************/
4587 	/* 1) check the range */
4588 	/**********************/
4589 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4590 		/* acking something behind */
4591 		return;
4592 	}
4593 
4594 	/* update the Rwnd of the peer */
4595 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4596 	    TAILQ_EMPTY(&asoc->send_queue) &&
4597 	    (asoc->stream_queue_cnt == 0)) {
4598 		/* nothing left on send/sent and strmq */
4599 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4600 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4601 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4602 		}
4603 		asoc->peers_rwnd = a_rwnd;
4604 		if (asoc->sent_queue_retran_cnt) {
4605 			asoc->sent_queue_retran_cnt = 0;
4606 		}
4607 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4608 			/* SWS sender side engages */
4609 			asoc->peers_rwnd = 0;
4610 		}
4611 		/* stop any timers */
4612 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4613 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4614 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4615 			net->partial_bytes_acked = 0;
4616 			net->flight_size = 0;
4617 		}
4618 		asoc->total_flight = 0;
4619 		asoc->total_flight_count = 0;
4620 		return;
4621 	}
4622 	/*
4623 	 * We init net_ack and net_ack2 to 0. These are used to track two
4624 	 * things: the total byte count acked is tracked in net_ack, AND
4625 	 * net_ack2 is used to track the total bytes acked that are
4626 	 * unambiguous and were never retransmitted. We track these on a per
4627 	 * destination address basis.
4628 	 */
4629 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4630 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4631 			/* Drag along the window_tsn for cwr's */
4632 			net->cwr_window_tsn = cum_ack;
4633 		}
4634 		net->prev_cwnd = net->cwnd;
4635 		net->net_ack = 0;
4636 		net->net_ack2 = 0;
4637 
4638 		/*
4639 		 * CMT: Reset CUC and Fast recovery algo variables before
4640 		 * SACK processing
4641 		 */
4642 		net->new_pseudo_cumack = 0;
4643 		net->will_exit_fast_recovery = 0;
4644 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4645 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4646 		}
4647 
4648 		/*
4649 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4650 		 * to be greater than the cumack. Also reset saw_newack to 0
4651 		 * for all dests.
4652 		 */
4653 		net->saw_newack = 0;
4654 		net->this_sack_highest_newack = last_tsn;
4655 	}
4656 	/* process the new consecutive TSN first */
4657 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4658 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4659 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4660 				accum_moved = 1;
4661 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4662 					/*
4663 					 * If it is less than ACKED, it is
4664 					 * now no longer in flight. Higher
4665 					 * values may occur during marking.
4666 					 */
4667 					if ((tp1->whoTo->dest_state &
4668 					    SCTP_ADDR_UNCONFIRMED) &&
4669 					    (tp1->snd_count < 2)) {
4670 						/*
4671 						 * If there was no retran
4672 						 * and the address is
4673 						 * un-confirmed and we sent
4674 						 * there and are now
4675 						 * sacked... it's confirmed,
4676 						 * mark it so.
4677 						 */
4678 						tp1->whoTo->dest_state &=
4679 						    ~SCTP_ADDR_UNCONFIRMED;
4680 					}
4681 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4682 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4683 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4684 							    tp1->whoTo->flight_size,
4685 							    tp1->book_size,
4686 							    (uint32_t)(uintptr_t)tp1->whoTo,
4687 							    tp1->rec.data.tsn);
4688 						}
4689 						sctp_flight_size_decrease(tp1);
4690 						sctp_total_flight_decrease(stcb, tp1);
4691 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4692 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4693 							    tp1);
4694 						}
4695 					}
4696 					tp1->whoTo->net_ack += tp1->send_size;
4697 
4698 					/* CMT SFR and DAC algos */
4699 					this_sack_lowest_newack = tp1->rec.data.tsn;
4700 					tp1->whoTo->saw_newack = 1;
4701 
4702 					if (tp1->snd_count < 2) {
4703 						/*
4704 						 * True non-retransmitted
4705 						 * chunk
4706 						 */
4707 						tp1->whoTo->net_ack2 +=
4708 						    tp1->send_size;
4709 
4710 						/* update RTO too? */
4711 						if (tp1->do_rtt) {
4712 							if (rto_ok &&
4713 							    sctp_calculate_rto(stcb,
4714 							    &stcb->asoc,
4715 							    tp1->whoTo,
4716 							    &tp1->sent_rcv_time,
4717 							    SCTP_RTT_FROM_DATA)) {
4718 								rto_ok = 0;
4719 							}
4720 							if (tp1->whoTo->rto_needed == 0) {
4721 								tp1->whoTo->rto_needed = 1;
4722 							}
4723 							tp1->do_rtt = 0;
4724 						}
4725 					}
4726 					/*
4727 					 * CMT: CUCv2 algorithm. From the
4728 					 * cumack'd TSNs, for each TSN being
4729 					 * acked for the first time, set the
4730 					 * following variables for the
4731 					 * corresp destination.
4732 					 * new_pseudo_cumack will trigger a
4733 					 * cwnd update.
4734 					 * find_(rtx_)pseudo_cumack will
4735 					 * trigger search for the next
4736 					 * expected (rtx-)pseudo-cumack.
4737 					 */
4738 					tp1->whoTo->new_pseudo_cumack = 1;
4739 					tp1->whoTo->find_pseudo_cumack = 1;
4740 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4741 
4742 
4743 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4744 						sctp_log_sack(asoc->last_acked_seq,
4745 						    cum_ack,
4746 						    tp1->rec.data.tsn,
4747 						    0,
4748 						    0,
4749 						    SCTP_LOG_TSN_ACKED);
4750 					}
4751 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4752 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4753 					}
4754 				}
4755 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4756 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4757 #ifdef SCTP_AUDITING_ENABLED
4758 					sctp_audit_log(0xB3,
4759 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4760 #endif
4761 				}
4762 				if (tp1->rec.data.chunk_was_revoked) {
4763 					/* deflate the cwnd */
4764 					tp1->whoTo->cwnd -= tp1->book_size;
4765 					tp1->rec.data.chunk_was_revoked = 0;
4766 				}
4767 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4768 					tp1->sent = SCTP_DATAGRAM_ACKED;
4769 				}
4770 			}
4771 		} else {
4772 			break;
4773 		}
4774 	}
4775 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4776 	/* always set this up to cum-ack */
4777 	asoc->this_sack_highest_gap = last_tsn;
4778 
4779 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4780 
4781 		/*
4782 		 * this_sack_highest_gap will increase while handling NEW
4783 		 * segments. this_sack_highest_newack will increase while
4784 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4785 		 * used for the CMT DAC algorithm. saw_newack will also change.
4786 		 */
4787 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4788 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4789 		    num_seg, num_nr_seg, &rto_ok)) {
4790 			wake_him++;
4791 		}
4792 		/*
4793 		 * validate the biggest_tsn_acked in the gap acks if strict
4794 		 * adherence is wanted.
4795 		 */
4796 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4797 			/*
4798 			 * peer is either confused or we are under attack.
4799 			 * We must abort.
4800 			 */
4801 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4802 			    biggest_tsn_acked, send_s);
4803 			goto hopeless_peer;
4804 		}
4805 	}
4806 	/********************************************/
4807 	/* cancel ALL T3-send timers if accum moved */
4808 	/********************************************/
4809 	if (asoc->sctp_cmt_on_off > 0) {
4810 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4811 			if (net->new_pseudo_cumack)
4812 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4813 				    stcb, net,
4814 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4815 
4816 		}
4817 	} else {
4818 		if (accum_moved) {
4819 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4820 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4821 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4822 			}
4823 		}
4824 	}
4825 	/*********************************************/
4826 	/* drop the acked chunks from the sent queue */
4827 	/*********************************************/
4828 	asoc->last_acked_seq = cum_ack;
4829 
4830 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4831 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4832 			break;
4833 		}
4834 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4835 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4836 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4837 #ifdef INVARIANTS
4838 			} else {
4839 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4840 #endif
4841 			}
4842 		}
4843 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4844 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4845 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4846 			asoc->trigger_reset = 1;
4847 		}
4848 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4849 		if (PR_SCTP_ENABLED(tp1->flags)) {
4850 			if (asoc->pr_sctp_cnt != 0)
4851 				asoc->pr_sctp_cnt--;
4852 		}
4853 		asoc->sent_queue_cnt--;
4854 		if (tp1->data) {
4855 			/* sa_ignore NO_NULL_CHK */
4856 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4857 			sctp_m_freem(tp1->data);
4858 			tp1->data = NULL;
4859 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4860 				asoc->sent_queue_cnt_removeable--;
4861 			}
4862 		}
4863 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4864 			sctp_log_sack(asoc->last_acked_seq,
4865 			    cum_ack,
4866 			    tp1->rec.data.tsn,
4867 			    0,
4868 			    0,
4869 			    SCTP_LOG_FREE_SENT);
4870 		}
4871 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4872 		wake_him++;
4873 	}
4874 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4875 #ifdef INVARIANTS
4876 		panic("Warning flight size is positive and should be 0");
4877 #else
4878 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4879 		    asoc->total_flight);
4880 #endif
4881 		asoc->total_flight = 0;
4882 	}
4883 
4884 	/* sa_ignore NO_NULL_CHK */
4885 	if ((wake_him) && (stcb->sctp_socket)) {
4886 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4887 		struct socket *so;
4888 
4889 #endif
4890 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4891 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4892 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4893 		}
4894 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4895 		so = SCTP_INP_SO(stcb->sctp_ep);
4896 		atomic_add_int(&stcb->asoc.refcnt, 1);
4897 		SCTP_TCB_UNLOCK(stcb);
4898 		SCTP_SOCKET_LOCK(so, 1);
4899 		SCTP_TCB_LOCK(stcb);
4900 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4901 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4902 			/* assoc was freed while we were unlocked */
4903 			SCTP_SOCKET_UNLOCK(so, 1);
4904 			return;
4905 		}
4906 #endif
4907 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4908 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4909 		SCTP_SOCKET_UNLOCK(so, 1);
4910 #endif
4911 	} else {
4912 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4913 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4914 		}
4915 	}
4916 
4917 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4918 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4919 			/* Setup so we will exit RFC2582 fast recovery */
4920 			will_exit_fast_recovery = 1;
4921 		}
4922 	}
4923 	/*
4924 	 * Check for revoked fragments:
4925 	 *
4926 	 * If the previous SACK had no frags, then nothing can have been
4927 	 * revoked. If the previous SACK had frags, then: if we now have
4928 	 * frags (i.e. num_seg > 0), call sctp_check_for_revoked() to tell
4929 	 * if the peer revoked some of them; else the peer revoked all
4930 	 * ACKED fragments, since we had some before and now we have NONE.
4931 	 */
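	/*
	 * Summarized as a decision table (a reading aid, derived from the
	 * code below):
	 *
	 *   saw_sack_with_frags   num_seg   action
	 *   -------------------   -------   ------------------------------
	 *   any                   > 0       sctp_check_for_revoked()
	 *   1                     0         un-ack all previously ACKED
	 *   0                     0         nothing can have been revoked
	 */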
4932 
4933 	if (num_seg) {
4934 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4935 		asoc->saw_sack_with_frags = 1;
4936 	} else if (asoc->saw_sack_with_frags) {
4937 		int cnt_revoked = 0;
4938 
4939 		/* Peer revoked all dg's marked or acked */
4940 		/* Peer revoked all datagrams marked or acked */
4941 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4942 				tp1->sent = SCTP_DATAGRAM_SENT;
4943 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4944 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4945 					    tp1->whoTo->flight_size,
4946 					    tp1->book_size,
4947 					    (uint32_t)(uintptr_t)tp1->whoTo,
4948 					    tp1->rec.data.tsn);
4949 				}
4950 				sctp_flight_size_increase(tp1);
4951 				sctp_total_flight_increase(stcb, tp1);
4952 				tp1->rec.data.chunk_was_revoked = 1;
4953 				/*
4954 				 * To ensure that this increase in
4955 				 * flightsize, which is artificial, does not
4956 				 * throttle the sender, we also increase the
4957 				 * cwnd artificially.
4958 				 */
4959 				tp1->whoTo->cwnd += tp1->book_size;
4960 				cnt_revoked++;
4961 			}
4962 		}
4963 		if (cnt_revoked) {
4964 			reneged_all = 1;
4965 		}
4966 		asoc->saw_sack_with_frags = 0;
4967 	}
4968 	if (num_nr_seg > 0)
4969 		asoc->saw_sack_with_nr_frags = 1;
4970 	else
4971 		asoc->saw_sack_with_nr_frags = 0;
4972 
4973 	/* JRS - Use the congestion control given in the CC module */
4974 	if (ecne_seen == 0) {
4975 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4976 			if (net->net_ack2 > 0) {
4977 				/*
4978 				 * Karn's rule applies to clearing error
4979 				 * count, this is optional.
4980 				 */
4981 				net->error_count = 0;
4982 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4983 					/* addr came good */
4984 					net->dest_state |= SCTP_ADDR_REACHABLE;
4985 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4986 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4987 				}
4988 
4989 				if (net == stcb->asoc.primary_destination) {
4990 					if (stcb->asoc.alternate) {
4991 						/*
4992 						 * release the alternate,
4993 						 * primary is good
4994 						 */
4995 						sctp_free_remote_addr(stcb->asoc.alternate);
4996 						stcb->asoc.alternate = NULL;
4997 					}
4998 				}
4999 
5000 				if (net->dest_state & SCTP_ADDR_PF) {
5001 					net->dest_state &= ~SCTP_ADDR_PF;
5002 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5003 					    stcb->sctp_ep, stcb, net,
5004 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5005 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5006 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5007 					/* Done with this net */
5008 					net->net_ack = 0;
5009 				}
5010 				/* restore any doubled timers */
5011 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5012 				if (net->RTO < stcb->asoc.minrto) {
5013 					net->RTO = stcb->asoc.minrto;
5014 				}
5015 				if (net->RTO > stcb->asoc.maxrto) {
5016 					net->RTO = stcb->asoc.maxrto;
5017 				}
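				/*
				 * Worked example for the restore above
				 * (hypothetical values, assuming
				 * SCTP_RTT_SHIFT is 3 as in this
				 * implementation): lastsa = 800 and
				 * lastsv = 100 give RTO = (800 >> 3) +
				 * 100 = 200, then clamped into
				 * [minrto, maxrto].
				 */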
5018 			}
5019 		}
5020 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5021 	}
5022 
5023 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5024 		/* nothing left in-flight */
5025 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5026 			/* stop all timers */
5027 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5028 			    stcb, net,
5029 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5030 			net->flight_size = 0;
5031 			net->partial_bytes_acked = 0;
5032 		}
5033 		asoc->total_flight = 0;
5034 		asoc->total_flight_count = 0;
5035 	}
5036 
5037 	/**********************************/
5038 	/* Now what about shutdown issues */
5039 	/**********************************/
5040 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5041 		/* nothing left on the send queue... consider done */
5042 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5043 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5044 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5045 		}
5046 		asoc->peers_rwnd = a_rwnd;
5047 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5048 			/* SWS sender side engages */
5049 			asoc->peers_rwnd = 0;
5050 		}
5051 		/* clean up */
5052 		if ((asoc->stream_queue_cnt == 1) &&
5053 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5054 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5055 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5056 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5057 		}
5058 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5059 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5060 		    (asoc->stream_queue_cnt == 1) &&
5061 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5062 			struct mbuf *op_err;
5063 
5064 			*abort_now = 1;
5065 			/* XXX */
5066 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5067 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5068 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5069 			return;
5070 		}
5071 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5072 		    (asoc->stream_queue_cnt == 0)) {
5073 			struct sctp_nets *netp;
5074 
5075 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5076 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5077 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5078 			}
5079 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5080 			sctp_stop_timers_for_shutdown(stcb);
5081 			if (asoc->alternate) {
5082 				netp = asoc->alternate;
5083 			} else {
5084 				netp = asoc->primary_destination;
5085 			}
5086 			sctp_send_shutdown(stcb, netp);
5087 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5088 			    stcb->sctp_ep, stcb, netp);
5089 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5090 			    stcb->sctp_ep, stcb, netp);
5091 			return;
5092 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5093 		    (asoc->stream_queue_cnt == 0)) {
5094 			struct sctp_nets *netp;
5095 
5096 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5097 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5098 			sctp_stop_timers_for_shutdown(stcb);
5099 			if (asoc->alternate) {
5100 				netp = asoc->alternate;
5101 			} else {
5102 				netp = asoc->primary_destination;
5103 			}
5104 			sctp_send_shutdown_ack(stcb, netp);
5105 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5106 			    stcb->sctp_ep, stcb, netp);
5107 			return;
5108 		}
5109 	}
5110 	/*
5111 	 * Now here we are going to recycle net_ack for a different use...
5112 	 * HEADS UP.
5113 	 */
5114 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115 		net->net_ack = 0;
5116 	}
5117 
5118 	/*
5119 	 * CMT DAC algorithm: If the SACK DAC flag was 0, then no extra
5120 	 * marking is to be done. Setting this_sack_lowest_newack to the
5121 	 * cum_ack will automatically ensure that.
5122 	 */
5123 	if ((asoc->sctp_cmt_on_off > 0) &&
5124 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5125 	    (cmt_dac_flag == 0)) {
5126 		this_sack_lowest_newack = cum_ack;
5127 	}
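	/*
	 * E.g. with cum_ack = 100 and the DAC flag clear,
	 * this_sack_lowest_newack becomes 100; every chunk still on the
	 * sent queue has a TSN above 100, so the strike logic below applies
	 * no extra DAC-based marking.
	 */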
5128 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5129 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5130 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5131 	}
5132 	/* JRS - Use the congestion control given in the CC module */
5133 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5134 
5135 	/* Now are we exiting loss recovery ? */
5136 	if (will_exit_fast_recovery) {
5137 		/* Ok, we must exit fast recovery */
5138 		asoc->fast_retran_loss_recovery = 0;
5139 	}
5140 	if ((asoc->sat_t3_loss_recovery) &&
5141 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5142 		/* end satellite t3 loss recovery */
5143 		asoc->sat_t3_loss_recovery = 0;
5144 	}
5145 	/*
5146 	 * CMT Fast recovery
5147 	 */
5148 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5149 		if (net->will_exit_fast_recovery) {
5150 			/* Ok, we must exit fast recovery */
5151 			net->fast_retran_loss_recovery = 0;
5152 		}
5153 	}
5154 
5155 	/* Adjust and set the new rwnd value */
5156 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5157 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5158 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5159 	}
5160 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5161 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5162 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5163 		/* SWS sender side engages */
5164 		asoc->peers_rwnd = 0;
5165 	}
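	/*
	 * Worked example for the computation above (hypothetical numbers,
	 * assuming the default sctp_peer_chunk_oh of 256 bytes): a_rwnd =
	 * 10000 with total_flight = 4000 and total_flight_count = 3 yields
	 * peers_rwnd = 10000 - (4000 + 3 * 256) = 5232. A result below
	 * sctp_sws_sender is forced to 0 so we do not dribble out
	 * silly-window-sized packets.
	 */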
5166 	if (asoc->peers_rwnd > old_rwnd) {
5167 		win_probe_recovery = 1;
5168 	}
5169 
5170 	/*
5171 	 * Now we must setup so we have a timer up for anyone with
5172 	 * outstanding data.
5173 	 */
5174 	done_once = 0;
5175 again:
5176 	j = 0;
5177 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5178 		if (win_probe_recovery && (net->window_probe)) {
5179 			win_probe_recovered = 1;
5180 			/*-
5181 			 * Find the first chunk that was used for a
5182 			 * window probe and clear the event. Put it
5183 			 * back into the send queue as if it had
5184 			 * not been sent.
5185 			 */
5186 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5187 				if (tp1->window_probe) {
5188 					sctp_window_probe_recovery(stcb, asoc, tp1);
5189 					break;
5190 				}
5191 			}
5192 		}
5193 		if (net->flight_size) {
5194 			j++;
5195 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5196 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5197 				    stcb->sctp_ep, stcb, net);
5198 			}
5199 			if (net->window_probe) {
5200 				net->window_probe = 0;
5201 			}
5202 		} else {
5203 			if (net->window_probe) {
5204 				/*
5205 				 * In window probes we must ensure a timer
5206 				 * is still running there.
5207 				 */
5208 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5209 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5210 					    stcb->sctp_ep, stcb, net);
5211 
5212 				}
5213 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5214 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5215 				    stcb, net,
5216 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5217 			}
5218 		}
5219 	}
5220 	if ((j == 0) &&
5221 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5222 	    (asoc->sent_queue_retran_cnt == 0) &&
5223 	    (win_probe_recovered == 0) &&
5224 	    (done_once == 0)) {
5225 		/*
5226 		 * huh, this should not happen unless all packets are
5227 		 * PR-SCTP and marked to skip, of course.
5228 		 */
5229 		if (sctp_fs_audit(asoc)) {
5230 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5231 				net->flight_size = 0;
5232 			}
5233 			asoc->total_flight = 0;
5234 			asoc->total_flight_count = 0;
5235 			asoc->sent_queue_retran_cnt = 0;
5236 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5237 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5238 					sctp_flight_size_increase(tp1);
5239 					sctp_total_flight_increase(stcb, tp1);
5240 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5241 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5242 				}
5243 			}
5244 		}
5245 		done_once = 1;
5246 		goto again;
5247 	}
5248 	/*********************************************/
5249 	/* Here we perform PR-SCTP procedures        */
5250 	/* (section 4.2)                             */
5251 	/*********************************************/
5252 	/* C1. update advancedPeerAckPoint */
5253 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5254 		asoc->advanced_peer_ack_point = cum_ack;
5255 	}
5256 	/* C2. try to further move advancedPeerAckPoint ahead */
5257 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5258 		struct sctp_tmit_chunk *lchk;
5259 		uint32_t old_adv_peer_ack_point;
5260 
5261 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5262 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5263 		/* C3. See if we need to send a Fwd-TSN */
5264 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5265 			/*
5266 			 * ISSUE with ECN, see FWD-TSN processing.
5267 			 */
5268 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5269 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5270 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5271 				    old_adv_peer_ack_point);
5272 			}
5273 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5274 				send_forward_tsn(stcb, asoc);
5275 			} else if (lchk) {
5276 				/* try to FR fwd-tsn's that get lost too */
5277 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5278 					send_forward_tsn(stcb, asoc);
5279 				}
5280 			}
5281 		}
5282 		if (lchk) {
5283 			/* Ensure a timer is up */
5284 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5285 			    stcb->sctp_ep, stcb, lchk->whoTo);
5286 		}
5287 	}
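	/*
	 * Worked example (hypothetical TSNs): with cum_ack = 100 and
	 * abandoned PR-SCTP TSNs 101-103 on the sent queue,
	 * sctp_try_advance_peer_ack_point() moves advanced_peer_ack_point
	 * to 103; since 103 is beyond both cum_ack and the old ack point,
	 * a FORWARD-TSN carrying new cumulative TSN 103 is sent.
	 */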
5288 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5289 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5290 		    a_rwnd,
5291 		    stcb->asoc.peers_rwnd,
5292 		    stcb->asoc.total_flight,
5293 		    stcb->asoc.total_output_queue_size);
5294 	}
5295 }
5296 
5297 void
5298 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5299 {
5300 	/* Copy cum-ack */
5301 	uint32_t cum_ack, a_rwnd;
5302 
5303 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5304 	/* Arrange so a_rwnd does NOT change */
5305 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
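	/*
	 * Illustrative check (ignoring the per-chunk overhead term): with
	 * peers_rwnd = 5000 and total_flight = 2000, a_rwnd is reported as
	 * 7000; when the SACK handling subtracts the flight again,
	 * peers_rwnd stays at 5000.
	 */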
5306 
5307 	/* Now call the express sack handling */
5308 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5309 }
5310 
5311 static void
5312 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5313     struct sctp_stream_in *strmin)
5314 {
5315 	struct sctp_queued_to_read *control, *ncontrol;
5316 	struct sctp_association *asoc;
5317 	uint32_t mid;
5318 	int need_reasm_check = 0;
5319 
5320 	asoc = &stcb->asoc;
5321 	mid = strmin->last_mid_delivered;
5322 	/*
5323 	 * First deliver anything prior to and including the message ID
5324 	 * that came in.
5325 	 */
5326 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5327 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5328 			/* this is deliverable now */
5329 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5330 				if (control->on_strm_q) {
5331 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5332 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5333 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5334 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5335 #ifdef INVARIANTS
5336 					} else {
5337 						panic("strmin: %p ctl: %p unknown %d",
5338 						    strmin, control, control->on_strm_q);
5339 #endif
5340 					}
5341 					control->on_strm_q = 0;
5342 				}
5343 				/* subtract pending on streams */
5344 				if (asoc->size_on_all_streams >= control->length) {
5345 					asoc->size_on_all_streams -= control->length;
5346 				} else {
5347 #ifdef INVARIANTS
5348 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5349 #else
5350 					asoc->size_on_all_streams = 0;
5351 #endif
5352 				}
5353 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5354 				/* deliver it to at least the delivery-q */
5355 				if (stcb->sctp_socket) {
5356 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5357 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5358 					    control,
5359 					    &stcb->sctp_socket->so_rcv,
5360 					    1, SCTP_READ_LOCK_HELD,
5361 					    SCTP_SO_NOT_LOCKED);
5362 				}
5363 			} else {
5364 				/* It's a fragmented message */
5365 				if (control->first_frag_seen) {
5366 					/*
5367 					 * Make it so this is next to
5368 					 * deliver; we restore it later.
5369 					 */
5370 					strmin->last_mid_delivered = control->mid - 1;
5371 					need_reasm_check = 1;
5372 					break;
5373 				}
5374 			}
5375 		} else {
5376 			/* no more delivery now. */
5377 			break;
5378 		}
5379 	}
5380 	if (need_reasm_check) {
5381 		int ret;
5382 
5383 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5384 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5385 			/* Restore the next to deliver unless we are ahead */
5386 			strmin->last_mid_delivered = mid;
5387 		}
5388 		if (ret == 0) {
5389 			/* Left the partially delivered message at the front */
5390 			return;
5391 		}
5392 		need_reasm_check = 0;
5393 	}
5394 	/*
5395 	 * Now we must deliver things in the queue the normal way, if any
5396 	 * are now ready.
5397 	 */
5398 	mid = strmin->last_mid_delivered + 1;
5399 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5400 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5401 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5402 				/* this is deliverable now */
5403 				if (control->on_strm_q) {
5404 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5405 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5406 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5407 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5408 #ifdef INVARIANTS
5409 					} else {
5410 						panic("strmin: %p ctl: %p unknown %d",
5411 						    strmin, control, control->on_strm_q);
5412 #endif
5413 					}
5414 					control->on_strm_q = 0;
5415 				}
5416 				/* subtract pending on streams */
5417 				if (asoc->size_on_all_streams >= control->length) {
5418 					asoc->size_on_all_streams -= control->length;
5419 				} else {
5420 #ifdef INVARIANTS
5421 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5422 #else
5423 					asoc->size_on_all_streams = 0;
5424 #endif
5425 				}
5426 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5427 				/* deliver it to at least the delivery-q */
5428 				strmin->last_mid_delivered = control->mid;
5429 				if (stcb->sctp_socket) {
5430 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5431 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5432 					    control,
5433 					    &stcb->sctp_socket->so_rcv, 1,
5434 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5435 
5436 				}
5437 				mid = strmin->last_mid_delivered + 1;
5438 			} else {
5439 				/* It's a fragmented message */
5440 				if (control->first_frag_seen) {
5441 					/*
5442 					 * Make it so this is next to
5443 					 * deliver
5444 					 */
5445 					strmin->last_mid_delivered = control->mid - 1;
5446 					need_reasm_check = 1;
5447 					break;
5448 				}
5449 			}
5450 		} else {
5451 			break;
5452 		}
5453 	}
5454 	if (need_reasm_check) {
5455 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5456 	}
5457 }
5458 
5459 
5460 
5461 static void
5462 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5463     struct sctp_association *asoc,
5464     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5465 {
5466 	struct sctp_queued_to_read *control;
5467 	struct sctp_stream_in *strm;
5468 	struct sctp_tmit_chunk *chk, *nchk;
5469 	int cnt_removed = 0;
5470 
5471 	/*
5472 	 * For now, large messages held on the stream reassembly queue that
5473 	 * are complete will be tossed too. We could in theory do more work
5474 	 * to spin through and stop after dumping one message, i.e. on
5475 	 * seeing the start of a new message at the head, and call the
5476 	 * delivery function to see if it can be delivered. But for now we
5477 	 * just dump everything on the queue.
5478 	 */
5479 	strm = &asoc->strmin[stream];
5480 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5481 	if (control == NULL) {
5482 		/* Not found */
5483 		return;
5484 	}
5485 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5486 		return;
5487 	}
5488 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5489 		/* Purge hanging chunks */
5490 		if (!asoc->idata_supported && (ordered == 0)) {
5491 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5492 				break;
5493 			}
5494 		}
5495 		cnt_removed++;
5496 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5497 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5498 			asoc->size_on_reasm_queue -= chk->send_size;
5499 		} else {
5500 #ifdef INVARIANTS
5501 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5502 #else
5503 			asoc->size_on_reasm_queue = 0;
5504 #endif
5505 		}
5506 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5507 		if (chk->data) {
5508 			sctp_m_freem(chk->data);
5509 			chk->data = NULL;
5510 		}
5511 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5512 	}
5513 	if (!TAILQ_EMPTY(&control->reasm)) {
5514 		/* This has to be old data, unordered */
5515 		if (control->data) {
5516 			sctp_m_freem(control->data);
5517 			control->data = NULL;
5518 		}
5519 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5520 		chk = TAILQ_FIRST(&control->reasm);
5521 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5522 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5523 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5524 			    chk, SCTP_READ_LOCK_HELD);
5525 		}
5526 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5527 		return;
5528 	}
5529 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5530 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5531 		if (asoc->size_on_all_streams >= control->length) {
5532 			asoc->size_on_all_streams -= control->length;
5533 		} else {
5534 #ifdef INVARIANTS
5535 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5536 #else
5537 			asoc->size_on_all_streams = 0;
5538 #endif
5539 		}
5540 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5541 		control->on_strm_q = 0;
5542 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5543 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5544 		control->on_strm_q = 0;
5545 #ifdef INVARIANTS
5546 	} else if (control->on_strm_q) {
5547 		panic("strm: %p ctl: %p unknown %d",
5548 		    strm, control, control->on_strm_q);
5549 #endif
5550 	}
5551 	control->on_strm_q = 0;
5552 	if (control->on_read_q == 0) {
5553 		sctp_free_remote_addr(control->whoFrom);
5554 		if (control->data) {
5555 			sctp_m_freem(control->data);
5556 			control->data = NULL;
5557 		}
5558 		sctp_free_a_readq(stcb, control);
5559 	}
5560 }
5561 
5562 void
5563 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5564     struct sctp_forward_tsn_chunk *fwd,
5565     int *abort_flag, struct mbuf *m, int offset)
5566 {
5567 	/* The pr-sctp fwd tsn */
5568 	/*
5569 	/*
5570 	 * Here we will perform all the data receiver side steps for
5571 	 * processing FwdTSN, as required by the PR-SCTP draft.
5572 	 *
5573 	 * Assume we get FwdTSN(x):
5574 	 * 1) update local cumTSN to x
5575 	 * 2) try to further advance cumTSN to x + others we have
5576 	 * 3) examine and update re-ordering queue on pr-in-streams
5577 	 * 4) clean up re-assembly queue
5578 	 * 5) send a SACK to report where we are
5579 	 */
5580 	uint32_t new_cum_tsn, gap;
5581 	unsigned int i, fwd_sz, m_size;
5582 	uint32_t str_seq;
5583 	struct sctp_stream_in *strm;
5584 	struct sctp_queued_to_read *control, *sv;
5585 
5586 	asoc = &stcb->asoc;
5587 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5588 		SCTPDBG(SCTP_DEBUG_INDATA1,
5589 		    "Bad size too small/big fwd-tsn\n");
5590 		return;
5591 	}
5592 	m_size = (stcb->asoc.mapping_array_size << 3);
5593 	/*************************************************************/
5594 	/* 1. Here we update local cumTSN and shift the bitmap array */
5595 	/*************************************************************/
5596 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5597 
5598 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5599 		/* Already got there ... */
5600 		return;
5601 	}
5602 	/*
5603 	 * now we know the new TSN is more advanced, let's find the actual
5604 	 * gap
5605 	 */
5606 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
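	/*
	 * E.g. with mapping_array_base_tsn = 1000 and new_cum_tsn = 1005,
	 * gap = 5. The macro handles TSN wraparound, so a base of
	 * 0xfffffffe with a new cumulative TSN of 1 yields gap = 3.
	 */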
5607 	asoc->cumulative_tsn = new_cum_tsn;
5608 	if (gap >= m_size) {
5609 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5610 			struct mbuf *op_err;
5611 			char msg[SCTP_DIAG_INFO_LEN];
5612 
5613 			/*
5614 			 * out of range (of single byte chunks in the rwnd I
5615 			 * give out). This must be an attacker.
5616 			 */
5617 			*abort_flag = 1;
5618 			snprintf(msg, sizeof(msg),
5619 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5620 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5621 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5622 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5623 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5624 			return;
5625 		}
5626 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5627 
5628 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5629 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5630 		asoc->highest_tsn_inside_map = new_cum_tsn;
5631 
5632 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5633 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5634 
5635 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5636 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5637 		}
5638 	} else {
5639 		SCTP_TCB_LOCK_ASSERT(stcb);
5640 		for (i = 0; i <= gap; i++) {
5641 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5642 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5643 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5644 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5645 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5646 				}
5647 			}
5648 		}
5649 	}
5650 	/*************************************************************/
5651 	/* 2. Clear up re-assembly queue                             */
5652 	/*************************************************************/
5653 
5654 	/* This is now done as part of clearing up the stream/seq */
5655 	if (asoc->idata_supported == 0) {
5656 		uint16_t sid;
5657 
5658 		/* Flush all the un-ordered data based on cum-tsn */
5659 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5660 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5661 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5662 		}
5663 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5664 	}
5665 	/*******************************************************/
5666 	/* 3. Update the PR-stream re-ordering queues and fix  */
5667 	/*    delivery issues as needed.                       */
5668 	/*******************************************************/
5669 	fwd_sz -= sizeof(*fwd);
5670 	if (m && fwd_sz) {
5671 		/* New method. */
5672 		unsigned int num_str;
5673 		uint32_t mid, cur_mid;
5674 		uint16_t sid;
5675 		uint16_t ordered, flags;
5676 		struct sctp_strseq *stseq, strseqbuf;
5677 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5678 
5679 		offset += sizeof(*fwd);
5680 
5681 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5682 		if (asoc->idata_supported) {
5683 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5684 		} else {
5685 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5686 		}
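		/*
		 * Per-entry wire formats (a sketch; sctp_header.h has the
		 * authoritative definitions): the legacy form carries
		 * { uint16_t sid; uint16_t ssn; } (4 bytes), the I-DATA
		 * form { uint16_t sid; uint16_t flags; uint32_t mid; }
		 * (8 bytes). So a remaining fwd_sz of 24 describes six
		 * legacy entries but only three I-DATA entries.
		 */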
5687 		for (i = 0; i < num_str; i++) {
5688 			if (asoc->idata_supported) {
5689 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5690 				    sizeof(struct sctp_strseq_mid),
5691 				    (uint8_t *)&strseqbuf_m);
5692 				offset += sizeof(struct sctp_strseq_mid);
5693 				if (stseq_m == NULL) {
5694 					break;
5695 				}
5696 				sid = ntohs(stseq_m->sid);
5697 				mid = ntohl(stseq_m->mid);
5698 				flags = ntohs(stseq_m->flags);
5699 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5700 					ordered = 0;
5701 				} else {
5702 					ordered = 1;
5703 				}
5704 			} else {
5705 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5706 				    sizeof(struct sctp_strseq),
5707 				    (uint8_t *)&strseqbuf);
5708 				offset += sizeof(struct sctp_strseq);
5709 				if (stseq == NULL) {
5710 					break;
5711 				}
5712 				sid = ntohs(stseq->sid);
5713 				mid = (uint32_t)ntohs(stseq->ssn);
5714 				ordered = 1;
5715 			}
5716 			/* Convert */
5717 
5718 			/* now process */
5719 
5720 			/*
5721 			 * Ok, we now look for the stream/seq on the read
5722 			 * queue where it is not all delivered. If we find
5723 			 * it, we transmute the read entry into a PDI_ABORTED.
5724 			 */
5725 			if (sid >= asoc->streamincnt) {
5726 				/* screwed up streams, stop!  */
5727 				break;
5728 			}
5729 			if ((asoc->str_of_pdapi == sid) &&
5730 			    (asoc->ssn_of_pdapi == mid)) {
5731 				/*
5732 				 * If this is the one we were partially
5733 				 * delivering, then we no longer are.
5734 				 * Note this will change with the reassembly
5735 				 * re-write.
5736 				 */
5737 				asoc->fragmented_delivery_inprogress = 0;
5738 			}
5739 			strm = &asoc->strmin[sid];
5740 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5741 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5742 			}
5743 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5744 				if ((control->sinfo_stream == sid) &&
5745 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5746 					str_seq = (sid << 16) | (0x0000ffff & mid);
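					/*
					 * E.g. sid = 2 and mid = 7 pack to
					 * str_seq = 0x00020007; for I-DATA
					 * only the low 16 bits of the 32-bit
					 * MID survive this packing.
					 */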
5747 					control->pdapi_aborted = 1;
5748 					sv = stcb->asoc.control_pdapi;
5749 					control->end_added = 1;
5750 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5751 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5752 						if (asoc->size_on_all_streams >= control->length) {
5753 							asoc->size_on_all_streams -= control->length;
5754 						} else {
5755 #ifdef INVARIANTS
5756 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5757 #else
5758 							asoc->size_on_all_streams = 0;
5759 #endif
5760 						}
5761 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5762 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5763 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5764 #ifdef INVARIANTS
5765 					} else if (control->on_strm_q) {
5766 						panic("strm: %p ctl: %p unknown %d",
5767 						    strm, control, control->on_strm_q);
5768 #endif
5769 					}
5770 					control->on_strm_q = 0;
5771 					stcb->asoc.control_pdapi = control;
5772 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5773 					    stcb,
5774 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5775 					    (void *)&str_seq,
5776 					    SCTP_SO_NOT_LOCKED);
5777 					stcb->asoc.control_pdapi = sv;
5778 					break;
5779 				} else if ((control->sinfo_stream == sid) &&
5780 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5781 					/* We are past our victim SSN */
5782 					break;
5783 				}
5784 			}
5785 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5786 				/* Update the sequence number */
5787 				strm->last_mid_delivered = mid;
5788 			}
5789 			/* now kick the stream the new way */
5790 			/* sa_ignore NO_NULL_CHK */
5791 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5792 		}
5793 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5794 	}
5795 	/*
5796 	 * Now slide things forward.
5797 	 */
5798 	sctp_slide_mapping_arrays(stcb);
5799 }
5800