xref: /freebsd/sys/netinet/sctp_indata.c (revision 0bf48626aaa33768078f5872b922b1487b3a9296)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
 57  * see if I should generate a SACK into the chunk queue (if I have data to
 58  * send, that is, and will be sending it) for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int lock_held);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
 85 	 * This is really set wrong with respect to a 1-to-many socket, since
 86 	 * sb_cc is the count that everyone has put up. When we rewrite
 87 	 * sctp_soreceive we will fix this so that ONLY this
 88 	 * association's data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
 108 	 * take out what has NOT been put on the socket queue and what we
 109 	 * still hold for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
 124 	 * even if it is 0. SWS avoidance engaged.
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
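
/*
 * A minimal model of the window arithmetic above (illustrative sketch,
 * not part of the build): the advertised rwnd is the socket-buffer space
 * minus everything still held for reassembly and ordering (data bytes
 * plus an MSIZE charge per queued chunk), minus the control overhead,
 * clamped up to 1 so that SWS avoidance stays engaged. The helper name
 * and the SUB_SAT macro are hypothetical; SUB_SAT mirrors what
 * sctp_sbspace_sub() does.
 */
#if 0
static uint32_t
rwnd_model(uint32_t sb_space, uint32_t reasm_bytes, uint32_t reasm_cnt,
    uint32_t strm_bytes, uint32_t strm_cnt, uint32_t ctrl_len)
{
#define SUB_SAT(a, b) ((a) > (b) ? (a) - (b) : 0)
	uint32_t calc = sb_space;

	calc = SUB_SAT(calc, reasm_bytes + reasm_cnt * MSIZE);
	calc = SUB_SAT(calc, strm_bytes + strm_cnt * MSIZE);
	if (calc == 0)
		return (0);			/* out of space */
	calc = SUB_SAT(calc, ctrl_len);		/* rwnd control overhead */
	if (calc < ctrl_len)
		calc = 1;			/* SWS avoidance */
	return (calc);
#undef SUB_SAT
}
#endif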
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 failed_build:
168 	return (read_queue_e);
169 }
170 
171 struct mbuf *
172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173 {
174 	struct sctp_extrcvinfo *seinfo;
175 	struct sctp_sndrcvinfo *outinfo;
176 	struct sctp_rcvinfo *rcvinfo;
177 	struct sctp_nxtinfo *nxtinfo;
178 	struct cmsghdr *cmh;
179 	struct mbuf *ret;
180 	int len;
181 	int use_extended;
182 	int provide_nxt;
183 
184 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 		/* user does not want any ancillary data */
188 		return (NULL);
189 	}
190 
191 	len = 0;
192 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 	}
195 	seinfo = (struct sctp_extrcvinfo *)sinfo;
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 		provide_nxt = 1;
199 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 	} else {
201 		provide_nxt = 0;
202 	}
203 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 			use_extended = 1;
206 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 		} else {
208 			use_extended = 0;
209 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 		}
211 	} else {
212 		use_extended = 0;
213 	}
214 
215 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 	if (ret == NULL) {
217 		/* No space */
218 		return (ret);
219 	}
220 	SCTP_BUF_LEN(ret) = 0;
221 
222 	/* We need a CMSG header followed by the struct */
223 	cmh = mtod(ret, struct cmsghdr *);
224 	/*
225 	 * Make sure that there is no un-initialized padding between the
226 	 * cmsg header and cmsg data and after the cmsg data.
227 	 */
228 	memset(cmh, 0, len);
229 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 		cmh->cmsg_level = IPPROTO_SCTP;
231 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 		cmh->cmsg_type = SCTP_RCVINFO;
233 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 		rcvinfo->rcv_context = sinfo->sinfo_context;
241 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 	}
245 	if (provide_nxt) {
246 		cmh->cmsg_level = IPPROTO_SCTP;
247 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 		cmh->cmsg_type = SCTP_NXTINFO;
249 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 		nxtinfo->nxt_flags = 0;
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 		}
261 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 	}
267 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 		cmh->cmsg_level = IPPROTO_SCTP;
269 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 		if (use_extended) {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 			cmh->cmsg_type = SCTP_EXTRCV;
273 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 		} else {
276 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 			cmh->cmsg_type = SCTP_SNDRCV;
278 			*outinfo = *sinfo;
279 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 		}
281 	}
282 	return (ret);
283 }
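
/*
 * For reference, a hypothetical userland consumer of the ancillary data
 * built above (illustrative sketch, not part of the kernel build): a
 * receiver that enabled SCTP_RECVRCVINFO walks the control buffer with
 * the standard CMSG macros and copies out the SCTP_RCVINFO block. The
 * function name and the descriptor `fd` are assumptions.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/sctp.h>
#include <string.h>

static ssize_t
recv_with_rcvinfo(int fd, void *buf, size_t len, struct sctp_rcvinfo *rinfo)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;
	struct cmsghdr *cmh;
	ssize_t n;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if ((n = recvmsg(fd, &msg, 0)) < 0)
		return (n);
	for (cmh = CMSG_FIRSTHDR(&msg); cmh != NULL;
	    cmh = CMSG_NXTHDR(&msg, cmh)) {
		if (cmh->cmsg_level == IPPROTO_SCTP &&
		    cmh->cmsg_type == SCTP_RCVINFO)
			memcpy(rinfo, CMSG_DATA(cmh), sizeof(*rinfo));
	}
	return (n);
}
#endif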
284 
285 
286 static void
287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
288 {
289 	uint32_t gap, i, cumackp1;
290 	int fnd = 0;
291 	int in_r = 0, in_nr = 0;
292 
293 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
294 		return;
295 	}
296 	cumackp1 = asoc->cumulative_tsn + 1;
297 	if (SCTP_TSN_GT(cumackp1, tsn)) {
298 		/*
299 		 * this tsn is behind the cum ack and thus we don't need to
 300 		 * worry about it being moved from one mapping array to the other.
301 		 */
302 		return;
303 	}
304 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
305 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
306 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 	if ((in_r == 0) && (in_nr == 0)) {
308 #ifdef INVARIANTS
309 		panic("Things are really messed up now");
310 #else
311 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
312 		sctp_print_mapping_array(asoc);
313 #endif
314 	}
315 	if (in_nr == 0)
316 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 	if (in_r)
318 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
319 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
320 		asoc->highest_tsn_inside_nr_map = tsn;
321 	}
322 	if (tsn == asoc->highest_tsn_inside_map) {
323 		/* We must back down to see what the new highest is */
324 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
325 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 				asoc->highest_tsn_inside_map = i;
328 				fnd = 1;
329 				break;
330 			}
331 		}
332 		if (!fnd) {
333 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 		}
335 	}
336 }
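
/*
 * A minimal model of the mapping-array operations used above
 * (illustrative sketch, not part of the build): both arrays are bitmaps
 * indexed by a TSN's distance ("gap") from mapping_array_base_tsn,
 * computed with serial arithmetic so wrap-around works. The macro and
 * function names here are hypothetical.
 */
#if 0
#define GAP_OF(tsn, base)	((uint32_t)((tsn) - (base)))
#define BIT_TEST(map, gap)	((map)[(gap) >> 3] & (1U << ((gap) & 7)))
#define BIT_SET(map, gap)	((map)[(gap) >> 3] |= (1U << ((gap) & 7)))
#define BIT_CLR(map, gap)	((map)[(gap) >> 3] &= ~(1U << ((gap) & 7)))

static void
mark_non_revokable_model(uint8_t *map, uint8_t *nr_map,
    uint32_t tsn, uint32_t base)
{
	uint32_t gap = GAP_OF(tsn, base);

	if (!BIT_TEST(nr_map, gap))
		BIT_SET(nr_map, gap);	/* now non-revokable */
	if (BIT_TEST(map, gap))
		BIT_CLR(map, gap);	/* no longer subject to renege */
}
#endif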
337 
338 static int
339 sctp_place_control_in_stream(struct sctp_stream_in *strm,
340     struct sctp_association *asoc,
341     struct sctp_queued_to_read *control)
342 {
343 	struct sctp_queued_to_read *at;
344 	struct sctp_readhead *q;
345 	uint8_t flags, unordered;
346 
347 	flags = (control->sinfo_flags >> 8);
348 	unordered = flags & SCTP_DATA_UNORDERED;
349 	if (unordered) {
350 		q = &strm->uno_inqueue;
351 		if (asoc->idata_supported == 0) {
352 			if (!TAILQ_EMPTY(q)) {
353 				/*
 354 				 * Only one control entry can be here in
 355 				 * old style -- abort
356 				 */
357 				return (-1);
358 			}
359 			TAILQ_INSERT_TAIL(q, control, next_instrm);
360 			control->on_strm_q = SCTP_ON_UNORDERED;
361 			return (0);
362 		}
363 	} else {
364 		q = &strm->inqueue;
365 	}
366 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
367 		control->end_added = 1;
368 		control->first_frag_seen = 1;
369 		control->last_frag_seen = 1;
370 	}
371 	if (TAILQ_EMPTY(q)) {
372 		/* Empty queue */
373 		TAILQ_INSERT_HEAD(q, control, next_instrm);
374 		if (unordered) {
375 			control->on_strm_q = SCTP_ON_UNORDERED;
376 		} else {
377 			control->on_strm_q = SCTP_ON_ORDERED;
378 		}
379 		return (0);
380 	} else {
381 		TAILQ_FOREACH(at, q, next_instrm) {
382 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
383 				/*
384 				 * one in queue is bigger than the new one,
385 				 * insert before this one
386 				 */
387 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
388 				if (unordered) {
389 					control->on_strm_q = SCTP_ON_UNORDERED;
390 				} else {
391 					control->on_strm_q = SCTP_ON_ORDERED;
392 				}
393 				break;
394 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
395 				/*
 396 				 * Gak, he sent me a duplicate msg id
 397 				 * number?? Return -1 to abort.
398 				 */
399 				return (-1);
400 			} else {
401 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
402 					/*
403 					 * We are at the end, insert it
404 					 * after this one
405 					 */
406 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
407 						sctp_log_strm_del(control, at,
408 						    SCTP_STR_LOG_FROM_INSERT_TL);
409 					}
410 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
411 					if (unordered) {
412 						control->on_strm_q = SCTP_ON_UNORDERED;
413 					} else {
414 						control->on_strm_q = SCTP_ON_ORDERED;
415 					}
416 					break;
417 				}
418 			}
419 		}
420 	}
421 	return (0);
422 }
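
/*
 * The ordered insert above is the usual sorted-TAILQ idiom: walk until an
 * entry with a larger key is found and insert before it, falling through
 * to a tail insert, with duplicates rejected. A minimal model follows
 * (illustrative sketch, not part of the build; all names hypothetical).
 */
#if 0
#include <sys/queue.h>

struct entry {
	uint32_t key;
	TAILQ_ENTRY(entry) link;
};
TAILQ_HEAD(entry_head, entry);

static int
sorted_insert(struct entry_head *q, struct entry *e)
{
	struct entry *at;

	TAILQ_FOREACH(at, q, link) {
		if (at->key > e->key) {
			TAILQ_INSERT_BEFORE(at, e, link);
			return (0);
		}
		if (at->key == e->key)
			return (-1);	/* duplicate: caller aborts */
	}
	TAILQ_INSERT_TAIL(q, e, link);	/* largest key so far */
	return (0);
}
#endif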
423 
424 static void
425 sctp_abort_in_reasm(struct sctp_tcb *stcb,
426     struct sctp_queued_to_read *control,
427     struct sctp_tmit_chunk *chk,
428     int *abort_flag, int opspot)
429 {
430 	char msg[SCTP_DIAG_INFO_LEN];
431 	struct mbuf *oper;
432 
433 	if (stcb->asoc.idata_supported) {
434 		snprintf(msg, sizeof(msg),
435 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
436 		    opspot,
437 		    control->fsn_included,
438 		    chk->rec.data.tsn,
439 		    chk->rec.data.sid,
440 		    chk->rec.data.fsn, chk->rec.data.mid);
441 	} else {
442 		snprintf(msg, sizeof(msg),
443 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
444 		    opspot,
445 		    control->fsn_included,
446 		    chk->rec.data.tsn,
447 		    chk->rec.data.sid,
448 		    chk->rec.data.fsn,
449 		    (uint16_t)chk->rec.data.mid);
450 	}
451 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
452 	sctp_m_freem(chk->data);
453 	chk->data = NULL;
454 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
455 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
456 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
457 	*abort_flag = 1;
458 }
459 
460 static void
461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
462 {
463 	/*
464 	 * The control could not be placed and must be cleaned.
465 	 */
466 	struct sctp_tmit_chunk *chk, *nchk;
467 
468 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
469 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
470 		if (chk->data)
471 			sctp_m_freem(chk->data);
472 		chk->data = NULL;
473 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
474 	}
475 	sctp_free_a_readq(stcb, control);
476 }
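
/*
 * Note the _SAFE iterator above: the current element is unlinked and freed
 * inside the loop, so a plain TAILQ_FOREACH would walk freed memory. A
 * minimal model of the drain pattern (illustrative sketch, not part of
 * the build; the names and the M_TEMP malloc type are hypothetical):
 */
#if 0
struct item {
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(item_head, item);

static void
drain(struct item_head *q)
{
	struct item *it, *tmp;

	TAILQ_FOREACH_SAFE(it, q, link, tmp) {
		TAILQ_REMOVE(q, it, link);
		free(it, M_TEMP);	/* kernel free(9) takes a malloc type */
	}
}
#endif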
477 
478 /*
479  * Queue the chunk either right into the socket buffer if it is the next one
480  * to go OR put it in the correct place in the delivery queue.  If we do
 481  * append to the so_buf, keep doing so until we are out of order, as
 482  * long as the controls entered are non-fragmented.
483  */
484 static void
485 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
486     struct sctp_association *asoc,
487     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
488 {
489 	/*
490 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
491 	 * all the data in one stream this could happen quite rapidly. One
492 	 * could use the TSN to keep track of things, but this scheme breaks
493 	 * down in the other type of stream usage that could occur. Send a
 494 	 * single msg to stream 0, send 4 billion messages to stream 1, now
 495 	 * send a message to stream 0. You have a situation where the TSN
 496 	 * has wrapped but not in the stream. Is this worth worrying about,
 497 	 * or should we just change our queue sort at the bottom to be by
 498 	 * TSN?
 499 	 *
 500 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
 501 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
 502 	 * assignment this could happen... and I don't see how this would be
 503 	 * a violation. So for now I am undecided and will leave the sort by
 504 	 * SSN alone. Maybe a hybrid approach is the answer.
505 	 *
506 	 */
507 	struct sctp_queued_to_read *at;
508 	int queue_needed;
509 	uint32_t nxt_todel;
510 	struct mbuf *op_err;
511 	struct sctp_stream_in *strm;
512 	char msg[SCTP_DIAG_INFO_LEN];
513 
514 	strm = &asoc->strmin[control->sinfo_stream];
515 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
516 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
517 	}
518 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
519 		/* The incoming sseq is behind where we last delivered? */
520 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
521 		    strm->last_mid_delivered, control->mid);
522 		/*
523 		 * throw it in the stream so it gets cleaned up in
524 		 * association destruction
525 		 */
526 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
527 		if (asoc->idata_supported) {
528 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
529 			    strm->last_mid_delivered, control->sinfo_tsn,
530 			    control->sinfo_stream, control->mid);
531 		} else {
532 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
533 			    (uint16_t)strm->last_mid_delivered,
534 			    control->sinfo_tsn,
535 			    control->sinfo_stream,
536 			    (uint16_t)control->mid);
537 		}
538 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
539 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
540 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
541 		*abort_flag = 1;
542 		return;
543 
544 	}
545 	queue_needed = 1;
546 	asoc->size_on_all_streams += control->length;
547 	sctp_ucount_incr(asoc->cnt_on_all_streams);
548 	nxt_todel = strm->last_mid_delivered + 1;
549 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
551 		struct socket *so;
552 
553 		so = SCTP_INP_SO(stcb->sctp_ep);
554 		atomic_add_int(&stcb->asoc.refcnt, 1);
555 		SCTP_TCB_UNLOCK(stcb);
556 		SCTP_SOCKET_LOCK(so, 1);
557 		SCTP_TCB_LOCK(stcb);
558 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
559 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
560 			SCTP_SOCKET_UNLOCK(so, 1);
561 			return;
562 		}
563 #endif
564 		/* can be delivered right away? */
565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
566 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
567 		}
 568 		/* EY: it won't be queued if it could be delivered directly */
569 		queue_needed = 0;
570 		if (asoc->size_on_all_streams >= control->length) {
571 			asoc->size_on_all_streams -= control->length;
572 		} else {
573 #ifdef INVARIANTS
574 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
575 #else
576 			asoc->size_on_all_streams = 0;
577 #endif
578 		}
579 		sctp_ucount_decr(asoc->cnt_on_all_streams);
580 		strm->last_mid_delivered++;
581 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
582 		sctp_add_to_readq(stcb->sctp_ep, stcb,
583 		    control,
584 		    &stcb->sctp_socket->so_rcv, 1,
585 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
586 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
587 			/* all delivered */
588 			nxt_todel = strm->last_mid_delivered + 1;
589 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
590 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
591 				if (control->on_strm_q == SCTP_ON_ORDERED) {
592 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
593 					if (asoc->size_on_all_streams >= control->length) {
594 						asoc->size_on_all_streams -= control->length;
595 					} else {
596 #ifdef INVARIANTS
597 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
598 #else
599 						asoc->size_on_all_streams = 0;
600 #endif
601 					}
602 					sctp_ucount_decr(asoc->cnt_on_all_streams);
603 #ifdef INVARIANTS
604 				} else {
605 					panic("Huh control: %p is on_strm_q: %d",
606 					    control, control->on_strm_q);
607 #endif
608 				}
609 				control->on_strm_q = 0;
610 				strm->last_mid_delivered++;
611 				/*
612 				 * We ignore the return of deliver_data here
 613 				 * since we can always hold the chunk on the
614 				 * d-queue. And we have a finite number that
615 				 * can be delivered from the strq.
616 				 */
617 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
618 					sctp_log_strm_del(control, NULL,
619 					    SCTP_STR_LOG_FROM_IMMED_DEL);
620 				}
621 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
622 				sctp_add_to_readq(stcb->sctp_ep, stcb,
623 				    control,
624 				    &stcb->sctp_socket->so_rcv, 1,
625 				    SCTP_READ_LOCK_NOT_HELD,
626 				    SCTP_SO_LOCKED);
627 				continue;
628 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
629 				*need_reasm = 1;
630 			}
631 			break;
632 		}
633 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
634 		SCTP_SOCKET_UNLOCK(so, 1);
635 #endif
636 	}
637 	if (queue_needed) {
638 		/*
639 		 * Ok, we did not deliver this guy, find the correct place
640 		 * to put it on the queue.
641 		 */
642 		if (sctp_place_control_in_stream(strm, asoc, control)) {
643 			snprintf(msg, sizeof(msg),
644 			    "Queue to str MID: %u duplicate",
645 			    control->mid);
646 			sctp_clean_up_control(stcb, control);
647 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
648 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
649 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
650 			*abort_flag = 1;
651 		}
652 	}
653 }
654 
655 
656 static void
657 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
658 {
659 	struct mbuf *m, *prev = NULL;
660 	struct sctp_tcb *stcb;
661 
662 	stcb = control->stcb;
663 	control->held_length = 0;
664 	control->length = 0;
665 	m = control->data;
666 	while (m) {
667 		if (SCTP_BUF_LEN(m) == 0) {
668 			/* Skip mbufs with NO length */
669 			if (prev == NULL) {
670 				/* First one */
671 				control->data = sctp_m_free(m);
672 				m = control->data;
673 			} else {
674 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
675 				m = SCTP_BUF_NEXT(prev);
676 			}
677 			if (m == NULL) {
678 				control->tail_mbuf = prev;
679 			}
680 			continue;
681 		}
682 		prev = m;
683 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
684 		if (control->on_read_q) {
685 			/*
 686 			 * On the read queue, so we must increment the SB stuff;
 687 			 * we assume the caller holds any needed SB locks.
688 			 */
689 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
690 		}
691 		m = SCTP_BUF_NEXT(m);
692 	}
693 	if (prev) {
694 		control->tail_mbuf = prev;
695 	}
696 }
697 
698 static void
699 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
700 {
701 	struct mbuf *prev = NULL;
702 	struct sctp_tcb *stcb;
703 
704 	stcb = control->stcb;
705 	if (stcb == NULL) {
706 #ifdef INVARIANTS
707 		panic("Control broken");
708 #else
709 		return;
710 #endif
711 	}
712 	if (control->tail_mbuf == NULL) {
713 		/* TSNH */
714 		control->data = m;
715 		sctp_setup_tail_pointer(control);
716 		return;
717 	}
718 	control->tail_mbuf->m_next = m;
719 	while (m) {
720 		if (SCTP_BUF_LEN(m) == 0) {
721 			/* Skip mbufs with NO length */
722 			if (prev == NULL) {
723 				/* First one */
724 				control->tail_mbuf->m_next = sctp_m_free(m);
725 				m = control->tail_mbuf->m_next;
726 			} else {
727 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
728 				m = SCTP_BUF_NEXT(prev);
729 			}
730 			if (m == NULL) {
731 				control->tail_mbuf = prev;
732 			}
733 			continue;
734 		}
735 		prev = m;
736 		if (control->on_read_q) {
737 			/*
 738 			 * On the read queue, so we must increment the SB stuff;
 739 			 * we assume the caller holds any needed SB locks.
740 			 */
741 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
742 		}
743 		*added += SCTP_BUF_LEN(m);
744 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
745 		m = SCTP_BUF_NEXT(m);
746 	}
747 	if (prev) {
748 		control->tail_mbuf = prev;
749 	}
750 }
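
/*
 * Both helpers above walk an mbuf chain, freeing zero-length mbufs in
 * place and remembering the last surviving mbuf as the tail. A minimal
 * model of that prune-and-measure walk (illustrative sketch, not part of
 * the build; the function name is hypothetical):
 */
#if 0
static uint32_t
chain_prune_len(struct mbuf **head, struct mbuf **tail)
{
	struct mbuf *m = *head, *prev = NULL;
	uint32_t len = 0;

	while (m != NULL) {
		if (m->m_len == 0) {
			/*
			 * Unlink and free the empty mbuf; m_free() returns
			 * the next one, so the walk continues in place.
			 */
			if (prev == NULL)
				*head = m = m_free(m);
			else
				prev->m_next = m = m_free(m);
			continue;
		}
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	*tail = prev;
	return (len);
}
#endif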
751 
752 static void
753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
754 {
755 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
756 	nc->sinfo_stream = control->sinfo_stream;
757 	nc->mid = control->mid;
758 	TAILQ_INIT(&nc->reasm);
759 	nc->top_fsn = control->top_fsn;
760 	nc->mid = control->mid;
761 	nc->sinfo_flags = control->sinfo_flags;
762 	nc->sinfo_ppid = control->sinfo_ppid;
763 	nc->sinfo_context = control->sinfo_context;
764 	nc->fsn_included = 0xffffffff;
765 	nc->sinfo_tsn = control->sinfo_tsn;
766 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
767 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
768 	nc->whoFrom = control->whoFrom;
769 	atomic_add_int(&nc->whoFrom->ref_count, 1);
770 	nc->stcb = control->stcb;
771 	nc->port_from = control->port_from;
772 }
773 
774 static void
775 sctp_reset_a_control(struct sctp_queued_to_read *control,
776     struct sctp_inpcb *inp, uint32_t tsn)
777 {
778 	control->fsn_included = tsn;
779 	if (control->on_read_q) {
780 		/*
781 		 * We have to purge it from there, hopefully this will work
782 		 * :-)
783 		 */
784 		TAILQ_REMOVE(&inp->read_queue, control, next);
785 		control->on_read_q = 0;
786 	}
787 }
788 
789 static int
790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
791     struct sctp_association *asoc,
792     struct sctp_stream_in *strm,
793     struct sctp_queued_to_read *control,
794     uint32_t pd_point,
795     int inp_read_lock_held)
796 {
797 	/*
798 	 * Special handling for the old un-ordered data chunk. All the
799 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
800 	 * to see if we have it all. If you return one, no other control
801 	 * entries on the un-ordered queue will be looked at. In theory
 802 	 * there should be no other entries in reality, unless the peer is
803 	 * sending both unordered NDATA and unordered DATA...
804 	 */
805 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
806 	uint32_t fsn;
807 	struct sctp_queued_to_read *nc;
808 	int cnt_added;
809 
810 	if (control->first_frag_seen == 0) {
811 		/* Nothing we can do, we have not seen the first piece yet */
812 		return (1);
813 	}
814 	/* Collapse any we can */
815 	cnt_added = 0;
816 restart:
817 	fsn = control->fsn_included + 1;
818 	/* Now what can we add? */
819 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
820 		if (chk->rec.data.fsn == fsn) {
 821 			/* Ok, let's add it */
822 			sctp_alloc_a_readq(stcb, nc);
823 			if (nc == NULL) {
824 				break;
825 			}
826 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
827 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
828 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
829 			fsn++;
830 			cnt_added++;
831 			chk = NULL;
832 			if (control->end_added) {
833 				/* We are done */
834 				if (!TAILQ_EMPTY(&control->reasm)) {
835 					/*
836 					 * Ok we have to move anything left
837 					 * on the control queue to a new
838 					 * control.
839 					 */
840 					sctp_build_readq_entry_from_ctl(nc, control);
841 					tchk = TAILQ_FIRST(&control->reasm);
842 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
843 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
844 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
845 							asoc->size_on_reasm_queue -= tchk->send_size;
846 						} else {
847 #ifdef INVARIANTS
848 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
849 #else
850 							asoc->size_on_reasm_queue = 0;
851 #endif
852 						}
853 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
854 						nc->first_frag_seen = 1;
855 						nc->fsn_included = tchk->rec.data.fsn;
856 						nc->data = tchk->data;
857 						nc->sinfo_ppid = tchk->rec.data.ppid;
858 						nc->sinfo_tsn = tchk->rec.data.tsn;
859 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
860 						tchk->data = NULL;
861 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
862 						sctp_setup_tail_pointer(nc);
863 						tchk = TAILQ_FIRST(&control->reasm);
864 					}
865 					/* Spin the rest onto the queue */
866 					while (tchk) {
867 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
868 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
869 						tchk = TAILQ_FIRST(&control->reasm);
870 					}
871 					/*
 872 					 * Now let's add it to the queue
873 					 * after removing control
874 					 */
875 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
876 					nc->on_strm_q = SCTP_ON_UNORDERED;
877 					if (control->on_strm_q) {
878 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
879 						control->on_strm_q = 0;
880 					}
881 				}
882 				if (control->pdapi_started) {
883 					strm->pd_api_started = 0;
884 					control->pdapi_started = 0;
885 				}
886 				if (control->on_strm_q) {
887 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
888 					control->on_strm_q = 0;
889 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
890 				}
891 				if (control->on_read_q == 0) {
892 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
893 					    &stcb->sctp_socket->so_rcv, control->end_added,
894 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
895 				}
896 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
897 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
898 					/*
899 					 * Switch to the new guy and
900 					 * continue
901 					 */
902 					control = nc;
903 					goto restart;
904 				} else {
905 					if (nc->on_strm_q == 0) {
906 						sctp_free_a_readq(stcb, nc);
907 					}
908 				}
909 				return (1);
910 			} else {
911 				sctp_free_a_readq(stcb, nc);
912 			}
913 		} else {
914 			/* Can't add more */
915 			break;
916 		}
917 	}
918 	if (cnt_added && strm->pd_api_started) {
919 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
920 	}
921 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
922 		strm->pd_api_started = 1;
923 		control->pdapi_started = 1;
924 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
925 		    &stcb->sctp_socket->so_rcv, control->end_added,
926 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
927 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
928 		return (0);
929 	} else {
930 		return (1);
931 	}
932 }
933 
934 static void
935 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
936     struct sctp_association *asoc,
937     struct sctp_queued_to_read *control,
938     struct sctp_tmit_chunk *chk,
939     int *abort_flag)
940 {
941 	struct sctp_tmit_chunk *at;
942 	int inserted;
943 
944 	/*
945 	 * Here we need to place the chunk into the control structure sorted
946 	 * in the correct order.
947 	 */
948 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
 949 		/* It's the very first one. */
950 		SCTPDBG(SCTP_DEBUG_XXX,
951 		    "chunk is a first fsn: %u becomes fsn_included\n",
952 		    chk->rec.data.fsn);
953 		at = TAILQ_FIRST(&control->reasm);
954 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
955 			/*
 956 			 * The first chunk in the reassembly has a smaller
 957 			 * TSN than this one; even though this has a first,
958 			 * it must be from a subsequent msg.
959 			 */
960 			goto place_chunk;
961 		}
962 		if (control->first_frag_seen) {
963 			/*
 964 			 * In old un-ordered we can reassemble multiple
 965 			 * messages on one control, as long as the next
 966 			 * FIRST is greater than the old first (TSN-, i.e.
 967 			 * FSN-wise).
968 			 */
969 			struct mbuf *tdata;
970 			uint32_t tmp;
971 
972 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
973 				/*
 974 				 * Easy case: the start of a new guy beyond
 975 				 * the lowest.
976 				 */
977 				goto place_chunk;
978 			}
979 			if ((chk->rec.data.fsn == control->fsn_included) ||
980 			    (control->pdapi_started)) {
981 				/*
 982 				 * Ok, this should not happen; if it does, we
983 				 * started the pd-api on the higher TSN
984 				 * (since the equals part is a TSN failure
985 				 * it must be that).
986 				 *
 987 				 * We are completely hosed in that case, since
988 				 * I have no way to recover. This really
989 				 * will only happen if we can get more TSN's
990 				 * higher before the pd-api-point.
991 				 */
992 				sctp_abort_in_reasm(stcb, control, chk,
993 				    abort_flag,
994 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
995 
996 				return;
997 			}
998 			/*
999 			 * Ok we have two firsts and the one we just got is
 1000 			 * smaller than the one we previously placed... yuck!
1001 			 * We must swap them out.
1002 			 */
1003 			/* swap the mbufs */
1004 			tdata = control->data;
1005 			control->data = chk->data;
1006 			chk->data = tdata;
1007 			/* Save the lengths */
1008 			chk->send_size = control->length;
1009 			/* Recompute length of control and tail pointer */
1010 			sctp_setup_tail_pointer(control);
1011 			/* Fix the FSN included */
1012 			tmp = control->fsn_included;
1013 			control->fsn_included = chk->rec.data.fsn;
1014 			chk->rec.data.fsn = tmp;
1015 			/* Fix the TSN included */
1016 			tmp = control->sinfo_tsn;
1017 			control->sinfo_tsn = chk->rec.data.tsn;
1018 			chk->rec.data.tsn = tmp;
1019 			/* Fix the PPID included */
1020 			tmp = control->sinfo_ppid;
1021 			control->sinfo_ppid = chk->rec.data.ppid;
1022 			chk->rec.data.ppid = tmp;
1023 			/* Fix tail pointer */
1024 			goto place_chunk;
1025 		}
1026 		control->first_frag_seen = 1;
1027 		control->fsn_included = chk->rec.data.fsn;
1028 		control->top_fsn = chk->rec.data.fsn;
1029 		control->sinfo_tsn = chk->rec.data.tsn;
1030 		control->sinfo_ppid = chk->rec.data.ppid;
1031 		control->data = chk->data;
1032 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1033 		chk->data = NULL;
1034 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1035 		sctp_setup_tail_pointer(control);
1036 		return;
1037 	}
1038 place_chunk:
1039 	inserted = 0;
1040 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1041 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1042 			/*
1043 			 * This one in queue is bigger than the new one,
1044 			 * insert the new one before at.
1045 			 */
1046 			asoc->size_on_reasm_queue += chk->send_size;
1047 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1048 			inserted = 1;
1049 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1050 			break;
1051 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1052 			/*
1053 			 * They sent a duplicate fsn number. This really
1054 			 * should not happen since the FSN is a TSN and it
1055 			 * should have been dropped earlier.
1056 			 */
1057 			sctp_abort_in_reasm(stcb, control, chk,
1058 			    abort_flag,
1059 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1060 			return;
1061 		}
1062 
1063 	}
1064 	if (inserted == 0) {
 1065 		/* It's at the end */
1066 		asoc->size_on_reasm_queue += chk->send_size;
1067 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1068 		control->top_fsn = chk->rec.data.fsn;
1069 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1070 	}
1071 }
1072 
1073 static int
1074 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1075     struct sctp_stream_in *strm, int inp_read_lock_held)
1076 {
1077 	/*
1078 	 * Given a stream, strm, see if any of the SSN's on it that are
 1079 	 * fragmented are ready to deliver. If so, go ahead and place them on
 1080 	 * the read queue. In so placing, if we have hit the end, then we
1081 	 * need to remove them from the stream's queue.
1082 	 */
1083 	struct sctp_queued_to_read *control, *nctl = NULL;
1084 	uint32_t next_to_del;
1085 	uint32_t pd_point;
1086 	int ret = 0;
1087 
1088 	if (stcb->sctp_socket) {
1089 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1090 		    stcb->sctp_ep->partial_delivery_point);
1091 	} else {
1092 		pd_point = stcb->sctp_ep->partial_delivery_point;
1093 	}
1094 	control = TAILQ_FIRST(&strm->uno_inqueue);
1095 
1096 	if ((control != NULL) &&
1097 	    (asoc->idata_supported == 0)) {
1098 		/* Special handling needed for "old" data format */
1099 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1100 			goto done_un;
1101 		}
1102 	}
1103 	if (strm->pd_api_started) {
1104 		/* Can't add more */
1105 		return (0);
1106 	}
1107 	while (control) {
1108 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1109 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1110 		nctl = TAILQ_NEXT(control, next_instrm);
1111 		if (control->end_added) {
1112 			/* We just put the last bit on */
1113 			if (control->on_strm_q) {
1114 #ifdef INVARIANTS
1115 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1116 					panic("Huh control: %p on_q: %d -- not unordered?",
1117 					    control, control->on_strm_q);
1118 				}
1119 #endif
1120 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1121 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1122 				control->on_strm_q = 0;
1123 			}
1124 			if (control->on_read_q == 0) {
1125 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1126 				    control,
1127 				    &stcb->sctp_socket->so_rcv, control->end_added,
1128 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1129 			}
1130 		} else {
1131 			/* Can we do a PD-API for this un-ordered guy? */
1132 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1133 				strm->pd_api_started = 1;
1134 				control->pdapi_started = 1;
1135 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1136 				    control,
1137 				    &stcb->sctp_socket->so_rcv, control->end_added,
1138 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1139 
1140 				break;
1141 			}
1142 		}
1143 		control = nctl;
1144 	}
1145 done_un:
1146 	control = TAILQ_FIRST(&strm->inqueue);
1147 	if (strm->pd_api_started) {
1148 		/* Can't add more */
1149 		return (0);
1150 	}
1151 	if (control == NULL) {
1152 		return (ret);
1153 	}
1154 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1155 		/*
 1156 		 * Ok, the guy at the top that was being partially delivered
 1157 		 * has completed, so we remove it. Note the pd_api flag was
1158 		 * taken off when the chunk was merged on in
1159 		 * sctp_queue_data_for_reasm below.
1160 		 */
1161 		nctl = TAILQ_NEXT(control, next_instrm);
1162 		SCTPDBG(SCTP_DEBUG_XXX,
1163 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1164 		    control, control->end_added, control->mid,
1165 		    control->top_fsn, control->fsn_included,
1166 		    strm->last_mid_delivered);
1167 		if (control->end_added) {
1168 			if (control->on_strm_q) {
1169 #ifdef INVARIANTS
1170 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1171 					panic("Huh control: %p on_q: %d -- not ordered?",
1172 					    control, control->on_strm_q);
1173 				}
1174 #endif
1175 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1176 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1177 				if (asoc->size_on_all_streams >= control->length) {
1178 					asoc->size_on_all_streams -= control->length;
1179 				} else {
1180 #ifdef INVARIANTS
1181 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1182 #else
1183 					asoc->size_on_all_streams = 0;
1184 #endif
1185 				}
1186 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1187 				control->on_strm_q = 0;
1188 			}
1189 			if (strm->pd_api_started && control->pdapi_started) {
1190 				control->pdapi_started = 0;
1191 				strm->pd_api_started = 0;
1192 			}
1193 			if (control->on_read_q == 0) {
1194 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1195 				    control,
1196 				    &stcb->sctp_socket->so_rcv, control->end_added,
1197 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1198 			}
1199 			control = nctl;
1200 		}
1201 	}
1202 	if (strm->pd_api_started) {
1203 		/*
 1204 		 * Can't add more; we must have gotten an un-ordered one
 1205 		 * above being partially delivered.
1206 		 */
1207 		return (0);
1208 	}
1209 deliver_more:
1210 	next_to_del = strm->last_mid_delivered + 1;
1211 	if (control) {
1212 		SCTPDBG(SCTP_DEBUG_XXX,
1213 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1214 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1215 		    next_to_del);
1216 		nctl = TAILQ_NEXT(control, next_instrm);
1217 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1218 		    (control->first_frag_seen)) {
1219 			int done;
1220 
1221 			/* Ok we can deliver it onto the stream. */
1222 			if (control->end_added) {
1223 				/* We are done with it afterwards */
1224 				if (control->on_strm_q) {
1225 #ifdef INVARIANTS
1226 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1227 						panic("Huh control: %p on_q: %d -- not ordered?",
1228 						    control, control->on_strm_q);
1229 					}
1230 #endif
1231 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1232 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1233 					if (asoc->size_on_all_streams >= control->length) {
1234 						asoc->size_on_all_streams -= control->length;
1235 					} else {
1236 #ifdef INVARIANTS
1237 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1238 #else
1239 						asoc->size_on_all_streams = 0;
1240 #endif
1241 					}
1242 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1243 					control->on_strm_q = 0;
1244 				}
1245 				ret++;
1246 			}
1247 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1248 				/*
1249 				 * A singleton now slipping through - mark
1250 				 * it non-revokable too
1251 				 */
1252 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1253 			} else if (control->end_added == 0) {
1254 				/*
 1255 				 * Check if we can defer adding until it's
1256 				 * all there
1257 				 */
1258 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1259 					/*
1260 					 * Don't need it or cannot add more
1261 					 * (one being delivered that way)
1262 					 */
1263 					goto out;
1264 				}
1265 			}
1266 			done = (control->end_added) && (control->last_frag_seen);
1267 			if (control->on_read_q == 0) {
1268 				if (!done) {
1269 					if (asoc->size_on_all_streams >= control->length) {
1270 						asoc->size_on_all_streams -= control->length;
1271 					} else {
1272 #ifdef INVARIANTS
1273 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1274 #else
1275 						asoc->size_on_all_streams = 0;
1276 #endif
1277 					}
1278 					strm->pd_api_started = 1;
1279 					control->pdapi_started = 1;
1280 				}
1281 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1282 				    control,
1283 				    &stcb->sctp_socket->so_rcv, control->end_added,
1284 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1285 			}
1286 			strm->last_mid_delivered = next_to_del;
1287 			if (done) {
1288 				control = nctl;
1289 				goto deliver_more;
1290 			}
1291 		}
1292 	}
1293 out:
1294 	return (ret);
1295 }
1296 
1297 
1298 uint32_t
1299 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1300     struct sctp_stream_in *strm,
1301     struct sctp_tcb *stcb, struct sctp_association *asoc,
1302     struct sctp_tmit_chunk *chk, int hold_rlock)
1303 {
1304 	/*
1305 	 * Given a control and a chunk, merge the data from the chk onto the
1306 	 * control and free up the chunk resources.
1307 	 */
1308 	uint32_t added = 0;
1309 	int i_locked = 0;
1310 
1311 	if (control->on_read_q && (hold_rlock == 0)) {
1312 		/*
 1313 		 * It's being pd-api'd, so we must take some locks.
1314 		 */
1315 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1316 		i_locked = 1;
1317 	}
1318 	if (control->data == NULL) {
1319 		control->data = chk->data;
1320 		sctp_setup_tail_pointer(control);
1321 	} else {
1322 		sctp_add_to_tail_pointer(control, chk->data, &added);
1323 	}
1324 	control->fsn_included = chk->rec.data.fsn;
1325 	asoc->size_on_reasm_queue -= chk->send_size;
1326 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1327 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1328 	chk->data = NULL;
1329 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1330 		control->first_frag_seen = 1;
1331 		control->sinfo_tsn = chk->rec.data.tsn;
1332 		control->sinfo_ppid = chk->rec.data.ppid;
1333 	}
1334 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
 1335 		/* It's complete */
1336 		if ((control->on_strm_q) && (control->on_read_q)) {
1337 			if (control->pdapi_started) {
1338 				control->pdapi_started = 0;
1339 				strm->pd_api_started = 0;
1340 			}
1341 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1342 				/* Unordered */
1343 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1344 				control->on_strm_q = 0;
1345 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1346 				/* Ordered */
1347 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1348 				/*
1349 				 * Don't need to decrement
1350 				 * size_on_all_streams, since control is on
1351 				 * the read queue.
1352 				 */
1353 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1354 				control->on_strm_q = 0;
1355 #ifdef INVARIANTS
1356 			} else if (control->on_strm_q) {
1357 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1358 				    control->on_strm_q);
1359 #endif
1360 			}
1361 		}
1362 		control->end_added = 1;
1363 		control->last_frag_seen = 1;
1364 	}
1365 	if (i_locked) {
1366 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1367 	}
1368 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1369 	return (added);
1370 }
1371 
1372 /*
1373  * Dump onto the re-assembly queue, in its proper place. After dumping on the
 1374  * queue, see if anything can be delivered. If so, pull it off (or as much as
 1375  * we can). If we run out of space then we must dump what we can and set the
1376  * appropriate flag to say we queued what we could.
1377  */
1378 static void
1379 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1380     struct sctp_queued_to_read *control,
1381     struct sctp_tmit_chunk *chk,
1382     int created_control,
1383     int *abort_flag, uint32_t tsn)
1384 {
1385 	uint32_t next_fsn;
1386 	struct sctp_tmit_chunk *at, *nat;
1387 	struct sctp_stream_in *strm;
1388 	int do_wakeup, unordered;
1389 	uint32_t lenadded;
1390 
1391 	strm = &asoc->strmin[control->sinfo_stream];
1392 	/*
1393 	 * For old un-ordered data chunks.
1394 	 */
1395 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1396 		unordered = 1;
1397 	} else {
1398 		unordered = 0;
1399 	}
1400 	/* Must be added to the stream-in queue */
1401 	if (created_control) {
1402 		if (unordered == 0) {
1403 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1404 		}
1405 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1406 			/* Duplicate SSN? */
1407 			sctp_abort_in_reasm(stcb, control, chk,
1408 			    abort_flag,
1409 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1410 			sctp_clean_up_control(stcb, control);
1411 			return;
1412 		}
1413 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1414 			/*
 1415 			 * Ok, we created this control and now let's validate
 1416 			 * that it's legal, i.e. there is a B bit set; if not,
 1417 			 * and we have up to the cum-ack, then it's invalid.
1418 			 */
1419 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1420 				sctp_abort_in_reasm(stcb, control, chk,
1421 				    abort_flag,
1422 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1423 				return;
1424 			}
1425 		}
1426 	}
1427 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1428 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1429 		return;
1430 	}
1431 	/*
 1432 	 * Ok, we must queue the chunk into the reassembly portion: o if it's
 1433 	 * the first, it goes to the control mbuf. o if it's not first but the
 1434 	 * next in sequence, it goes to the control, and each succeeding one
 1435 	 * in order also goes. o if it's not in order, we place it on the list
 1436 	 * in its place.
1437 	 */
1438 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
 1439 		/* It's the very first one. */
1440 		SCTPDBG(SCTP_DEBUG_XXX,
1441 		    "chunk is a first fsn: %u becomes fsn_included\n",
1442 		    chk->rec.data.fsn);
1443 		if (control->first_frag_seen) {
1444 			/*
 1445 			 * Error on the sender's part: they either sent us two
1446 			 * data chunks with FIRST, or they sent two
1447 			 * un-ordered chunks that were fragmented at the
1448 			 * same time in the same stream.
1449 			 */
1450 			sctp_abort_in_reasm(stcb, control, chk,
1451 			    abort_flag,
1452 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1453 			return;
1454 		}
1455 		control->first_frag_seen = 1;
1456 		control->sinfo_ppid = chk->rec.data.ppid;
1457 		control->sinfo_tsn = chk->rec.data.tsn;
1458 		control->fsn_included = chk->rec.data.fsn;
1459 		control->data = chk->data;
1460 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1461 		chk->data = NULL;
1462 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1463 		sctp_setup_tail_pointer(control);
1464 		asoc->size_on_all_streams += control->length;
1465 	} else {
1466 		/* Place the chunk in our list */
1467 		int inserted = 0;
1468 
1469 		if (control->last_frag_seen == 0) {
1470 			/* Still willing to raise highest FSN seen */
1471 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1472 				SCTPDBG(SCTP_DEBUG_XXX,
1473 				    "We have a new top_fsn: %u\n",
1474 				    chk->rec.data.fsn);
1475 				control->top_fsn = chk->rec.data.fsn;
1476 			}
1477 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1478 				SCTPDBG(SCTP_DEBUG_XXX,
1479 				    "The last fsn is now in place fsn: %u\n",
1480 				    chk->rec.data.fsn);
1481 				control->last_frag_seen = 1;
1482 			}
1483 			if (asoc->idata_supported || control->first_frag_seen) {
1484 				/*
1485 				 * For IDATA we always check since we know
1486 				 * that the first fragment is 0. For old
1487 				 * DATA we have to receive the first before
1488 				 * we know the first FSN (which is the TSN).
1489 				 */
1490 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1491 					/*
1492 					 * We have already delivered up to
 1493 					 * this, so it's a dup
1494 					 */
1495 					sctp_abort_in_reasm(stcb, control, chk,
1496 					    abort_flag,
1497 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1498 					return;
1499 				}
1500 			}
1501 		} else {
1502 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1503 				/* Second last? huh? */
1504 				SCTPDBG(SCTP_DEBUG_XXX,
1505 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1506 				    chk->rec.data.fsn, control->top_fsn);
1507 				sctp_abort_in_reasm(stcb, control,
1508 				    chk, abort_flag,
1509 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1510 				return;
1511 			}
1512 			if (asoc->idata_supported || control->first_frag_seen) {
1513 				/*
1514 				 * For IDATA we always check since we know
1515 				 * that the first fragment is 0. For old
1516 				 * DATA we have to receive the first before
1517 				 * we know the first FSN (which is the TSN).
1518 				 */
1519 
1520 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1521 					/*
1522 					 * We have already delivered up to
 1523 					 * this, so it's a dup
1524 					 */
1525 					SCTPDBG(SCTP_DEBUG_XXX,
1526 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1527 					    chk->rec.data.fsn, control->fsn_included);
1528 					sctp_abort_in_reasm(stcb, control, chk,
1529 					    abort_flag,
1530 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1531 					return;
1532 				}
1533 			}
1534 			/*
1535 			 * validate not beyond top FSN if we have seen last
1536 			 * one
1537 			 */
1538 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1539 				SCTPDBG(SCTP_DEBUG_XXX,
1540 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1541 				    chk->rec.data.fsn,
1542 				    control->top_fsn);
1543 				sctp_abort_in_reasm(stcb, control, chk,
1544 				    abort_flag,
1545 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1546 				return;
1547 			}
1548 		}
1549 		/*
1550 		 * If we reach here, we need to place the new chunk in the
1551 		 * reassembly for this control.
1552 		 */
1553 		SCTPDBG(SCTP_DEBUG_XXX,
 1554 		    "chunk is not a first, fsn: %u needs to be inserted\n",
1555 		    chk->rec.data.fsn);
1556 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1557 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1558 				/*
1559 				 * This one in queue is bigger than the new
1560 				 * one, insert the new one before at.
1561 				 */
1562 				SCTPDBG(SCTP_DEBUG_XXX,
1563 				    "Insert it before fsn: %u\n",
1564 				    at->rec.data.fsn);
1565 				asoc->size_on_reasm_queue += chk->send_size;
1566 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1567 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1568 				inserted = 1;
1569 				break;
1570 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1571 				/*
 1572 				 * Gak, he sent me a duplicate str seq
1573 				 * number
1574 				 */
1575 				/*
1576 				 * foo bar, I guess I will just free this
1577 				 * new guy, should we abort too? FIX ME
1578 				 * MAYBE? Or it COULD be that the SSN's have
1579 				 * wrapped. Maybe I should compare to TSN
 1580 				 * somehow... sigh, for now just blow away
1581 				 * the chunk!
1582 				 */
1583 				SCTPDBG(SCTP_DEBUG_XXX,
1584 				    "Duplicate to fsn: %u -- abort\n",
1585 				    at->rec.data.fsn);
1586 				sctp_abort_in_reasm(stcb, control,
1587 				    chk, abort_flag,
1588 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1589 				return;
1590 			}
1591 		}
1592 		if (inserted == 0) {
1593 			/* Goes on the end */
1594 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1595 			    chk->rec.data.fsn);
1596 			asoc->size_on_reasm_queue += chk->send_size;
1597 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1598 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1599 		}
1600 	}
1601 	/*
 1602 	 * Ok, let's see if we can suck any up into the control structure that
 1603 	 * are in seq, if it makes sense.
1604 	 */
1605 	do_wakeup = 0;
1606 	/*
1607 	 * If the first fragment has not been seen there is no sense in
1608 	 * looking.
1609 	 */
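	/*
	 * Example: suppose fsn_included is 5 and the reasm queue holds
	 * fragments with FSNs 6, 7 and 9. The loop below pulls 6 and 7
	 * into the control (next_fsn advances to 8) and stops at the
	 * gap before 9; fragment 9 stays queued until 8 arrives.
	 */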
1610 	if (control->first_frag_seen) {
1611 		next_fsn = control->fsn_included + 1;
1612 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1613 			if (at->rec.data.fsn == next_fsn) {
1614 				/* We can add this one now to the control */
1615 				SCTPDBG(SCTP_DEBUG_XXX,
1616 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1617 				    control, at,
1618 				    at->rec.data.fsn,
1619 				    next_fsn, control->fsn_included);
1620 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1621 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1622 				if (control->on_read_q) {
1623 					do_wakeup = 1;
1624 				} else {
1625 					/*
1626 					 * We only add to the
1627 					 * size-on-all-streams if it's not on
1628 					 * the read q. The read q flag will
1629 					 * cause a sballoc so it's accounted
1630 					 * for there.
1631 					 */
1632 					asoc->size_on_all_streams += lenadded;
1633 				}
1634 				next_fsn++;
1635 				if (control->end_added && control->pdapi_started) {
1636 					if (strm->pd_api_started) {
1637 						strm->pd_api_started = 0;
1638 						control->pdapi_started = 0;
1639 					}
1640 					if (control->on_read_q == 0) {
1641 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1642 						    control,
1643 						    &stcb->sctp_socket->so_rcv, control->end_added,
1644 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1645 					}
1646 					break;
1647 				}
1648 			} else {
1649 				break;
1650 			}
1651 		}
1652 	}
1653 	if (do_wakeup) {
1654 		/* Need to wakeup the reader */
1655 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1656 	}
1657 }
1658 
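/*
 * Note: for ordered data, and for unordered data when I-DATA is in use,
 * all fragments of a user message share a MID, so the in-progress
 * reassembly can be looked up by MID. With old DATA chunks an unordered
 * fragment carries no usable sequence number, so at most one unordered
 * reassembly per stream can be in progress and the head of uno_inqueue
 * is taken.
 */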
1659 static struct sctp_queued_to_read *
1660 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1661 {
1662 	struct sctp_queued_to_read *control;
1663 
1664 	if (ordered) {
1665 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1666 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1667 				break;
1668 			}
1669 		}
1670 	} else {
1671 		if (idata_supported) {
1672 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1673 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1674 					break;
1675 				}
1676 			}
1677 		} else {
1678 			control = TAILQ_FIRST(&strm->uno_inqueue);
1679 		}
1680 	}
1681 	return (control);
1682 }
1683 
1684 static int
1685 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1686     struct mbuf **m, int offset, int chk_length,
1687     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1688     int *break_flag, int last_chunk, uint8_t chk_type)
1689 {
1690 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1691 	uint32_t tsn, fsn, gap, mid;
1692 	struct mbuf *dmbuf;
1693 	int the_len;
1694 	int need_reasm_check = 0;
1695 	uint16_t sid;
1696 	struct mbuf *op_err;
1697 	char msg[SCTP_DIAG_INFO_LEN];
1698 	struct sctp_queued_to_read *control, *ncontrol;
1699 	uint32_t ppid;
1700 	uint8_t chk_flags;
1701 	struct sctp_stream_reset_list *liste;
1702 	int ordered;
1703 	size_t clen;
1704 	int created_control = 0;
1705 
1706 	if (chk_type == SCTP_IDATA) {
1707 		struct sctp_idata_chunk *chunk, chunk_buf;
1708 
1709 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1710 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1711 		chk_flags = chunk->ch.chunk_flags;
1712 		clen = sizeof(struct sctp_idata_chunk);
1713 		tsn = ntohl(chunk->dp.tsn);
1714 		sid = ntohs(chunk->dp.sid);
1715 		mid = ntohl(chunk->dp.mid);
1716 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1717 			fsn = 0;
1718 			ppid = chunk->dp.ppid_fsn.ppid;
1719 		} else {
1720 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1721 			ppid = 0xffffffff;	/* Use as an invalid value. */
1722 		}
1723 	} else {
1724 		struct sctp_data_chunk *chunk, chunk_buf;
1725 
1726 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1727 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1728 		chk_flags = chunk->ch.chunk_flags;
1729 		clen = sizeof(struct sctp_data_chunk);
1730 		tsn = ntohl(chunk->dp.tsn);
1731 		sid = ntohs(chunk->dp.sid);
1732 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1733 		fsn = tsn;
1734 		ppid = chunk->dp.ppid;
1735 	}
1736 	if ((size_t)chk_length == clen) {
1737 		/*
1738 		 * Need to send an abort since we had an empty data chunk.
1739 		 */
1740 		op_err = sctp_generate_no_user_data_cause(tsn);
1741 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1742 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1743 		*abort_flag = 1;
1744 		return (0);
1745 	}
1746 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1747 		asoc->send_sack = 1;
1748 	}
1749 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1750 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1751 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1752 	}
1753 	if (stcb == NULL) {
1754 		return (0);
1755 	}
1756 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1757 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1758 		/* It is a duplicate */
1759 		SCTP_STAT_INCR(sctps_recvdupdata);
1760 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1761 			/* Record a dup for the next outbound sack */
1762 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1763 			asoc->numduptsns++;
1764 		}
1765 		asoc->send_sack = 1;
1766 		return (0);
1767 	}
1768 	/* Calculate the number of TSN's between the base and this TSN */
1769 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
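	/*
	 * Note: SCTP_CALC_TSN_TO_GAP() computes the serial distance
	 * tsn - mapping_array_base_tsn (modulo 2^32), so gap is the bit
	 * index of this TSN in the mapping arrays. For example, with a
	 * base of 0xfffffffe and tsn 0x00000001 the gap is 3. The checks
	 * below toss TSNs whose gap cannot be represented even by a fully
	 * grown mapping array (SCTP_MAPPING_ARRAY bytes of 8 bits each).
	 */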
1770 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1771 		/* Can't hold the bit in the mapping at max array, toss it */
1772 		return (0);
1773 	}
1774 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1775 		SCTP_TCB_LOCK_ASSERT(stcb);
1776 		if (sctp_expand_mapping_array(asoc, gap)) {
1777 			/* Can't expand, drop it */
1778 			return (0);
1779 		}
1780 	}
1781 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1782 		*high_tsn = tsn;
1783 	}
1784 	/* See if we have received this one already */
1785 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1786 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1787 		SCTP_STAT_INCR(sctps_recvdupdata);
1788 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1789 			/* Record a dup for the next outbound sack */
1790 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1791 			asoc->numduptsns++;
1792 		}
1793 		asoc->send_sack = 1;
1794 		return (0);
1795 	}
1796 	/*
1797 	 * Check to see about the GONE flag, duplicates would cause a sack
1798 	 * to be sent up above
1799 	 */
1800 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1801 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1802 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1803 		/*
1804 		 * wait a minute, this guy is gone, there is no longer a
1805 		 * receiver. Send peer an ABORT!
1806 		 */
1807 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1808 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1809 		*abort_flag = 1;
1810 		return (0);
1811 	}
1812 	/*
1813 	 * Now before going further we see if there is room. If NOT then we
1814 	 * MAY let one through only IF this TSN is the one we are waiting
1815 	 * for on a partial delivery API.
1816 	 */
1817 
1818 	/* Is the stream valid? */
1819 	if (sid >= asoc->streamincnt) {
1820 		struct sctp_error_invalid_stream *cause;
1821 
1822 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1823 		    0, M_NOWAIT, 1, MT_DATA);
1824 		if (op_err != NULL) {
1825 			/* add some space up front so prepend will work well */
1826 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1827 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1828 			/*
1829 			 * Error causes are just param's and this one has
1830 			 * two back to back phdr, one with the error type
1831 			 * and size, the other with the streamid and a rsvd
1832 			 */
1833 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1834 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1835 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1836 			cause->stream_id = htons(sid);
1837 			cause->reserved = htons(0);
1838 			sctp_queue_op_err(stcb, op_err);
1839 		}
1840 		SCTP_STAT_INCR(sctps_badsid);
1841 		SCTP_TCB_LOCK_ASSERT(stcb);
1842 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1843 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1844 			asoc->highest_tsn_inside_nr_map = tsn;
1845 		}
1846 		if (tsn == (asoc->cumulative_tsn + 1)) {
1847 			/* Update cum-ack */
1848 			asoc->cumulative_tsn = tsn;
1849 		}
1850 		return (0);
1851 	}
1852 	/*
1853 	 * If it's a fragmented message, let's see if we can find the control
1854 	 * on the reassembly queues.
1855 	 */
1856 	if ((chk_type == SCTP_IDATA) &&
1857 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1858 	    (fsn == 0)) {
1859 		/*
1860 		 * The first *must* be fsn 0, and other (middle/end) pieces
1861 		 * can *not* be fsn 0. XXX: This can happen in case of a
1862 		 * wrap around. Ignore it for now.
1863 		 */
1864 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1865 		    mid, chk_flags);
1866 		goto err_out;
1867 	}
1868 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1869 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1870 	    chk_flags, control);
1871 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1872 		/* See if we can find the re-assembly entity */
1873 		if (control != NULL) {
1874 			/* We found something, does it belong? */
1875 			if (ordered && (mid != control->mid)) {
1876 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1877 		err_out:
1878 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1879 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1880 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1881 				*abort_flag = 1;
1882 				return (0);
1883 			}
1884 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1885 				/*
1886 				 * We can't have a switched order with an
1887 				 * unordered chunk
1888 				 */
1889 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1890 				    tsn);
1891 				goto err_out;
1892 			}
1893 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1894 				/*
1895 				 * We can't have a switched unordered with a
1896 				 * ordered chunk
1897 				 */
1898 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1899 				    tsn);
1900 				goto err_out;
1901 			}
1902 		}
1903 	} else {
1904 		/*
1905 		 * It's a complete segment. Let's validate we don't have a
1906 		 * re-assembly going on with the same Stream/Seq (for
1907 		 * ordered) or in the same Stream for unordered.
1908 		 */
1909 		if (control != NULL) {
1910 			if (ordered || asoc->idata_supported) {
1911 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1912 				    chk_flags, mid);
1913 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1914 				goto err_out;
1915 			} else {
1916 				if ((tsn == control->fsn_included + 1) &&
1917 				    (control->end_added == 0)) {
1918 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1919 					goto err_out;
1920 				} else {
1921 					control = NULL;
1922 				}
1923 			}
1924 		}
1925 	}
1926 	/* now do the tests */
1927 	if (((asoc->cnt_on_all_streams +
1928 	    asoc->cnt_on_reasm_queue +
1929 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1930 	    (((int)asoc->my_rwnd) <= 0)) {
1931 		/*
1932 		 * When we have NO room in the rwnd we check to make sure
1933 		 * the reader is doing its job...
1934 		 */
1935 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1936 			/* some to read, wake-up */
1937 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1938 			struct socket *so;
1939 
1940 			so = SCTP_INP_SO(stcb->sctp_ep);
1941 			atomic_add_int(&stcb->asoc.refcnt, 1);
1942 			SCTP_TCB_UNLOCK(stcb);
1943 			SCTP_SOCKET_LOCK(so, 1);
1944 			SCTP_TCB_LOCK(stcb);
1945 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1946 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1947 				/* assoc was freed while we were unlocked */
1948 				SCTP_SOCKET_UNLOCK(so, 1);
1949 				return (0);
1950 			}
1951 #endif
1952 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1954 			SCTP_SOCKET_UNLOCK(so, 1);
1955 #endif
1956 		}
1957 		/* now is it in the mapping array of what we have accepted? */
1958 		if (chk_type == SCTP_DATA) {
1959 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1960 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1961 				/* Nope, not in the valid range; dump it */
1962 		dump_packet:
1963 				sctp_set_rwnd(stcb, asoc);
1964 				if ((asoc->cnt_on_all_streams +
1965 				    asoc->cnt_on_reasm_queue +
1966 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1967 					SCTP_STAT_INCR(sctps_datadropchklmt);
1968 				} else {
1969 					SCTP_STAT_INCR(sctps_datadroprwnd);
1970 				}
1971 				*break_flag = 1;
1972 				return (0);
1973 			}
1974 		} else {
1975 			if (control == NULL) {
1976 				goto dump_packet;
1977 			}
1978 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1979 				goto dump_packet;
1980 			}
1981 		}
1982 	}
1983 #ifdef SCTP_ASOCLOG_OF_TSNS
1984 	SCTP_TCB_LOCK_ASSERT(stcb);
1985 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1986 		asoc->tsn_in_at = 0;
1987 		asoc->tsn_in_wrapped = 1;
1988 	}
1989 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1990 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1991 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1992 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1993 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1994 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1995 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1996 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1997 	asoc->tsn_in_at++;
1998 #endif
1999 	/*
2000 	 * Before we continue lets validate that we are not being fooled by
2001 	 * an evil attacker. We can only have Nk chunks based on our TSN
2002 	 * spread allowed by the mapping array N * 8 bits, so there is no
2003 	 * way our stream sequence numbers could have wrapped. We of course
2004 	 * only validate the FIRST fragment so the bit must be set.
2005 	 */
2006 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2007 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2008 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2009 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2010 		/* The incoming sseq is behind where we last delivered? */
2011 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2012 		    mid, asoc->strmin[sid].last_mid_delivered);
2013 
2014 		if (asoc->idata_supported) {
2015 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2016 			    asoc->strmin[sid].last_mid_delivered,
2017 			    tsn,
2018 			    sid,
2019 			    mid);
2020 		} else {
2021 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2022 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2023 			    tsn,
2024 			    sid,
2025 			    (uint16_t)mid);
2026 		}
2027 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2028 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2029 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2030 		*abort_flag = 1;
2031 		return (0);
2032 	}
2033 	if (chk_type == SCTP_IDATA) {
2034 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2035 	} else {
2036 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2037 	}
2038 	if (last_chunk == 0) {
2039 		if (chk_type == SCTP_IDATA) {
2040 			dmbuf = SCTP_M_COPYM(*m,
2041 			    (offset + sizeof(struct sctp_idata_chunk)),
2042 			    the_len, M_NOWAIT);
2043 		} else {
2044 			dmbuf = SCTP_M_COPYM(*m,
2045 			    (offset + sizeof(struct sctp_data_chunk)),
2046 			    the_len, M_NOWAIT);
2047 		}
2048 #ifdef SCTP_MBUF_LOGGING
2049 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2050 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2051 		}
2052 #endif
2053 	} else {
2054 		/* We can steal the last chunk */
2055 		int l_len;
2056 
2057 		dmbuf = *m;
2058 		/* lop off the top part */
2059 		if (chk_type == SCTP_IDATA) {
2060 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2061 		} else {
2062 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2063 		}
2064 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2065 			l_len = SCTP_BUF_LEN(dmbuf);
2066 		} else {
2067 			/*
2068 			 * need to count up the size; hopefully we do not hit
2069 			 * this too often :-0
2070 			 */
2071 			struct mbuf *lat;
2072 
2073 			l_len = 0;
2074 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2075 				l_len += SCTP_BUF_LEN(lat);
2076 			}
2077 		}
2078 		if (l_len > the_len) {
2079 			/* Trim the round-off bytes at the end, too */
2080 			m_adj(dmbuf, -(l_len - the_len));
2081 		}
2082 	}
2083 	if (dmbuf == NULL) {
2084 		SCTP_STAT_INCR(sctps_nomem);
2085 		return (0);
2086 	}
2087 	/*
2088 	 * Now no matter what, we need a control; get one if we don't have
2089 	 * one (we may have gotten it above when we found the message was
2090 	 * fragmented).
2091 	 */
2092 	if (control == NULL) {
2093 		sctp_alloc_a_readq(stcb, control);
2094 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2095 		    ppid,
2096 		    sid,
2097 		    chk_flags,
2098 		    NULL, fsn, mid);
2099 		if (control == NULL) {
2100 			SCTP_STAT_INCR(sctps_nomem);
2101 			return (0);
2102 		}
2103 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2104 			struct mbuf *mm;
2105 
2106 			control->data = dmbuf;
2107 			for (mm = control->data; mm; mm = mm->m_next) {
2108 				control->length += SCTP_BUF_LEN(mm);
2109 			}
2110 			control->tail_mbuf = NULL;
2111 			control->end_added = 1;
2112 			control->last_frag_seen = 1;
2113 			control->first_frag_seen = 1;
2114 			control->fsn_included = fsn;
2115 			control->top_fsn = fsn;
2116 		}
2117 		created_control = 1;
2118 	}
2119 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2120 	    chk_flags, ordered, mid, control);
2121 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2122 	    TAILQ_EMPTY(&asoc->resetHead) &&
2123 	    ((ordered == 0) ||
2124 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2125 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2126 		/* Candidate for express delivery */
2127 		/*
2128 		 * It's not fragmented, no PD-API is up, nothing is in the
2129 		 * delivery queue, it's unordered OR ordered and the next to
2130 		 * deliver AND nothing else is stuck on the stream queue,
2131 		 * and there is room for it in the socket buffer. Let's just
2132 		 * stuff it up the buffer....
2133 		 */
2134 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2135 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2136 			asoc->highest_tsn_inside_nr_map = tsn;
2137 		}
2138 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2139 		    control, mid);
2140 
2141 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2142 		    control, &stcb->sctp_socket->so_rcv,
2143 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2144 
2145 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2146 			/* for ordered, bump what we delivered */
2147 			asoc->strmin[sid].last_mid_delivered++;
2148 		}
2149 		SCTP_STAT_INCR(sctps_recvexpress);
2150 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2151 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2152 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2153 		}
2154 		control = NULL;
2155 		goto finish_express_del;
2156 	}
2157 
2158 	/* Now will we need a chunk too? */
2159 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2160 		sctp_alloc_a_chunk(stcb, chk);
2161 		if (chk == NULL) {
2162 			/* No memory so we drop the chunk */
2163 			SCTP_STAT_INCR(sctps_nomem);
2164 			if (last_chunk == 0) {
2165 				/* we copied it, free the copy */
2166 				sctp_m_freem(dmbuf);
2167 			}
2168 			return (0);
2169 		}
2170 		chk->rec.data.tsn = tsn;
2171 		chk->no_fr_allowed = 0;
2172 		chk->rec.data.fsn = fsn;
2173 		chk->rec.data.mid = mid;
2174 		chk->rec.data.sid = sid;
2175 		chk->rec.data.ppid = ppid;
2176 		chk->rec.data.context = stcb->asoc.context;
2177 		chk->rec.data.doing_fast_retransmit = 0;
2178 		chk->rec.data.rcv_flags = chk_flags;
2179 		chk->asoc = asoc;
2180 		chk->send_size = the_len;
2181 		chk->whoTo = net;
2182 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2183 		    chk,
2184 		    control, mid);
2185 		atomic_add_int(&net->ref_count, 1);
2186 		chk->data = dmbuf;
2187 	}
2188 	/* Set the appropriate TSN mark */
2189 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2190 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2191 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2192 			asoc->highest_tsn_inside_nr_map = tsn;
2193 		}
2194 	} else {
2195 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2196 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2197 			asoc->highest_tsn_inside_map = tsn;
2198 		}
2199 	}
2200 	/* Now is it complete (i.e. not fragmented)? */
2201 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2202 		/*
2203 		 * Special check for when streams are resetting. We could be
2204 		 * smarter about this and check the actual stream to see if
2205 		 * it is not being reset... that way we would not create
2206 		 * head-of-line blocking (HOLB) between streams being reset
2207 		 * and those not being reset.
2208 		 *
2209 		 */
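		/*
		 * Note: chunks whose TSN lies beyond a pending stream-reset
		 * point are parked on the pending_reply_queue, kept sorted
		 * by TSN, and are only pushed to the streams once the reset
		 * completes (see the resetHead handling after
		 * finish_express_del below).
		 */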
2210 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2211 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2212 			/*
2213 			 * yep, it's past where we need to reset... go ahead
2214 			 * and queue it.
2215 			 */
2216 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2217 				/* first one on */
2218 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2219 			} else {
2220 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2221 				unsigned char inserted = 0;
2222 
2223 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2224 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2225 
2226 						continue;
2227 					} else {
2228 						/* found it */
2229 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2230 						inserted = 1;
2231 						break;
2232 					}
2233 				}
2234 				if (inserted == 0) {
2235 					/*
2236 					 * nothing in the queue has a
2237 					 * larger TSN, so this one must
2238 					 * be put at the end.
2239 					 */
2240 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2241 				}
2242 			}
2243 			goto finish_express_del;
2244 		}
2245 		if (chk_flags & SCTP_DATA_UNORDERED) {
2246 			/* queue directly into socket buffer */
2247 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2248 			    control, mid);
2249 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2250 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2251 			    control,
2252 			    &stcb->sctp_socket->so_rcv, 1,
2253 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2254 
2255 		} else {
2256 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2257 			    mid);
2258 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2259 			if (*abort_flag) {
2260 				if (last_chunk) {
2261 					*m = NULL;
2262 				}
2263 				return (0);
2264 			}
2265 		}
2266 		goto finish_express_del;
2267 	}
2268 	/* If we reach here its a reassembly */
2269 	need_reasm_check = 1;
2270 	SCTPDBG(SCTP_DEBUG_XXX,
2271 	    "Queue data to stream for reasm control: %p MID: %u\n",
2272 	    control, mid);
2273 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2274 	if (*abort_flag) {
2275 		/*
2276 		 * the assoc is now gone and chk was put onto the reasm
2277 		 * queue, which has all been freed.
2278 		 */
2279 		if (last_chunk) {
2280 			*m = NULL;
2281 		}
2282 		return (0);
2283 	}
2284 finish_express_del:
2285 	/* Here we tidy up things */
2286 	if (tsn == (asoc->cumulative_tsn + 1)) {
2287 		/* Update cum-ack */
2288 		asoc->cumulative_tsn = tsn;
2289 	}
2290 	if (last_chunk) {
2291 		*m = NULL;
2292 	}
2293 	if (ordered) {
2294 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2295 	} else {
2296 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2297 	}
2298 	SCTP_STAT_INCR(sctps_recvdata);
2299 	/* Set it present please */
2300 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2301 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2302 	}
2303 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2306 	}
2307 	if (need_reasm_check) {
2308 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2309 		need_reasm_check = 0;
2310 	}
2311 	/* check the special flag for stream resets */
2312 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2313 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2314 		/*
2315 		 * We have finished working through the backlogged TSNs; now
2316 		 * it is time to reset streams. 1: call the reset function,
2317 		 * 2: free the pending_reply space, 3: distribute any chunks
2318 		 * in the pending_reply_queue.
2319 		 */
2320 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2321 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2322 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2323 		SCTP_FREE(liste, SCTP_M_STRESET);
2324 		/* sa_ignore FREED_MEMORY */
2325 		liste = TAILQ_FIRST(&asoc->resetHead);
2326 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2327 			/* All can be removed */
2328 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2330 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2331 				if (*abort_flag) {
2332 					return (0);
2333 				}
2334 				if (need_reasm_check) {
2335 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2336 					need_reasm_check = 0;
2337 				}
2338 			}
2339 		} else {
2340 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2341 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2342 					break;
2343 				}
2344 				/*
2345 				 * if control->sinfo_tsn is <= liste->tsn we
2346 				 * can process it, which is the negation of
2347 				 * control->sinfo_tsn > liste->tsn
2348 				 */
2349 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2350 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2351 				if (*abort_flag) {
2352 					return (0);
2353 				}
2354 				if (need_reasm_check) {
2355 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2356 					need_reasm_check = 0;
2357 				}
2358 			}
2359 		}
2360 	}
2361 	return (1);
2362 }
2363 
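/*
 * Note: sctp_map_lookup_tab[val] is the number of consecutive 1-bits in
 * val counting up from the least significant bit. For example, val 0x17
 * (binary 00010111) has three trailing ones, so the table yields 3, and
 * 0xff yields 8. sctp_slide_mapping_arrays() uses it to count how many
 * TSNs starting at mapping_array_base_tsn were received in sequence.
 */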
2364 static const int8_t sctp_map_lookup_tab[256] = {
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 5,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 6,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 4,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 5,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 7,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 4,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 5,
2385 	0, 1, 0, 2, 0, 1, 0, 3,
2386 	0, 1, 0, 2, 0, 1, 0, 4,
2387 	0, 1, 0, 2, 0, 1, 0, 3,
2388 	0, 1, 0, 2, 0, 1, 0, 6,
2389 	0, 1, 0, 2, 0, 1, 0, 3,
2390 	0, 1, 0, 2, 0, 1, 0, 4,
2391 	0, 1, 0, 2, 0, 1, 0, 3,
2392 	0, 1, 0, 2, 0, 1, 0, 5,
2393 	0, 1, 0, 2, 0, 1, 0, 3,
2394 	0, 1, 0, 2, 0, 1, 0, 4,
2395 	0, 1, 0, 2, 0, 1, 0, 3,
2396 	0, 1, 0, 2, 0, 1, 0, 8
2397 };
2398 
2399 
2400 void
2401 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2402 {
2403 	/*
2404 	 * Now we also need to check the mapping array in a couple of ways.
2405 	 * 1) Did we move the cum-ack point?
2406 	 *
2407 	 * When you first glance at this you might think that all entries
2408 	 * that make up the position of the cum-ack would be in the
2409 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2410 	 * deliverable. That's true with one exception: when it's a fragmented
2411 	 * message we may not deliver the data until some threshold (or all
2412 	 * of it) is in place. So we must OR the nr_mapping_array and
2413 	 * mapping_array to get a true picture of the cum-ack.
2414 	 */
2415 	struct sctp_association *asoc;
2416 	int at;
2417 	uint8_t val;
2418 	int slide_from, slide_end, lgap, distance;
2419 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2420 
2421 	asoc = &stcb->asoc;
2422 
2423 	old_cumack = asoc->cumulative_tsn;
2424 	old_base = asoc->mapping_array_base_tsn;
2425 	old_highest = asoc->highest_tsn_inside_map;
2426 	/*
2427 	 * We could probably improve this a small bit by calculating the
2428 	 * offset of the current cum-ack as the starting point.
2429 	 */
2430 	at = 0;
2431 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2432 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2433 		if (val == 0xff) {
2434 			at += 8;
2435 		} else {
2436 			/* there is a 0 bit */
2437 			at += sctp_map_lookup_tab[val];
2438 			break;
2439 		}
2440 	}
2441 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
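	/*
	 * Example: if the first OR'd byte is 0xff and the second is 0x07,
	 * the loop above yields at = 8 + 3 = 11, i.e. the 11 TSNs starting
	 * at mapping_array_base_tsn have all been received, and the
	 * cum-ack becomes mapping_array_base_tsn + 10.
	 */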
2442 
2443 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2444 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2445 #ifdef INVARIANTS
2446 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2447 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2448 #else
2449 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2450 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 		sctp_print_mapping_array(asoc);
2452 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2453 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2454 		}
2455 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2456 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2457 #endif
2458 	}
2459 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2460 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2461 	} else {
2462 		highest_tsn = asoc->highest_tsn_inside_map;
2463 	}
2464 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2465 		/* The complete array was completed by a single FR */
2466 		/* highest becomes the cum-ack */
2467 		int clr;
2468 #ifdef INVARIANTS
2469 		unsigned int i;
2470 #endif
2471 
2472 		/* clear the array */
2473 		clr = ((at + 7) >> 3);
2474 		if (clr > asoc->mapping_array_size) {
2475 			clr = asoc->mapping_array_size;
2476 		}
2477 		memset(asoc->mapping_array, 0, clr);
2478 		memset(asoc->nr_mapping_array, 0, clr);
2479 #ifdef INVARIANTS
2480 		for (i = 0; i < asoc->mapping_array_size; i++) {
2481 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2482 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2483 				sctp_print_mapping_array(asoc);
2484 			}
2485 		}
2486 #endif
2487 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2488 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2489 	} else if (at >= 8) {
2490 		/* we can slide the mapping array down */
2491 		/* slide_from holds where we hit the first NON 0xff byte */
2492 
2493 		/*
2494 		 * now calculate the ceiling of the move using our highest
2495 		 * TSN value
2496 		 */
2497 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2498 		slide_end = (lgap >> 3);
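		/*
		 * Note: slide_from is the byte holding the first hole and
		 * slide_end the byte holding the highest TSN seen. E.g.
		 * with slide_from 2 and slide_end 5 the four bytes 2..5
		 * are copied down to offsets 0..3 below and the base TSN
		 * advances by slide_from * 8 = 16 TSNs.
		 */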
2499 		if (slide_end < slide_from) {
2500 			sctp_print_mapping_array(asoc);
2501 #ifdef INVARIANTS
2502 			panic("impossible slide");
2503 #else
2504 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2505 			    lgap, slide_end, slide_from, at);
2506 			return;
2507 #endif
2508 		}
2509 		if (slide_end > asoc->mapping_array_size) {
2510 #ifdef INVARIANTS
2511 			panic("would overrun buffer");
2512 #else
2513 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2514 			    asoc->mapping_array_size, slide_end);
2515 			slide_end = asoc->mapping_array_size;
2516 #endif
2517 		}
2518 		distance = (slide_end - slide_from) + 1;
2519 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 			sctp_log_map(old_base, old_cumack, old_highest,
2521 			    SCTP_MAP_PREPARE_SLIDE);
2522 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2523 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2524 		}
2525 		if (distance + slide_from > asoc->mapping_array_size ||
2526 		    distance < 0) {
2527 			/*
2528 			 * Here we do NOT slide forward the array so that
2529 			 * hopefully when more data comes in to fill it up
2530 			 * we will be able to slide it forward. Really I
2531 			 * don't think this should happen :-0
2532 			 */
2533 
2534 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2535 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2536 				    (uint32_t)asoc->mapping_array_size,
2537 				    SCTP_MAP_SLIDE_NONE);
2538 			}
2539 		} else {
2540 			int ii;
2541 
2542 			for (ii = 0; ii < distance; ii++) {
2543 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2544 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2545 
2546 			}
2547 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2548 				asoc->mapping_array[ii] = 0;
2549 				asoc->nr_mapping_array[ii] = 0;
2550 			}
2551 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2552 				asoc->highest_tsn_inside_map += (slide_from << 3);
2553 			}
2554 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2555 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2556 			}
2557 			asoc->mapping_array_base_tsn += (slide_from << 3);
2558 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2559 				sctp_log_map(asoc->mapping_array_base_tsn,
2560 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2561 				    SCTP_MAP_SLIDE_RESULT);
2562 			}
2563 		}
2564 	}
2565 }
2566 
2567 void
2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2569 {
2570 	struct sctp_association *asoc;
2571 	uint32_t highest_tsn;
2572 	int is_a_gap;
2573 
2574 	sctp_slide_mapping_arrays(stcb);
2575 	asoc = &stcb->asoc;
2576 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2577 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2578 	} else {
2579 		highest_tsn = asoc->highest_tsn_inside_map;
2580 	}
2581 	/* Is there a gap now? */
2582 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2583 
2584 	/*
2585 	 * Now we need to see if we need to queue a sack or just start the
2586 	 * timer (if allowed).
2587 	 */
2588 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2589 		/*
2590 		 * Ok, special case for the SHUTDOWN-SENT state: here we make
2591 		 * sure the SACK timer is off and instead send a SHUTDOWN and a
2592 		 * SACK.
2593 		 */
2594 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2595 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2596 			    stcb->sctp_ep, stcb, NULL,
2597 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2598 		}
2599 		sctp_send_shutdown(stcb,
2600 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2601 		if (is_a_gap) {
2602 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2603 		}
2604 	} else {
2605 		/*
2606 		 * CMT DAC algorithm: increase number of packets received
2607 		 * since last ack
2608 		 */
2609 		stcb->asoc.cmt_dac_pkts_rcvd++;
2610 
2611 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2612 							 * SACK */
2613 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2614 							 * longer is one */
2615 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2616 		    (is_a_gap) ||	/* is still a gap */
2617 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2618 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2619 		    ) {
2620 
2621 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 			    (stcb->asoc.send_sack == 0) &&
2624 			    (stcb->asoc.numduptsns == 0) &&
2625 			    (stcb->asoc.delayed_ack) &&
2626 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2627 
2628 				/*
2629 				 * CMT DAC algorithm: With CMT, delay acks
2630 				 * even in the face of reordering.
2631 				 *
2632 				 * Therefore, acks that do not have to be
2633 				 * sent because of the above reasons will
2634 				 * be delayed. That is, acks that would
2635 				 * have been sent due to gap reports will
2636 				 * be delayed with DAC. Start the delayed
2637 				 * ack timer.
2638 				 */
2639 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2640 				    stcb->sctp_ep, stcb, NULL);
2641 			} else {
2642 				/*
2643 				 * Ok we must build a SACK since the timer
2644 				 * is pending, we got our first packet OR
2645 				 * there are gaps or duplicates.
2646 				 */
2647 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2648 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2649 			}
2650 		} else {
2651 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2652 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2653 				    stcb->sctp_ep, stcb, NULL);
2654 			}
2655 		}
2656 	}
2657 }
2658 
2659 int
2660 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2661     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2662     struct sctp_nets *net, uint32_t *high_tsn)
2663 {
2664 	struct sctp_chunkhdr *ch, chunk_buf;
2665 	struct sctp_association *asoc;
2666 	int num_chunks = 0;	/* number of control chunks processed */
2667 	int stop_proc = 0;
2668 	int break_flag, last_chunk;
2669 	int abort_flag = 0, was_a_gap;
2670 	struct mbuf *m;
2671 	uint32_t highest_tsn;
2672 	uint16_t chk_length;
2673 
2674 	/* set the rwnd */
2675 	sctp_set_rwnd(stcb, &stcb->asoc);
2676 
2677 	m = *mm;
2678 	SCTP_TCB_LOCK_ASSERT(stcb);
2679 	asoc = &stcb->asoc;
2680 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2681 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2682 	} else {
2683 		highest_tsn = asoc->highest_tsn_inside_map;
2684 	}
2685 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2686 	/*
2687 	 * setup where we got the last DATA packet from for any SACK that
2688 	 * may need to go out. Don't bump the net. This is done ONLY when a
2689 	 * chunk is assigned.
2690 	 */
2691 	asoc->last_data_chunk_from = net;
2692 
2693 	/*-
2694 	 * Now before we proceed we must figure out if this is a wasted
2695 	 * cluster... i.e. it is a small packet sent in and yet the driver
2696 	 * underneath allocated a full cluster for it. If so we must copy it
2697 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2698 	 * with cluster starvation. Note for __Panda__ we don't do this
2699 	 * since it has clusters all the way down to 64 bytes.
2700 	 */
2701 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2702 		/* we only handle mbufs that are singletons.. not chains */
2703 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2704 		if (m) {
2705 			/* ok lets see if we can copy the data up */
2706 			caddr_t *from, *to;
2707 
2708 			/* get the pointers and copy */
2709 			to = mtod(m, caddr_t *);
2710 			from = mtod((*mm), caddr_t *);
2711 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2712 			/* copy the length and free up the old */
2713 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2714 			sctp_m_freem(*mm);
2715 			/* success, back copy */
2716 			*mm = m;
2717 		} else {
2718 			/* We are in trouble in the mbuf world .. yikes */
2719 			m = *mm;
2720 		}
2721 	}
2722 	/* get pointer to the first chunk header */
2723 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2724 	    sizeof(struct sctp_chunkhdr),
2725 	    (uint8_t *)&chunk_buf);
2726 	if (ch == NULL) {
2727 		return (1);
2728 	}
2729 	/*
2730 	 * process all DATA chunks...
2731 	 */
2732 	*high_tsn = asoc->cumulative_tsn;
2733 	break_flag = 0;
2734 	asoc->data_pkts_seen++;
2735 	while (stop_proc == 0) {
2736 		/* validate chunk length */
2737 		chk_length = ntohs(ch->chunk_length);
2738 		if (length - *offset < chk_length) {
2739 			/* all done, mutilated chunk */
2740 			stop_proc = 1;
2741 			continue;
2742 		}
2743 		if ((asoc->idata_supported == 1) &&
2744 		    (ch->chunk_type == SCTP_DATA)) {
2745 			struct mbuf *op_err;
2746 			char msg[SCTP_DIAG_INFO_LEN];
2747 
2748 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2749 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2751 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2752 			return (2);
2753 		}
2754 		if ((asoc->idata_supported == 0) &&
2755 		    (ch->chunk_type == SCTP_IDATA)) {
2756 			struct mbuf *op_err;
2757 			char msg[SCTP_DIAG_INFO_LEN];
2758 
2759 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2760 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2761 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2762 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2763 			return (2);
2764 		}
2765 		if ((ch->chunk_type == SCTP_DATA) ||
2766 		    (ch->chunk_type == SCTP_IDATA)) {
2767 			uint16_t clen;
2768 
2769 			if (ch->chunk_type == SCTP_DATA) {
2770 				clen = sizeof(struct sctp_data_chunk);
2771 			} else {
2772 				clen = sizeof(struct sctp_idata_chunk);
2773 			}
2774 			if (chk_length < clen) {
2775 				/*
2776 				 * Need to send an abort since we had an
2777 				 * invalid data chunk.
2778 				 */
2779 				struct mbuf *op_err;
2780 				char msg[SCTP_DIAG_INFO_LEN];
2781 
2782 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2783 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2784 				    chk_length);
2785 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2786 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2787 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2788 				return (2);
2789 			}
2790 #ifdef SCTP_AUDITING_ENABLED
2791 			sctp_audit_log(0xB1, 0);
2792 #endif
2793 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2794 				last_chunk = 1;
2795 			} else {
2796 				last_chunk = 0;
2797 			}
2798 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2799 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2800 			    last_chunk, ch->chunk_type)) {
2801 				num_chunks++;
2802 			}
2803 			if (abort_flag)
2804 				return (2);
2805 
2806 			if (break_flag) {
2807 				/*
2808 				 * Set because we ran out of rwnd space and
2809 				 * have no drop-report space left.
2810 				 */
2811 				stop_proc = 1;
2812 				continue;
2813 			}
2814 		} else {
2815 			/* not a data chunk in the data region */
2816 			switch (ch->chunk_type) {
2817 			case SCTP_INITIATION:
2818 			case SCTP_INITIATION_ACK:
2819 			case SCTP_SELECTIVE_ACK:
2820 			case SCTP_NR_SELECTIVE_ACK:
2821 			case SCTP_HEARTBEAT_REQUEST:
2822 			case SCTP_HEARTBEAT_ACK:
2823 			case SCTP_ABORT_ASSOCIATION:
2824 			case SCTP_SHUTDOWN:
2825 			case SCTP_SHUTDOWN_ACK:
2826 			case SCTP_OPERATION_ERROR:
2827 			case SCTP_COOKIE_ECHO:
2828 			case SCTP_COOKIE_ACK:
2829 			case SCTP_ECN_ECHO:
2830 			case SCTP_ECN_CWR:
2831 			case SCTP_SHUTDOWN_COMPLETE:
2832 			case SCTP_AUTHENTICATION:
2833 			case SCTP_ASCONF_ACK:
2834 			case SCTP_PACKET_DROPPED:
2835 			case SCTP_STREAM_RESET:
2836 			case SCTP_FORWARD_CUM_TSN:
2837 			case SCTP_ASCONF:
2838 				{
2839 					/*
2840 					 * Now, what do we do with KNOWN
2841 					 * chunks that are NOT in the right
2842 					 * place?
2843 					 *
2844 					 * For now, we treat this as a
2845 					 * protocol violation and abort the
2846 					 * association (see below). We may
2847 					 * later want sysctl stuff to switch
2848 					 * out and process them instead.
2849 					 */
2850 					struct mbuf *op_err;
2851 					char msg[SCTP_DIAG_INFO_LEN];
2852 
2853 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2854 					    ch->chunk_type);
2855 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2856 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2857 					return (2);
2858 				}
2859 			default:
2860 				/*
2861 				 * Unknown chunk type: use bit rules after
2862 				 * checking length
2863 				 */
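				/*
				 * Note: per RFC 4960, Section 3.2 the two
				 * high-order bits of an unrecognized chunk
				 * type select the action: 0x40 set means
				 * report it in an ERROR chunk, 0x80 set
				 * means skip it and keep processing; with
				 * 0x80 clear the rest of the packet is
				 * discarded. The checks below implement
				 * exactly that.
				 */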
2864 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2865 					/*
2866 					 * Need to send an abort since we
2867 					 * had an invalid chunk.
2868 					 */
2869 					struct mbuf *op_err;
2870 					char msg[SCTP_DIAG_INFO_LEN];
2871 
2872 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2873 					    chk_length);
2874 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2875 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2876 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2877 					return (2);
2878 				}
2879 				if (ch->chunk_type & 0x40) {
2880 					/* Add an error report to the queue */
2881 					struct mbuf *op_err;
2882 					struct sctp_gen_error_cause *cause;
2883 
2884 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2885 					    0, M_NOWAIT, 1, MT_DATA);
2886 					if (op_err != NULL) {
2887 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2888 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2889 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2890 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2891 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2892 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2893 							sctp_queue_op_err(stcb, op_err);
2894 						} else {
2895 							sctp_m_freem(op_err);
2896 						}
2897 					}
2898 				}
2899 				if ((ch->chunk_type & 0x80) == 0) {
2900 					/* discard the rest of this packet */
2901 					stop_proc = 1;
2902 				}	/* else skip this bad chunk and
2903 					 * continue... */
2904 				break;
2905 			}	/* switch of chunk type */
2906 		}
2907 		*offset += SCTP_SIZE32(chk_length);
2908 		if ((*offset >= length) || stop_proc) {
2909 			/* no more data left in the mbuf chain */
2910 			stop_proc = 1;
2911 			continue;
2912 		}
2913 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2914 		    sizeof(struct sctp_chunkhdr),
2915 		    (uint8_t *)&chunk_buf);
2916 		if (ch == NULL) {
2917 			*offset = length;
2918 			stop_proc = 1;
2919 			continue;
2920 		}
2921 	}
2922 	if (break_flag) {
2923 		/*
2924 		 * we need to report rwnd overrun drops.
2925 		 */
2926 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2927 	}
2928 	if (num_chunks) {
2929 		/*
2930 		 * Did we get data? If so, update the time for auto-close and
2931 		 * give peer credit for being alive.
2932 		 */
2933 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2934 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2935 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2936 			    stcb->asoc.overall_error_count,
2937 			    0,
2938 			    SCTP_FROM_SCTP_INDATA,
2939 			    __LINE__);
2940 		}
2941 		stcb->asoc.overall_error_count = 0;
2942 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2943 	}
2944 	/* now service all of the reassm queue if needed */
2945 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2946 		/* Assure that we ack right away */
2947 		stcb->asoc.send_sack = 1;
2948 	}
2949 	/* Start a sack timer or QUEUE a SACK for sending */
2950 	sctp_sack_check(stcb, was_a_gap);
2951 	return (0);
2952 }
2953 
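/*
 * Note: a gap ack block (frag_strt, frag_end) acknowledges the TSN range
 * last_tsn + frag_strt .. last_tsn + frag_end, where last_tsn is the
 * cumulative TSN ack of the SACK. For example, a cum-ack of 100 with a
 * block (2, 4) acks TSNs 102, 103 and 104.
 */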
2954 static int
2955 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2956     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2957     int *num_frs,
2958     uint32_t *biggest_newly_acked_tsn,
2959     uint32_t *this_sack_lowest_newack,
2960     int *rto_ok)
2961 {
2962 	struct sctp_tmit_chunk *tp1;
2963 	unsigned int theTSN;
2964 	int j, wake_him = 0, circled = 0;
2965 
2966 	/* Recover the tp1 we last saw */
2967 	tp1 = *p_tp1;
2968 	if (tp1 == NULL) {
2969 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2970 	}
2971 	for (j = frag_strt; j <= frag_end; j++) {
2972 		theTSN = j + last_tsn;
2973 		while (tp1) {
2974 			if (tp1->rec.data.doing_fast_retransmit)
2975 				(*num_frs) += 1;
2976 
2977 			/*-
2978 			 * CMT: CUCv2 algorithm. For each TSN being
2979 			 * processed from the sent queue, track the
2980 			 * next expected pseudo-cumack, or
2981 			 * rtx_pseudo_cumack, if required. Separate
2982 			 * cumack trackers for first transmissions,
2983 			 * and retransmissions.
2984 			 */
2985 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2986 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2987 			    (tp1->snd_count == 1)) {
2988 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2989 				tp1->whoTo->find_pseudo_cumack = 0;
2990 			}
2991 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2992 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2993 			    (tp1->snd_count > 1)) {
2994 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2995 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2996 			}
2997 			if (tp1->rec.data.tsn == theTSN) {
2998 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2999 					/*-
3000 					 * must be held until
3001 					 * cum-ack passes
3002 					 */
3003 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3004 						/*-
3005 						 * If it is less than RESEND, it is
3006 						 * now no longer in flight.
3007 						 * Higher values may already be set
3008 						 * via previous Gap Ack Blocks...
3009 						 * i.e. ACKED or RESEND.
3010 						 */
3011 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3012 						    *biggest_newly_acked_tsn)) {
3013 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3014 						}
3015 						/*-
3016 						 * CMT: SFR algo (and HTNA) - set
3017 						 * saw_newack to 1 for dest being
3018 						 * newly acked. update
3019 						 * this_sack_highest_newack if
3020 						 * appropriate.
3021 						 */
3022 						if (tp1->rec.data.chunk_was_revoked == 0)
3023 							tp1->whoTo->saw_newack = 1;
3024 
3025 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3026 						    tp1->whoTo->this_sack_highest_newack)) {
3027 							tp1->whoTo->this_sack_highest_newack =
3028 							    tp1->rec.data.tsn;
3029 						}
3030 						/*-
3031 						 * CMT DAC algo: also update
3032 						 * this_sack_lowest_newack
3033 						 */
3034 						if (*this_sack_lowest_newack == 0) {
3035 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3036 								sctp_log_sack(*this_sack_lowest_newack,
3037 								    last_tsn,
3038 								    tp1->rec.data.tsn,
3039 								    0,
3040 								    0,
3041 								    SCTP_LOG_TSN_ACKED);
3042 							}
3043 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3044 						}
3045 						/*-
3046 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3047 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3048 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3049 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3050 						 * Separate pseudo_cumack trackers for first transmissions and
3051 						 * retransmissions.
3052 						 */
3053 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3054 							if (tp1->rec.data.chunk_was_revoked == 0) {
3055 								tp1->whoTo->new_pseudo_cumack = 1;
3056 							}
3057 							tp1->whoTo->find_pseudo_cumack = 1;
3058 						}
3059 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3060 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3061 						}
3062 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3063 							if (tp1->rec.data.chunk_was_revoked == 0) {
3064 								tp1->whoTo->new_pseudo_cumack = 1;
3065 							}
3066 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3067 						}
3068 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3069 							sctp_log_sack(*biggest_newly_acked_tsn,
3070 							    last_tsn,
3071 							    tp1->rec.data.tsn,
3072 							    frag_strt,
3073 							    frag_end,
3074 							    SCTP_LOG_TSN_ACKED);
3075 						}
3076 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3077 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3078 							    tp1->whoTo->flight_size,
3079 							    tp1->book_size,
3080 							    (uint32_t)(uintptr_t)tp1->whoTo,
3081 							    tp1->rec.data.tsn);
3082 						}
3083 						sctp_flight_size_decrease(tp1);
3084 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3085 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3086 							    tp1);
3087 						}
3088 						sctp_total_flight_decrease(stcb, tp1);
3089 
3090 						tp1->whoTo->net_ack += tp1->send_size;
3091 						if (tp1->snd_count < 2) {
3092 							/*-
3093 							 * True non-retransmitted chunk
3094 							 */
3095 							tp1->whoTo->net_ack2 += tp1->send_size;
3096 
3097 							/*-
3098 							 * update RTO too?
3099 							 */
3100 							if (tp1->do_rtt) {
3101 								if (*rto_ok) {
3102 									tp1->whoTo->RTO =
3103 									    sctp_calculate_rto(stcb,
3104 									    &stcb->asoc,
3105 									    tp1->whoTo,
3106 									    &tp1->sent_rcv_time,
3107 									    SCTP_RTT_FROM_DATA);
3108 									*rto_ok = 0;
3109 								}
3110 								if (tp1->whoTo->rto_needed == 0) {
3111 									tp1->whoTo->rto_needed = 1;
3112 								}
3113 								tp1->do_rtt = 0;
3114 							}
3115 						}
3116 
3117 					}
3118 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3119 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3120 						    stcb->asoc.this_sack_highest_gap)) {
3121 							stcb->asoc.this_sack_highest_gap =
3122 							    tp1->rec.data.tsn;
3123 						}
3124 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3125 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3126 #ifdef SCTP_AUDITING_ENABLED
3127 							sctp_audit_log(0xB2,
3128 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3129 #endif
3130 						}
3131 					}
3132 					/*-
3133 					 * All chunks NOT UNSENT fall through here and are marked
3134 					 * (leave PR-SCTP ones that are to skip alone though)
3135 					 */
3136 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3137 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3138 						tp1->sent = SCTP_DATAGRAM_MARKED;
3139 					}
3140 					if (tp1->rec.data.chunk_was_revoked) {
3141 						/* deflate the cwnd */
3142 						tp1->whoTo->cwnd -= tp1->book_size;
3143 						tp1->rec.data.chunk_was_revoked = 0;
3144 					}
3145 					/* NR Sack code here */
3146 					if (nr_sacking &&
3147 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3148 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3149 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3150 #ifdef INVARIANTS
3151 						} else {
3152 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3153 #endif
3154 						}
3155 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3156 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3157 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3158 							stcb->asoc.trigger_reset = 1;
3159 						}
3160 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3161 						if (tp1->data) {
3162 							/*
3163 							 * sa_ignore
3164 							 * NO_NULL_CHK
3165 							 */
3166 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3167 							sctp_m_freem(tp1->data);
3168 							tp1->data = NULL;
3169 						}
3170 						wake_him++;
3171 					}
3172 				}
3173 				break;
3174 			}	/* if (tp1->tsn == theTSN) */
3175 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3176 				break;
3177 			}
3178 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3179 			if ((tp1 == NULL) && (circled == 0)) {
3180 				circled++;
3181 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3182 			}
3183 		}		/* end while (tp1) */
3184 		if (tp1 == NULL) {
3185 			circled = 0;
3186 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3187 		}
3188 		/* In case the fragments were not in order we must reset */
3189 	}			/* end for (j = fragStart */
3190 	*p_tp1 = tp1;
3191 	return (wake_him);	/* Return value only used for nr-sack */
3192 }
3193 
3194 
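/*
 * Walk the gap ack blocks carried in a SACK.  Per RFC 4960, each block
 * reports start/end offsets relative to the cumulative TSN ack; for
 * example, a SACK with cum-ack 1000 and a block of start=2, end=3 says
 * TSNs 1002 and 1003 were received.  The num_seg regular blocks come
 * first, followed by num_nr_seg non-renegable (NR) blocks, and the walk
 * of the sent queue restarts when the NR blocks begin.
 */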
3195 static int
3196 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3197     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3198     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3199     int num_seg, int num_nr_seg, int *rto_ok)
3200 {
3201 	struct sctp_gap_ack_block *frag, block;
3202 	struct sctp_tmit_chunk *tp1;
3203 	int i;
3204 	int num_frs = 0;
3205 	int chunk_freed;
3206 	int non_revocable;
3207 	uint16_t frag_strt, frag_end, prev_frag_end;
3208 
3209 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3210 	prev_frag_end = 0;
3211 	chunk_freed = 0;
3212 
3213 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3214 		if (i == num_seg) {
3215 			prev_frag_end = 0;
3216 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3217 		}
3218 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3219 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3220 		*offset += sizeof(block);
3221 		if (frag == NULL) {
3222 			return (chunk_freed);
3223 		}
3224 		frag_strt = ntohs(frag->start);
3225 		frag_end = ntohs(frag->end);
3226 
3227 		if (frag_strt > frag_end) {
3228 			/* This gap report is malformed, skip it. */
3229 			continue;
3230 		}
3231 		if (frag_strt <= prev_frag_end) {
3232 			/* This gap report is not in order, so restart. */
3233 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3234 		}
3235 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3236 			*biggest_tsn_acked = last_tsn + frag_end;
3237 		}
3238 		if (i < num_seg) {
3239 			non_revocable = 0;
3240 		} else {
3241 			non_revocable = 1;
3242 		}
3243 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3244 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3245 		    this_sack_lowest_newack, rto_ok)) {
3246 			chunk_freed = 1;
3247 		}
3248 		prev_frag_end = frag_end;
3249 	}
3250 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3251 		if (num_frs)
3252 			sctp_log_fr(*biggest_tsn_acked,
3253 			    *biggest_newly_acked_tsn,
3254 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3255 	}
3256 	return (chunk_freed);
3257 }
3258 
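/*
 * Look for revoked TSNs: anything above the cum-ack that an earlier
 * SACK reported as received but this one no longer covers has been
 * revoked (the peer reneged), so it must be placed back in flight;
 * chunks that are covered again are simply promoted back to ACKED.
 */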
3259 static void
3260 sctp_check_for_revoked(struct sctp_tcb *stcb,
3261     struct sctp_association *asoc, uint32_t cumack,
3262     uint32_t biggest_tsn_acked)
3263 {
3264 	struct sctp_tmit_chunk *tp1;
3265 
3266 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3267 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3268 			/*
3269 			 * Ok, this guy is either ACKED or MARKED. If it is
3270 			 * ACKED it has been previously acked but not this
3271 			 * time, i.e. revoked. If it is MARKED it was ACKed
3272 			 * again.
3273 			 */
3274 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3275 				break;
3276 			}
3277 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3278 				/* it has been revoked */
3279 				tp1->sent = SCTP_DATAGRAM_SENT;
3280 				tp1->rec.data.chunk_was_revoked = 1;
3281 				/*
3282 				 * We must add this stuff back in to assure
3283 				 * timers and such get started.
3284 				 */
3285 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3286 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3287 					    tp1->whoTo->flight_size,
3288 					    tp1->book_size,
3289 					    (uint32_t)(uintptr_t)tp1->whoTo,
3290 					    tp1->rec.data.tsn);
3291 				}
3292 				sctp_flight_size_increase(tp1);
3293 				sctp_total_flight_increase(stcb, tp1);
3294 				/*
3295 				 * We inflate the cwnd to compensate for our
3296 				 * artificial inflation of the flight_size.
3297 				 */
3298 				tp1->whoTo->cwnd += tp1->book_size;
3299 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3300 					sctp_log_sack(asoc->last_acked_seq,
3301 					    cumack,
3302 					    tp1->rec.data.tsn,
3303 					    0,
3304 					    0,
3305 					    SCTP_LOG_TSN_REVOKED);
3306 				}
3307 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3308 				/* it has been re-acked in this SACK */
3309 				tp1->sent = SCTP_DATAGRAM_ACKED;
3310 			}
3311 		}
3312 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3313 			break;
3314 	}
3315 }
3316 
3317 
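/*
 * Strike pass: chunks below the biggest TSN acked that this SACK did
 * not cover collect a strike (tp1->sent is incremented); once a chunk
 * reaches SCTP_DATAGRAM_RESEND it is scheduled for fast retransmit.
 * The CMT SFR/DAC/HTNA checks below restrict which chunks may
 * legitimately be struck by this SACK.
 */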
3318 static void
3319 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3320     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3321 {
3322 	struct sctp_tmit_chunk *tp1;
3323 	int strike_flag = 0;
3324 	struct timeval now;
3325 	int tot_retrans = 0;
3326 	uint32_t sending_seq;
3327 	struct sctp_nets *net;
3328 	int num_dests_sacked = 0;
3329 
3330 	/*
3331 	 * Select the sending_seq; this is either the next thing ready to be
3332 	 * sent but not yet transmitted, or the next seq we will assign.
3333 	 */
3334 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3335 	if (tp1 == NULL) {
3336 		sending_seq = asoc->sending_seq;
3337 	} else {
3338 		sending_seq = tp1->rec.data.tsn;
3339 	}
3340 
3341 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3342 	if ((asoc->sctp_cmt_on_off > 0) &&
3343 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3344 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3345 			if (net->saw_newack)
3346 				num_dests_sacked++;
3347 		}
3348 	}
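	/*
	 * num_dests_sacked counts the destinations that saw a new ack in
	 * this SACK; a count of 1 means the SACK is not "mixed", which
	 * the DAC checks below require before marking a chunk a second
	 * time.
	 */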
3349 	if (stcb->asoc.prsctp_supported) {
3350 		(void)SCTP_GETTIME_TIMEVAL(&now);
3351 	}
3352 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3353 		strike_flag = 0;
3354 		if (tp1->no_fr_allowed) {
3355 			/* this one had a timeout or something */
3356 			continue;
3357 		}
3358 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3359 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3360 				sctp_log_fr(biggest_tsn_newly_acked,
3361 				    tp1->rec.data.tsn,
3362 				    tp1->sent,
3363 				    SCTP_FR_LOG_CHECK_STRIKE);
3364 		}
3365 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3366 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3367 			/* done */
3368 			break;
3369 		}
3370 		if (stcb->asoc.prsctp_supported) {
3371 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3372 				/* Is it expired? */
3373 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3374 					/* Yes so drop it */
3375 					if (tp1->data != NULL) {
3376 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3377 						    SCTP_SO_NOT_LOCKED);
3378 					}
3379 					continue;
3380 				}
3381 			}
3382 
3383 		}
3384 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3385 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3386 			/* we are beyond the tsn in the sack */
3387 			break;
3388 		}
3389 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3390 			/* either a RESEND, ACKED, or MARKED */
3391 			/* skip */
3392 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3393 				/* Continue striking FWD-TSN chunks */
3394 				tp1->rec.data.fwd_tsn_cnt++;
3395 			}
3396 			continue;
3397 		}
3398 		/*
3399 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3400 		 */
3401 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3402 			/*
3403 			 * No new acks were received for data sent to this
3404 			 * dest. Therefore, according to the SFR algo for
3405 			 * CMT, no data sent to this dest can be marked for
3406 			 * FR using this SACK.
3407 			 */
3408 			continue;
3409 		} else if (tp1->whoTo &&
3410 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3411 			    tp1->whoTo->this_sack_highest_newack) &&
3412 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3413 			/*
3414 			 * CMT: New acks were received for data sent to
3415 			 * this dest. But no new acks were seen for data
3416 			 * sent after tp1. Therefore, according to the SFR
3417 			 * algo for CMT, tp1 cannot be marked for FR using
3418 			 * this SACK. This step covers part of the DAC algo
3419 			 * and the HTNA algo as well.
3420 			 */
3421 			continue;
3422 		}
3423 		/*
3424 		 * Here we check to see if we have already done a FR
3425 		 * and if so we see if the biggest TSN we saw in the sack is
3426 		 * smaller than the recovery point. If so we don't strike
3427 		 * the tsn... otherwise we CAN strike the TSN.
3428 		 */
3429 		/*
3430 		 * @@@ JRI: Check for CMT if (accum_moved &&
3431 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3432 		 * 0)) {
3433 		 */
3434 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3435 			/*
3436 			 * Strike the TSN if in fast-recovery and cum-ack
3437 			 * moved.
3438 			 */
3439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3440 				sctp_log_fr(biggest_tsn_newly_acked,
3441 				    tp1->rec.data.tsn,
3442 				    tp1->sent,
3443 				    SCTP_FR_LOG_STRIKE_CHUNK);
3444 			}
3445 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3446 				tp1->sent++;
3447 			}
3448 			if ((asoc->sctp_cmt_on_off > 0) &&
3449 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3450 				/*
3451 				 * CMT DAC algorithm: If SACK flag is set to
3452 				 * 0, then lowest_newack test will not pass
3453 				 * because it would have been set to the
3454 				 * cumack earlier. If it is not already to be
3455 				 * rtx'd, this is not a mixed sack, and tp1 is
3456 				 * not between two sacked TSNs, then mark it by
3457 				 * one more. NOTE that we are marking by one
3458 				 * additional time since the SACK DAC flag
3459 				 * indicates that two packets have been
3460 				 * received after this missing TSN.
3461 				 */
3462 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3463 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3464 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3465 						sctp_log_fr(16 + num_dests_sacked,
3466 						    tp1->rec.data.tsn,
3467 						    tp1->sent,
3468 						    SCTP_FR_LOG_STRIKE_CHUNK);
3469 					}
3470 					tp1->sent++;
3471 				}
3472 			}
3473 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3474 		    (asoc->sctp_cmt_on_off == 0)) {
3475 			/*
3476 			 * For those that have done a FR we must take
3477 			 * special consideration if we strike. I.e the
3478 			 * biggest_newly_acked must be higher than the
3479 			 * sending_seq at the time we did the FR.
3480 			 */
3481 			if (
3482 #ifdef SCTP_FR_TO_ALTERNATE
3483 			/*
3484 			 * If FR's go to new networks, then we must only do
3485 			 * this for singly homed asoc's. However if the FR's
3486 			 * go to the same network (Armando's work) then it's
3487 			 * ok to FR multiple times.
3488 			 */
3489 			    (asoc->numnets < 2)
3490 #else
3491 			    (1)
3492 #endif
3493 			    ) {
3494 
3495 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3496 				    tp1->rec.data.fast_retran_tsn)) {
3497 					/*
3498 					 * Strike the TSN, since this ack is
3499 					 * beyond where things were when we
3500 					 * did a FR.
3501 					 */
3502 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3503 						sctp_log_fr(biggest_tsn_newly_acked,
3504 						    tp1->rec.data.tsn,
3505 						    tp1->sent,
3506 						    SCTP_FR_LOG_STRIKE_CHUNK);
3507 					}
3508 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3509 						tp1->sent++;
3510 					}
3511 					strike_flag = 1;
3512 					if ((asoc->sctp_cmt_on_off > 0) &&
3513 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3514 						/*
3515 						 * CMT DAC algorithm: If
3516 						 * SACK flag is set to 0,
3517 						 * then lowest_newack test
3518 						 * will not pass because it
3519 						 * would have been set to
3520 						 * the cumack earlier. If
3521 						 * it is not already to be rtx'd,
3522 						 * this is not a mixed sack and
3523 						 * tp1 is not between two
3524 						 * sacked TSNs, then mark it by
3525 						 * one more. NOTE that we
3526 						 * are marking by one
3527 						 * additional time since the
3528 						 * SACK DAC flag indicates
3529 						 * that two packets have
3530 						 * been received after this
3531 						 * missing TSN.
3532 						 */
3533 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3534 						    (num_dests_sacked == 1) &&
3535 						    SCTP_TSN_GT(this_sack_lowest_newack,
3536 						    tp1->rec.data.tsn)) {
3537 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3538 								sctp_log_fr(32 + num_dests_sacked,
3539 								    tp1->rec.data.tsn,
3540 								    tp1->sent,
3541 								    SCTP_FR_LOG_STRIKE_CHUNK);
3542 							}
3543 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3544 								tp1->sent++;
3545 							}
3546 						}
3547 					}
3548 				}
3549 			}
3550 			/*
3551 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3552 			 * algo covers HTNA.
3553 			 */
3554 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3555 		    biggest_tsn_newly_acked)) {
3556 			/*
3557 			 * We don't strike these: this is the HTNA
3558 			 * algorithm, i.e. we don't strike if our TSN is
3559 			 * larger than the Highest TSN Newly Acked.
3560 			 */
3561 			;
3562 		} else {
3563 			/* Strike the TSN */
3564 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3565 				sctp_log_fr(biggest_tsn_newly_acked,
3566 				    tp1->rec.data.tsn,
3567 				    tp1->sent,
3568 				    SCTP_FR_LOG_STRIKE_CHUNK);
3569 			}
3570 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3571 				tp1->sent++;
3572 			}
3573 			if ((asoc->sctp_cmt_on_off > 0) &&
3574 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3575 				/*
3576 				 * CMT DAC algorithm: If SACK flag is set to
3577 				 * 0, then lowest_newack test will not pass
3578 				 * because it would have been set to the
3579 				 * cumack earlier. If it is not already to be
3580 				 * rtx'd, this is not a mixed sack, and tp1 is
3581 				 * not between two sacked TSNs, then mark it by
3582 				 * one more. NOTE that we are marking by one
3583 				 * additional time since the SACK DAC flag
3584 				 * indicates that two packets have been
3585 				 * received after this missing TSN.
3586 				 */
3587 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3588 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3589 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3590 						sctp_log_fr(48 + num_dests_sacked,
3591 						    tp1->rec.data.tsn,
3592 						    tp1->sent,
3593 						    SCTP_FR_LOG_STRIKE_CHUNK);
3594 					}
3595 					tp1->sent++;
3596 				}
3597 			}
3598 		}
3599 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3600 			struct sctp_nets *alt;
3601 
3602 			/* fix counts and things */
3603 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3604 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3605 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3606 				    tp1->book_size,
3607 				    (uint32_t)(uintptr_t)tp1->whoTo,
3608 				    tp1->rec.data.tsn);
3609 			}
3610 			if (tp1->whoTo) {
3611 				tp1->whoTo->net_ack++;
3612 				sctp_flight_size_decrease(tp1);
3613 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3614 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3615 					    tp1);
3616 				}
3617 			}
3618 
3619 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3620 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3621 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3622 			}
3623 			/* add back to the rwnd */
3624 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3625 
3626 			/* remove from the total flight */
3627 			sctp_total_flight_decrease(stcb, tp1);
3628 
3629 			if ((stcb->asoc.prsctp_supported) &&
3630 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3631 				/*
3632 				 * Has it been retransmitted tv_sec times? -
3633 				 * we store the retran count there.
3634 				 */
3635 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3636 					/* Yes, so drop it */
3637 					if (tp1->data != NULL) {
3638 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3639 						    SCTP_SO_NOT_LOCKED);
3640 					}
3641 					/* Make sure to flag we had a FR */
3642 					if (tp1->whoTo != NULL) {
3643 						tp1->whoTo->net_ack++;
3644 					}
3645 					continue;
3646 				}
3647 			}
3648 			/*
3649 			 * SCTP_PRINTF("OK, we are now ready to FR this
3650 			 * guy\n");
3651 			 */
3652 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3653 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3654 				    0, SCTP_FR_MARKED);
3655 			}
3656 			if (strike_flag) {
3657 				/* This is a subsequent FR */
3658 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3659 			}
3660 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3661 			if (asoc->sctp_cmt_on_off > 0) {
3662 				/*
3663 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3664 				 * If CMT is being used, then pick dest with
3665 				 * largest ssthresh for any retransmission.
3666 				 */
3667 				tp1->no_fr_allowed = 1;
3668 				alt = tp1->whoTo;
3669 				/* sa_ignore NO_NULL_CHK */
3670 				if (asoc->sctp_cmt_pf > 0) {
3671 					/*
3672 					 * JRS 5/18/07 - If CMT PF is on,
3673 					 * use the PF version of
3674 					 * find_alt_net()
3675 					 */
3676 					alt = sctp_find_alternate_net(stcb, alt, 2);
3677 				} else {
3678 					/*
3679 					 * JRS 5/18/07 - If only CMT is on,
3680 					 * use the CMT version of
3681 					 * find_alt_net()
3682 					 */
3683 					/* sa_ignore NO_NULL_CHK */
3684 					alt = sctp_find_alternate_net(stcb, alt, 1);
3685 				}
3686 				if (alt == NULL) {
3687 					alt = tp1->whoTo;
3688 				}
3689 				/*
3690 				 * CUCv2: If a different dest is picked for
3691 				 * the retransmission, then new
3692 				 * (rtx-)pseudo_cumack needs to be tracked
3693 				 * for orig dest. Let CUCv2 track new (rtx-)
3694 				 * pseudo-cumack always.
3695 				 */
3696 				if (tp1->whoTo) {
3697 					tp1->whoTo->find_pseudo_cumack = 1;
3698 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3699 				}
3700 
3701 			} else {	/* CMT is OFF */
3702 
3703 #ifdef SCTP_FR_TO_ALTERNATE
3704 				/* Can we find an alternate? */
3705 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3706 #else
3707 				/*
3708 				 * default behavior is to NOT retransmit
3709 				 * FR's to an alternate. Armando Caro's
3710 				 * paper details why.
3711 				 */
3712 				alt = tp1->whoTo;
3713 #endif
3714 			}
3715 
3716 			tp1->rec.data.doing_fast_retransmit = 1;
3717 			tot_retrans++;
3718 			/* mark the sending seq for possible subsequent FR's */
3719 			/*
3720 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3721 			 * (uint32_t)tpi->rec.data.tsn);
3722 			 */
3723 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3724 				/*
3725 				 * If the send queue is empty then it's
3726 				 * the next sequence number that will be
3727 				 * assigned, so we subtract one from this to
3728 				 * get the one we last sent.
3729 				 */
3730 				tp1->rec.data.fast_retran_tsn = sending_seq;
3731 			} else {
3732 				/*
3733 				 * If there are chunks on the send queue
3734 				 * (unsent data that has made it from the
3735 				 * stream queues but not out the door), we
3736 				 * take the first one (which will have the
3737 				 * lowest TSN) and subtract one to get the
3738 				 * one we last sent.
3739 				 */
3740 				struct sctp_tmit_chunk *ttt;
3741 
3742 				ttt = TAILQ_FIRST(&asoc->send_queue);
3743 				tp1->rec.data.fast_retran_tsn =
3744 				    ttt->rec.data.tsn;
3745 			}
3746 
3747 			if (tp1->do_rtt) {
3748 				/*
3749 				 * this guy had an RTO calculation pending on
3750 				 * it; cancel it
3751 				 */
3752 				if ((tp1->whoTo != NULL) &&
3753 				    (tp1->whoTo->rto_needed == 0)) {
3754 					tp1->whoTo->rto_needed = 1;
3755 				}
3756 				tp1->do_rtt = 0;
3757 			}
3758 			if (alt != tp1->whoTo) {
3759 				/* yes, there is an alternate. */
3760 				sctp_free_remote_addr(tp1->whoTo);
3761 				/* sa_ignore FREED_MEMORY */
3762 				tp1->whoTo = alt;
3763 				atomic_add_int(&alt->ref_count, 1);
3764 			}
3765 		}
3766 	}
3767 }
3768 
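/*
 * PR-SCTP (RFC 3758): try to advance the advanced peer ack point past
 * abandoned chunks at the head of the sent queue.  The chunk returned,
 * if any, is the last one the ack point moved over; the caller uses it
 * to decide whether a FORWARD-TSN needs to be sent.
 */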
3769 struct sctp_tmit_chunk *
3770 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3771     struct sctp_association *asoc)
3772 {
3773 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3774 	struct timeval now;
3775 	int now_filled = 0;
3776 
3777 	if (asoc->prsctp_supported == 0) {
3778 		return (NULL);
3779 	}
3780 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3781 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3782 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3783 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3784 			/* no chance to advance, out of here */
3785 			break;
3786 		}
3787 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3788 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3789 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3790 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3791 				    asoc->advanced_peer_ack_point,
3792 				    tp1->rec.data.tsn, 0, 0);
3793 			}
3794 		}
3795 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3796 			/*
3797 			 * We can't fwd-tsn past any that are reliable,
3798 			 * i.e. will be retransmitted until the asoc fails.
3799 			 */
3800 			break;
3801 		}
3802 		if (!now_filled) {
3803 			(void)SCTP_GETTIME_TIMEVAL(&now);
3804 			now_filled = 1;
3805 		}
3806 		/*
3807 		 * Now we have a chunk which is marked for another
3808 		 * retransmission to a PR-stream, but which has maybe run out of
3809 		 * its chances already OR has been marked to be skipped. Can we
3810 		 * skip it if it's a resend?
3811 		 */
3812 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3813 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3814 			/*
3815 			 * Now is this one marked for resend and its time is
3816 			 * now up?
3817 			 */
3818 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3819 				/* Yes so drop it */
3820 				if (tp1->data) {
3821 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3822 					    1, SCTP_SO_NOT_LOCKED);
3823 				}
3824 			} else {
3825 				/*
3826 				 * No, we are done when we hit one for resend
3827 				 * whose time has not expired.
3828 				 */
3829 				break;
3830 			}
3831 		}
3832 		/*
3833 		 * Ok, now if this chunk is marked to be dropped, we can clean
3834 		 * up the chunk, advance our peer ack point and we can check
3835 		 * the next chunk.
3836 		 */
3837 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3838 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3839 			/* advance PeerAckPoint goes forward */
3840 			/* advanced_peer_ack_point goes forward */
3841 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3842 				a_adv = tp1;
3843 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3844 				/* No update but we do save the chk */
3845 				a_adv = tp1;
3846 			}
3847 		} else {
3848 			/*
3849 			 * If it is still in RESEND we can advance no
3850 			 * further
3851 			 */
3852 			break;
3853 		}
3854 	}
3855 	return (a_adv);
3856 }
3857 
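/*
 * Audit pass: re-derive the flight picture by walking the sent queue.
 * Under INVARIANTS a mismatch panics; otherwise the skew is printed
 * and a nonzero return tells the caller to rebuild its flight-size
 * accounting.
 */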
3858 static int
3859 sctp_fs_audit(struct sctp_association *asoc)
3860 {
3861 	struct sctp_tmit_chunk *chk;
3862 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3863 	int ret;
3864 #ifndef INVARIANTS
3865 	int entry_flight, entry_cnt;
3866 #endif
3867 
3868 	ret = 0;
3869 #ifndef INVARIANTS
3870 	entry_flight = asoc->total_flight;
3871 	entry_cnt = asoc->total_flight_count;
3872 #endif
3873 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3874 		return (0);
3875 
3876 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3877 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3878 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3879 			    chk->rec.data.tsn,
3880 			    chk->send_size,
3881 			    chk->snd_count);
3882 			inflight++;
3883 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3884 			resend++;
3885 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3886 			inbetween++;
3887 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3888 			above++;
3889 		} else {
3890 			acked++;
3891 		}
3892 	}
3893 
3894 	if ((inflight > 0) || (inbetween > 0)) {
3895 #ifdef INVARIANTS
3896 		panic("Flight size-express incorrect?\n");
3897 #else
3898 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3899 		    entry_flight, entry_cnt);
3900 
3901 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3902 		    inflight, inbetween, resend, above, acked);
3903 		ret = 1;
3904 #endif
3905 	}
3906 	return (ret);
3907 }
3908 
3909 
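/*
 * Recover a window-probe chunk once the peer's window has reopened:
 * pull the probe back out of the flight accounting and mark it for
 * retransmission, unless it was already acked or abandoned, in which
 * case it is left alone.
 */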
3910 static void
3911 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3912     struct sctp_association *asoc,
3913     struct sctp_tmit_chunk *tp1)
3914 {
3915 	tp1->window_probe = 0;
3916 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3917 		/* TSNs skipped; we do NOT move back. */
3918 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3919 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3920 		    tp1->book_size,
3921 		    (uint32_t)(uintptr_t)tp1->whoTo,
3922 		    tp1->rec.data.tsn);
3923 		return;
3924 	}
3925 	/* First setup this by shrinking flight */
3926 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3927 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3928 		    tp1);
3929 	}
3930 	sctp_flight_size_decrease(tp1);
3931 	sctp_total_flight_decrease(stcb, tp1);
3932 	/* Now mark for resend */
3933 	tp1->sent = SCTP_DATAGRAM_RESEND;
3934 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3935 
3936 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3937 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3938 		    tp1->whoTo->flight_size,
3939 		    tp1->book_size,
3940 		    (uint32_t)(uintptr_t)tp1->whoTo,
3941 		    tp1->rec.data.tsn);
3942 	}
3943 }
3944 
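/*
 * Express (fast-path) SACK handling, used when the SACK carries only a
 * new cumulative ack and no gap-ack blocks, so the full slow-path work
 * in sctp_handle_sack() can be avoided.
 */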
3945 void
3946 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3947     uint32_t rwnd, int *abort_now, int ecne_seen)
3948 {
3949 	struct sctp_nets *net;
3950 	struct sctp_association *asoc;
3951 	struct sctp_tmit_chunk *tp1, *tp2;
3952 	uint32_t old_rwnd;
3953 	int win_probe_recovery = 0;
3954 	int win_probe_recovered = 0;
3955 	int j, done_once = 0;
3956 	int rto_ok = 1;
3957 	uint32_t send_s;
3958 
3959 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3960 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3961 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3962 	}
3963 	SCTP_TCB_LOCK_ASSERT(stcb);
3964 #ifdef SCTP_ASOCLOG_OF_TSNS
3965 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3966 	stcb->asoc.cumack_log_at++;
3967 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3968 		stcb->asoc.cumack_log_at = 0;
3969 	}
3970 #endif
3971 	asoc = &stcb->asoc;
3972 	old_rwnd = asoc->peers_rwnd;
3973 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3974 		/* old ack */
3975 		return;
3976 	} else if (asoc->last_acked_seq == cumack) {
3977 		/* Window update sack */
3978 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3979 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3980 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3981 			/* SWS sender side engages */
3982 			asoc->peers_rwnd = 0;
3983 		}
3984 		if (asoc->peers_rwnd > old_rwnd) {
3985 			goto again;
3986 		}
3987 		return;
3988 	}
3989 
3990 	/* First setup for CC stuff */
3991 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3992 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3993 			/* Drag along the window_tsn for cwr's */
3994 			net->cwr_window_tsn = cumack;
3995 		}
3996 		net->prev_cwnd = net->cwnd;
3997 		net->net_ack = 0;
3998 		net->net_ack2 = 0;
3999 
4000 		/*
4001 		 * CMT: Reset CUC and Fast recovery algo variables before
4002 		 * SACK processing
4003 		 */
4004 		net->new_pseudo_cumack = 0;
4005 		net->will_exit_fast_recovery = 0;
4006 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4007 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4008 		}
4009 	}
4010 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4011 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4012 		    sctpchunk_listhead);
4013 		send_s = tp1->rec.data.tsn + 1;
4014 	} else {
4015 		send_s = asoc->sending_seq;
4016 	}
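	/*
	 * send_s is one past the highest TSN handed to this peer, so a
	 * cum-ack at or beyond it would ack data we never sent.  For
	 * example, with sending_seq 5000 and an empty sent queue, any
	 * cum-ack >= 5000 is a protocol violation and aborts the
	 * association.
	 */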
4017 	if (SCTP_TSN_GE(cumack, send_s)) {
4018 		struct mbuf *op_err;
4019 		char msg[SCTP_DIAG_INFO_LEN];
4020 
4021 		*abort_now = 1;
4022 		/* XXX */
4023 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4024 		    cumack, send_s);
4025 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4026 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4027 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4028 		return;
4029 	}
4030 	asoc->this_sack_highest_gap = cumack;
4031 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4032 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4033 		    stcb->asoc.overall_error_count,
4034 		    0,
4035 		    SCTP_FROM_SCTP_INDATA,
4036 		    __LINE__);
4037 	}
4038 	stcb->asoc.overall_error_count = 0;
4039 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4040 		/* process the new consecutive TSN first */
4041 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4042 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4043 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4044 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4045 				}
4046 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4047 					/*
4048 					 * If it is less than ACKED, it is
4049 					 * now no-longer in flight. Higher
4050 					 * values may occur during marking
4051 					 */
4052 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4053 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4054 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4055 							    tp1->whoTo->flight_size,
4056 							    tp1->book_size,
4057 							    (uint32_t)(uintptr_t)tp1->whoTo,
4058 							    tp1->rec.data.tsn);
4059 						}
4060 						sctp_flight_size_decrease(tp1);
4061 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4062 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4063 							    tp1);
4064 						}
4065 						/* sa_ignore NO_NULL_CHK */
4066 						sctp_total_flight_decrease(stcb, tp1);
4067 					}
4068 					tp1->whoTo->net_ack += tp1->send_size;
4069 					if (tp1->snd_count < 2) {
4070 						/*
4071 						 * True non-retransmitted
4072 						 * chunk
4073 						 */
4074 						tp1->whoTo->net_ack2 +=
4075 						    tp1->send_size;
4076 
4077 						/* update RTO too? */
4078 						if (tp1->do_rtt) {
4079 							if (rto_ok) {
4080 								tp1->whoTo->RTO =
4081 								/*
4082 								 * sa_ignore
4083 								 * NO_NULL_CHK
4084 								 */
4085 								    sctp_calculate_rto(stcb,
4086 								    asoc, tp1->whoTo,
4087 								    &tp1->sent_rcv_time,
4088 								    SCTP_RTT_FROM_DATA);
4089 								rto_ok = 0;
4090 							}
4091 							if (tp1->whoTo->rto_needed == 0) {
4092 								tp1->whoTo->rto_needed = 1;
4093 							}
4094 							tp1->do_rtt = 0;
4095 						}
4096 					}
4097 					/*
4098 					 * CMT: CUCv2 algorithm. From the
4099 					 * cumack'd TSNs, for each TSN being
4100 					 * acked for the first time, set the
4101 					 * following variables for the
4102 					 * corresp destination.
4103 					 * new_pseudo_cumack will trigger a
4104 					 * cwnd update.
4105 					 * find_(rtx_)pseudo_cumack will
4106 					 * trigger search for the next
4107 					 * expected (rtx-)pseudo-cumack.
4108 					 */
4109 					tp1->whoTo->new_pseudo_cumack = 1;
4110 					tp1->whoTo->find_pseudo_cumack = 1;
4111 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4112 
4113 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4114 						/* sa_ignore NO_NULL_CHK */
4115 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4116 					}
4117 				}
4118 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4119 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4120 				}
4121 				if (tp1->rec.data.chunk_was_revoked) {
4122 					/* deflate the cwnd */
4123 					tp1->whoTo->cwnd -= tp1->book_size;
4124 					tp1->rec.data.chunk_was_revoked = 0;
4125 				}
4126 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4127 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4128 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4129 #ifdef INVARIANTS
4130 					} else {
4131 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4132 #endif
4133 					}
4134 				}
4135 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4136 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4137 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4138 					asoc->trigger_reset = 1;
4139 				}
4140 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4141 				if (tp1->data) {
4142 					/* sa_ignore NO_NULL_CHK */
4143 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4144 					sctp_m_freem(tp1->data);
4145 					tp1->data = NULL;
4146 				}
4147 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4148 					sctp_log_sack(asoc->last_acked_seq,
4149 					    cumack,
4150 					    tp1->rec.data.tsn,
4151 					    0,
4152 					    0,
4153 					    SCTP_LOG_FREE_SENT);
4154 				}
4155 				asoc->sent_queue_cnt--;
4156 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4157 			} else {
4158 				break;
4159 			}
4160 		}
4161 
4162 	}
4163 	/* sa_ignore NO_NULL_CHK */
4164 	if (stcb->sctp_socket) {
4165 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4166 		struct socket *so;
4167 
4168 #endif
4169 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 			/* sa_ignore NO_NULL_CHK */
4172 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4173 		}
4174 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4175 		so = SCTP_INP_SO(stcb->sctp_ep);
4176 		atomic_add_int(&stcb->asoc.refcnt, 1);
4177 		SCTP_TCB_UNLOCK(stcb);
4178 		SCTP_SOCKET_LOCK(so, 1);
4179 		SCTP_TCB_LOCK(stcb);
4180 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4181 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4182 			/* assoc was freed while we were unlocked */
4183 			SCTP_SOCKET_UNLOCK(so, 1);
4184 			return;
4185 		}
4186 #endif
4187 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4189 		SCTP_SOCKET_UNLOCK(so, 1);
4190 #endif
4191 	} else {
4192 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4193 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4194 		}
4195 	}
4196 
4197 	/* JRS - Use the congestion control given in the CC module */
4198 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4199 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4200 			if (net->net_ack2 > 0) {
4201 				/*
4202 				 * Karn's rule applies to clearing error
4203 				 * count, this is optional.
4204 				 */
4205 				net->error_count = 0;
4206 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4207 					/* addr came good */
4208 					net->dest_state |= SCTP_ADDR_REACHABLE;
4209 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4210 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4211 				}
4212 				if (net == stcb->asoc.primary_destination) {
4213 					if (stcb->asoc.alternate) {
4214 						/*
4215 						 * release the alternate,
4216 						 * primary is good
4217 						 */
4218 						sctp_free_remote_addr(stcb->asoc.alternate);
4219 						stcb->asoc.alternate = NULL;
4220 					}
4221 				}
4222 				if (net->dest_state & SCTP_ADDR_PF) {
4223 					net->dest_state &= ~SCTP_ADDR_PF;
4224 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4225 					    stcb->sctp_ep, stcb, net,
4226 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4227 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4228 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4229 					/* Done with this net */
4230 					net->net_ack = 0;
4231 				}
4232 				/* restore any doubled timers */
4233 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4234 				if (net->RTO < stcb->asoc.minrto) {
4235 					net->RTO = stcb->asoc.minrto;
4236 				}
4237 				if (net->RTO > stcb->asoc.maxrto) {
4238 					net->RTO = stcb->asoc.maxrto;
4239 				}
4240 			}
4241 		}
4242 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4243 	}
4244 	asoc->last_acked_seq = cumack;
4245 
4246 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4247 		/* nothing left in-flight */
4248 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4249 			net->flight_size = 0;
4250 			net->partial_bytes_acked = 0;
4251 		}
4252 		asoc->total_flight = 0;
4253 		asoc->total_flight_count = 0;
4254 	}
4255 
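	/*
	 * The computation below shrinks the peer's reported rwnd by what
	 * we still have in flight plus a per-chunk overhead.  Illustrative
	 * example (values assumed, not from the code): with a reported
	 * rwnd of 64000, 8000 bytes in flight and 4 chunks outstanding at
	 * an sctp_peer_chunk_oh of 256, the usable peer window becomes
	 * 64000 - (8000 + 4 * 256) = 54976 bytes.
	 */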
4256 	/* RWND update */
4257 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4258 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4259 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4260 		/* SWS sender side engages */
4261 		asoc->peers_rwnd = 0;
4262 	}
4263 	if (asoc->peers_rwnd > old_rwnd) {
4264 		win_probe_recovery = 1;
4265 	}
4266 	/* Now assure a timer where data is queued at */
4267 again:
4268 	j = 0;
4269 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4270 		if (win_probe_recovery && (net->window_probe)) {
4271 			win_probe_recovered = 1;
4272 			/*
4273 			 * Find the first chunk that was used with a window
4274 			 * probe and clear its sent state.
4275 			 */
4276 			/* sa_ignore FREED_MEMORY */
4277 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4278 				if (tp1->window_probe) {
4279 					/* move back to data send queue */
4280 					sctp_window_probe_recovery(stcb, asoc, tp1);
4281 					break;
4282 				}
4283 			}
4284 		}
4285 		if (net->flight_size) {
4286 			j++;
4287 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4288 			if (net->window_probe) {
4289 				net->window_probe = 0;
4290 			}
4291 		} else {
4292 			if (net->window_probe) {
4293 				/*
4294 				 * In window probes we must assure a timer
4295 				 * is still running there
4296 				 */
4297 				net->window_probe = 0;
4298 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4299 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4300 				}
4301 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4302 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4303 				    stcb, net,
4304 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4305 			}
4306 		}
4307 	}
4308 	if ((j == 0) &&
4309 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4310 	    (asoc->sent_queue_retran_cnt == 0) &&
4311 	    (win_probe_recovered == 0) &&
4312 	    (done_once == 0)) {
4313 		/*
4314 		 * huh, this should not happen unless all packets are
4315 		 * PR-SCTP and marked to skip of course.
4316 		 */
4317 		if (sctp_fs_audit(asoc)) {
4318 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4319 				net->flight_size = 0;
4320 			}
4321 			asoc->total_flight = 0;
4322 			asoc->total_flight_count = 0;
4323 			asoc->sent_queue_retran_cnt = 0;
4324 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4325 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4326 					sctp_flight_size_increase(tp1);
4327 					sctp_total_flight_increase(stcb, tp1);
4328 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4329 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4330 				}
4331 			}
4332 		}
4333 		done_once = 1;
4334 		goto again;
4335 	}
4336 	/**********************************/
4337 	/* Now what about shutdown issues */
4338 	/**********************************/
4339 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4340 		/* nothing left on sendqueue.. consider done */
4341 		/* clean up */
4342 		if ((asoc->stream_queue_cnt == 1) &&
4343 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4344 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4345 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4346 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4347 		}
4348 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4349 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4350 		    (asoc->stream_queue_cnt == 1) &&
4351 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4352 			struct mbuf *op_err;
4353 
4354 			*abort_now = 1;
4355 			/* XXX */
4356 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4357 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4358 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4359 			return;
4360 		}
4361 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4362 		    (asoc->stream_queue_cnt == 0)) {
4363 			struct sctp_nets *netp;
4364 
4365 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4366 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4367 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4368 			}
4369 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4370 			sctp_stop_timers_for_shutdown(stcb);
4371 			if (asoc->alternate) {
4372 				netp = asoc->alternate;
4373 			} else {
4374 				netp = asoc->primary_destination;
4375 			}
4376 			sctp_send_shutdown(stcb, netp);
4377 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4378 			    stcb->sctp_ep, stcb, netp);
4379 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4380 			    stcb->sctp_ep, stcb, netp);
4381 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4382 		    (asoc->stream_queue_cnt == 0)) {
4383 			struct sctp_nets *netp;
4384 
4385 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4386 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4387 			sctp_stop_timers_for_shutdown(stcb);
4388 			if (asoc->alternate) {
4389 				netp = asoc->alternate;
4390 			} else {
4391 				netp = asoc->primary_destination;
4392 			}
4393 			sctp_send_shutdown_ack(stcb, netp);
4394 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4395 			    stcb->sctp_ep, stcb, netp);
4396 		}
4397 	}
4398 	/*********************************************/
4399 	/* Here we perform PR-SCTP procedures        */
4400 	/* (section 4.2)                             */
4401 	/*********************************************/
4402 	/* C1. update advancedPeerAckPoint */
4403 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4404 		asoc->advanced_peer_ack_point = cumack;
4405 	}
4406 	/* PR-Sctp issues need to be addressed too */
4407 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4408 		struct sctp_tmit_chunk *lchk;
4409 		uint32_t old_adv_peer_ack_point;
4410 
4411 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4412 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4413 		/* C3. See if we need to send a Fwd-TSN */
4414 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4415 			/*
4416 			 * ISSUE with ECN, see FWD-TSN processing.
4417 			 */
4418 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4419 				send_forward_tsn(stcb, asoc);
4420 			} else if (lchk) {
4421 				/* try to FR fwd-tsn's that get lost too */
4422 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4423 					send_forward_tsn(stcb, asoc);
4424 				}
4425 			}
4426 		}
4427 		if (lchk) {
4428 			/* Assure a timer is up */
4429 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4430 			    stcb->sctp_ep, stcb, lchk->whoTo);
4431 		}
4432 	}
4433 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4434 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4435 		    rwnd,
4436 		    stcb->asoc.peers_rwnd,
4437 		    stcb->asoc.total_flight,
4438 		    stcb->asoc.total_output_queue_size);
4439 	}
4440 }
4441 
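/*
 * Slow-path SACK handling: process the cumulative ack, the gap-ack and
 * NR gap-ack blocks, and any duplicate-TSN reports, then run the
 * revocation, strike/fast-retransmit, and PR-SCTP machinery.
 */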
4442 void
4443 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4444     struct sctp_tcb *stcb,
4445     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4446     int *abort_now, uint8_t flags,
4447     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4448 {
4449 	struct sctp_association *asoc;
4450 	struct sctp_tmit_chunk *tp1, *tp2;
4451 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4452 	uint16_t wake_him = 0;
4453 	uint32_t send_s = 0;
4454 	long j;
4455 	int accum_moved = 0;
4456 	int will_exit_fast_recovery = 0;
4457 	uint32_t a_rwnd, old_rwnd;
4458 	int win_probe_recovery = 0;
4459 	int win_probe_recovered = 0;
4460 	struct sctp_nets *net = NULL;
4461 	int done_once;
4462 	int rto_ok = 1;
4463 	uint8_t reneged_all = 0;
4464 	uint8_t cmt_dac_flag;
4465 
4466 	/*
4467 	 * we take any chance we can to service our queues since we cannot
4468 	 * get awoken when the socket is read from :<
4469 	 */
4470 	/*
4471 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4472 	 * old sack; if so, discard. 2) If there is nothing left in the send
4473 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4474 	 * too; update any rwnd change and verify no timers are running,
4475 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4476 	 * moved; process these first and note that it moved. 4) Process any
4477 	 * sack blocks. 5) Drop any acked chunks from the queue. 6) Check
4478 	 * for any revoked blocks and mark them. 7) Update the cwnd. 8)
4479 	 * Nothing left? Sync up flightsizes and things, stop all timers and
4480 	 * also check for shutdown_pending state; if so then go ahead and
4481 	 * send off the shutdown. If in shutdown recv, send off the
4482 	 * shutdown-ack and start that timer, then return. 9) Strike any
4483 	 * non-acked chunks and do the FR procedure if needed, being sure to
4484 	 * set the FR flag. 10) Do PR-SCTP procedures. 11) Apply any FR
4485 	 * penalties. 12) Assure we will SACK if in shutdown_recv state.
4486 	 */
4487 	SCTP_TCB_LOCK_ASSERT(stcb);
4488 	/* CMT DAC algo */
4489 	this_sack_lowest_newack = 0;
4490 	SCTP_STAT_INCR(sctps_slowpath_sack);
4491 	last_tsn = cum_ack;
4492 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4493 #ifdef SCTP_ASOCLOG_OF_TSNS
4494 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4495 	stcb->asoc.cumack_log_at++;
4496 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4497 		stcb->asoc.cumack_log_at = 0;
4498 	}
4499 #endif
4500 	a_rwnd = rwnd;
4501 
4502 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4503 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4504 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4505 	}
4506 
4507 	old_rwnd = stcb->asoc.peers_rwnd;
4508 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4509 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4510 		    stcb->asoc.overall_error_count,
4511 		    0,
4512 		    SCTP_FROM_SCTP_INDATA,
4513 		    __LINE__);
4514 	}
4515 	stcb->asoc.overall_error_count = 0;
4516 	asoc = &stcb->asoc;
4517 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4518 		sctp_log_sack(asoc->last_acked_seq,
4519 		    cum_ack,
4520 		    0,
4521 		    num_seg,
4522 		    num_dup,
4523 		    SCTP_LOG_NEW_SACK);
4524 	}
4525 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4526 		uint16_t i;
4527 		uint32_t *dupdata, dblock;
4528 
4529 		for (i = 0; i < num_dup; i++) {
4530 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4531 			    sizeof(uint32_t), (uint8_t *)&dblock);
4532 			if (dupdata == NULL) {
4533 				break;
4534 			}
4535 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4536 		}
4537 	}
4538 	/* reality check */
4539 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4540 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4541 		    sctpchunk_listhead);
4542 		send_s = tp1->rec.data.tsn + 1;
4543 	} else {
4544 		tp1 = NULL;
4545 		send_s = asoc->sending_seq;
4546 	}
4547 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4548 		struct mbuf *op_err;
4549 		char msg[SCTP_DIAG_INFO_LEN];
4550 
4551 		/*
4552 		 * no way, we have not even sent this TSN out yet. Peer is
4553 		 * hopelessly messed up with us.
4554 		 */
4555 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller than or equal\n",
4556 		    cum_ack, send_s);
4557 		if (tp1) {
4558 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4559 			    tp1->rec.data.tsn, (void *)tp1);
4560 		}
4561 hopeless_peer:
4562 		*abort_now = 1;
4563 		/* XXX */
4564 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4565 		    cum_ack, send_s);
4566 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4567 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4568 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4569 		return;
4570 	}
4571 	/**********************/
4572 	/* 1) check the range */
4573 	/**********************/
4574 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4575 		/* acking something behind */
4576 		return;
4577 	}
4578 
4579 	/* update the Rwnd of the peer */
4580 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4581 	    TAILQ_EMPTY(&asoc->send_queue) &&
4582 	    (asoc->stream_queue_cnt == 0)) {
4583 		/* nothing left on send/sent and strmq */
4584 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4585 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4586 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4587 		}
4588 		asoc->peers_rwnd = a_rwnd;
4589 		if (asoc->sent_queue_retran_cnt) {
4590 			asoc->sent_queue_retran_cnt = 0;
4591 		}
4592 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4593 			/* SWS sender side engages */
4594 			asoc->peers_rwnd = 0;
4595 		}
4596 		/* stop any timers */
4597 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4598 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4599 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4600 			net->partial_bytes_acked = 0;
4601 			net->flight_size = 0;
4602 		}
4603 		asoc->total_flight = 0;
4604 		asoc->total_flight_count = 0;
4605 		return;
4606 	}
4607 	/*
4608 	 * We init net_ack and net_ack2 to 0. These are used to track two
4609 	 * things. The total byte count acked is tracked in net_ack AND
4610 	 * net_ack2 is used to track the total bytes acked that are
4611 	 * unambiguous and were never retransmitted. We track these on a per
4612 	 * destination address basis.
4613 	 */
4614 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4615 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4616 			/* Drag along the window_tsn for cwr's */
4617 			net->cwr_window_tsn = cum_ack;
4618 		}
4619 		net->prev_cwnd = net->cwnd;
4620 		net->net_ack = 0;
4621 		net->net_ack2 = 0;
4622 
4623 		/*
4624 		 * CMT: Reset CUC and Fast recovery algo variables before
4625 		 * SACK processing
4626 		 */
4627 		net->new_pseudo_cumack = 0;
4628 		net->will_exit_fast_recovery = 0;
4629 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4630 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4631 		}
4632 
4633 		/*
4634 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4635 		 * to be greater than the cumack. Also reset saw_newack to 0
4636 		 * for all dests.
4637 		 */
4638 		net->saw_newack = 0;
4639 		net->this_sack_highest_newack = last_tsn;
4640 	}
4641 	/* process the new consecutive TSN first */
4642 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4643 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4644 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4645 				accum_moved = 1;
4646 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4647 					/*
4648 					 * If it is less than ACKED, it is
4649 					 * now no-longer in flight. Higher
4650 					 * values may occur during marking
4651 					 */
4652 					if ((tp1->whoTo->dest_state &
4653 					    SCTP_ADDR_UNCONFIRMED) &&
4654 					    (tp1->snd_count < 2)) {
4655 						/*
4656 						 * If there was no retran
4657 						 * and the address is
4658 						 * un-confirmed and we sent
4659 						 * there and are now
4660 						 * sacked... it's confirmed,
4661 						 * mark it so.
4662 						 */
4663 						tp1->whoTo->dest_state &=
4664 						    ~SCTP_ADDR_UNCONFIRMED;
4665 					}
4666 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4667 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4668 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4669 							    tp1->whoTo->flight_size,
4670 							    tp1->book_size,
4671 							    (uint32_t)(uintptr_t)tp1->whoTo,
4672 							    tp1->rec.data.tsn);
4673 						}
4674 						sctp_flight_size_decrease(tp1);
4675 						sctp_total_flight_decrease(stcb, tp1);
4676 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4677 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4678 							    tp1);
4679 						}
4680 					}
4681 					tp1->whoTo->net_ack += tp1->send_size;
4682 
4683 					/* CMT SFR and DAC algos */
4684 					this_sack_lowest_newack = tp1->rec.data.tsn;
4685 					tp1->whoTo->saw_newack = 1;
4686 
4687 					if (tp1->snd_count < 2) {
4688 						/*
4689 						 * True non-retransmitted
4690 						 * chunk
4691 						 */
4692 						tp1->whoTo->net_ack2 +=
4693 						    tp1->send_size;
4694 
4695 						/* update RTO too? */
4696 						if (tp1->do_rtt) {
4697 							if (rto_ok) {
4698 								tp1->whoTo->RTO =
4699 								    sctp_calculate_rto(stcb,
4700 								    asoc, tp1->whoTo,
4701 								    &tp1->sent_rcv_time,
4702 								    SCTP_RTT_FROM_DATA);
4703 								rto_ok = 0;
4704 							}
4705 							if (tp1->whoTo->rto_needed == 0) {
4706 								tp1->whoTo->rto_needed = 1;
4707 							}
4708 							tp1->do_rtt = 0;
4709 						}
4710 					}
4711 					/*
4712 					 * CMT: CUCv2 algorithm. From the
4713 					 * cumack'd TSNs, for each TSN being
4714 					 * acked for the first time, set the
4715 					 * following variables for the
4716 					 * corresp destination.
4717 					 * new_pseudo_cumack will trigger a
4718 					 * cwnd update.
4719 					 * find_(rtx_)pseudo_cumack will
4720 					 * trigger search for the next
4721 					 * expected (rtx-)pseudo-cumack.
4722 					 */
4723 					tp1->whoTo->new_pseudo_cumack = 1;
4724 					tp1->whoTo->find_pseudo_cumack = 1;
4725 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4726 
4727 
4728 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4729 						sctp_log_sack(asoc->last_acked_seq,
4730 						    cum_ack,
4731 						    tp1->rec.data.tsn,
4732 						    0,
4733 						    0,
4734 						    SCTP_LOG_TSN_ACKED);
4735 					}
4736 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4737 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4738 					}
4739 				}
4740 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4741 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4742 #ifdef SCTP_AUDITING_ENABLED
4743 					sctp_audit_log(0xB3,
4744 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4745 #endif
4746 				}
4747 				if (tp1->rec.data.chunk_was_revoked) {
4748 					/* deflate the cwnd */
4749 					tp1->whoTo->cwnd -= tp1->book_size;
4750 					tp1->rec.data.chunk_was_revoked = 0;
4751 				}
4752 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4753 					tp1->sent = SCTP_DATAGRAM_ACKED;
4754 				}
4755 			}
4756 		} else {
4757 			break;
4758 		}
4759 	}
4760 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4761 	/* always set this up to cum-ack */
4762 	asoc->this_sack_highest_gap = last_tsn;
4763 
4764 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4765 
4766 		/*
4767 		 * this_sack_highest_gap will increase while handling NEW
4768 		 * segments; this_sack_highest_newack will increase while
4769 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4770 		 * used for CMT DAC algo. saw_newack will also change.
4771 		 */
4772 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4773 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4774 		    num_seg, num_nr_seg, &rto_ok)) {
4775 			wake_him++;
4776 		}
4777 		/*
4778 		 * validate the biggest_tsn_acked in the gap acks if strict
4779 		 * adherence is wanted.
4780 		 */
4781 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4782 			/*
4783 			 * peer is either confused or we are under attack.
4784 			 * We must abort.
4785 			 */
4786 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4787 			    biggest_tsn_acked, send_s);
4788 			goto hopeless_peer;
4789 		}
4790 	}
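	/*
	 * A sketch of the 32-bit serial arithmetic behind SCTP_TSN_GE()
	 * above (assumption: the real macros in sctp_constants.h are
	 * equivalent):
	 *
	 *	tsn_gt(a, b) == (a != b) && (uint32_t)((a) - (b)) < (1U << 31)
	 *	tsn_ge(a, b) == (a == b) || tsn_gt(a, b)
	 *
	 * A TSN "ahead" by up to 2^31 - 1 compares greater even across a
	 * wrap, e.g. tsn_gt(0x00000002, 0xfffffffe) holds because
	 * 0x00000002 - 0xfffffffe == 4 (mod 2^32).
	 */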
4791 	/*******************************************/
4792 	/* cancel ALL T3-send timer if accum moved */
4793 	/*******************************************/
4794 	if (asoc->sctp_cmt_on_off > 0) {
4795 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 			if (net->new_pseudo_cumack)
4797 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4798 				    stcb, net,
4799 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4800 
4801 		}
4802 	} else {
4803 		if (accum_moved) {
4804 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4806 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4807 			}
4808 		}
4809 	}
4810 	/********************************************/
4811 	/* drop the acked chunks from the sentqueue */
4812 	/********************************************/
4813 	asoc->last_acked_seq = cum_ack;
4814 
4815 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4816 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4817 			break;
4818 		}
4819 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4820 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4821 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4822 #ifdef INVARIANTS
4823 			} else {
4824 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4825 #endif
4826 			}
4827 		}
4828 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4829 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4830 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4831 			asoc->trigger_reset = 1;
4832 		}
4833 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4834 		if (PR_SCTP_ENABLED(tp1->flags)) {
4835 			if (asoc->pr_sctp_cnt != 0)
4836 				asoc->pr_sctp_cnt--;
4837 		}
4838 		asoc->sent_queue_cnt--;
4839 		if (tp1->data) {
4840 			/* sa_ignore NO_NULL_CHK */
4841 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4842 			sctp_m_freem(tp1->data);
4843 			tp1->data = NULL;
4844 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4845 				asoc->sent_queue_cnt_removeable--;
4846 			}
4847 		}
4848 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4849 			sctp_log_sack(asoc->last_acked_seq,
4850 			    cum_ack,
4851 			    tp1->rec.data.tsn,
4852 			    0,
4853 			    0,
4854 			    SCTP_LOG_FREE_SENT);
4855 		}
4856 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4857 		wake_him++;
4858 	}
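	/*
	 * Note on the loop above: TAILQ_FOREACH_SAFE is required because
	 * tp1 is unlinked and freed inside the body; the _SAFE variant
	 * caches the successor in tp2 before the body runs. The generic
	 * queue(3) pattern is:
	 *
	 *	TAILQ_FOREACH_SAFE(elm, &head, entries, tmp) {
	 *		TAILQ_REMOVE(&head, elm, entries);
	 *		free_elm(elm);		elm is dead after this
	 *	}
	 */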
4859 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4860 #ifdef INVARIANTS
4861 		panic("Warning flight size is positive and should be 0");
4862 #else
4863 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4864 		    asoc->total_flight);
4865 #endif
4866 		asoc->total_flight = 0;
4867 	}
4868 
4869 	/* sa_ignore NO_NULL_CHK */
4870 	if ((wake_him) && (stcb->sctp_socket)) {
4871 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4872 		struct socket *so;
4873 
4874 #endif
4875 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4876 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4877 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4878 		}
4879 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4880 		so = SCTP_INP_SO(stcb->sctp_ep);
4881 		atomic_add_int(&stcb->asoc.refcnt, 1);
4882 		SCTP_TCB_UNLOCK(stcb);
4883 		SCTP_SOCKET_LOCK(so, 1);
4884 		SCTP_TCB_LOCK(stcb);
4885 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4886 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4887 			/* assoc was freed while we were unlocked */
4888 			SCTP_SOCKET_UNLOCK(so, 1);
4889 			return;
4890 		}
4891 #endif
4892 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4894 		SCTP_SOCKET_UNLOCK(so, 1);
4895 #endif
4896 	} else {
4897 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4898 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4899 		}
4900 	}
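	/*
	 * Note on the wakeup above: on platforms that also require the
	 * socket lock (the __APPLE__ / SCTP_SO_LOCK_TESTING builds), the
	 * lock order is socket lock before TCB lock. Hence the dance:
	 * hold a refcount on the association, drop the TCB lock, take the
	 * socket lock, retake the TCB lock, drop the refcount, and
	 * re-check SCTP_STATE_CLOSED_SOCKET in case the association was
	 * freed while unlocked.
	 */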
4901 
4902 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4903 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4904 			/* Setup so we will exit RFC2582 fast recovery */
4905 			will_exit_fast_recovery = 1;
4906 		}
4907 	}
4908 	/*
4909 	 * Check for revoked fragments:
4910 	 *
4911 	 * If the previous SACK had no gap reports, nothing can have been
4912 	 * revoked. If it did, and this SACK also has gap reports (aka
4913 	 * num_seg > 0), call sctp_check_for_revoked() to see whether the
4914 	 * peer revoked some of them. Otherwise the peer revoked all ACKED
4915 	 * fragments, since we had some before and now we have NONE.
4916 	 */
4917 
4918 	if (num_seg) {
4919 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4920 		asoc->saw_sack_with_frags = 1;
4921 	} else if (asoc->saw_sack_with_frags) {
4922 		int cnt_revoked = 0;
4923 
4924 		/* Peer revoked all dg's marked or acked */
4925 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4926 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4927 				tp1->sent = SCTP_DATAGRAM_SENT;
4928 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4929 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4930 					    tp1->whoTo->flight_size,
4931 					    tp1->book_size,
4932 					    (uint32_t)(uintptr_t)tp1->whoTo,
4933 					    tp1->rec.data.tsn);
4934 				}
4935 				sctp_flight_size_increase(tp1);
4936 				sctp_total_flight_increase(stcb, tp1);
4937 				tp1->rec.data.chunk_was_revoked = 1;
4938 				/*
4939 				 * To ensure that this increase in
4940 				 * flightsize, which is artificial, does not
4941 				 * throttle the sender, we also increase the
4942 				 * cwnd artificially.
4943 				 */
4944 				tp1->whoTo->cwnd += tp1->book_size;
4945 				cnt_revoked++;
4946 			}
4947 		}
4948 		if (cnt_revoked) {
4949 			reneged_all = 1;
4950 		}
4951 		asoc->saw_sack_with_frags = 0;
4952 	}
4953 	if (num_nr_seg > 0)
4954 		asoc->saw_sack_with_nr_frags = 1;
4955 	else
4956 		asoc->saw_sack_with_nr_frags = 0;
4957 
4958 	/* JRS - Use the congestion control given in the CC module */
4959 	if (ecne_seen == 0) {
4960 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4961 			if (net->net_ack2 > 0) {
4962 				/*
4963 				 * Karn's rule applies to clearing error
4964 				 * count, this is optional.
4965 				 */
4966 				net->error_count = 0;
4967 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4968 					/* addr came good */
4969 					net->dest_state |= SCTP_ADDR_REACHABLE;
4970 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4971 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4972 				}
4973 
4974 				if (net == stcb->asoc.primary_destination) {
4975 					if (stcb->asoc.alternate) {
4976 						/*
4977 						 * release the alternate,
4978 						 * primary is good
4979 						 */
4980 						sctp_free_remote_addr(stcb->asoc.alternate);
4981 						stcb->asoc.alternate = NULL;
4982 					}
4983 				}
4984 
4985 				if (net->dest_state & SCTP_ADDR_PF) {
4986 					net->dest_state &= ~SCTP_ADDR_PF;
4987 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4988 					    stcb->sctp_ep, stcb, net,
4989 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4990 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4991 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4992 					/* Done with this net */
4993 					net->net_ack = 0;
4994 				}
4995 				/* restore any doubled timers */
4996 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4997 				if (net->RTO < stcb->asoc.minrto) {
4998 					net->RTO = stcb->asoc.minrto;
4999 				}
5000 				if (net->RTO > stcb->asoc.maxrto) {
5001 					net->RTO = stcb->asoc.maxrto;
5002 				}
5003 			}
5004 		}
5005 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5006 	}
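	/*
	 * Worked example for the RTO restore above, assuming lastsa holds
	 * the smoothed RTT scaled by 2^SCTP_RTT_SHIFT: with lastsa = 800,
	 * lastsv = 50 and SCTP_RTT_SHIFT = 3, RTO becomes
	 * (800 >> 3) + 50 = 150, clamped into [minrto, maxrto]. This
	 * undoes any exponential backoff applied on earlier timeouts.
	 */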
5007 
5008 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5009 		/* nothing left in-flight */
5010 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5011 			/* stop all timers */
5012 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5013 			    stcb, net,
5014 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5015 			net->flight_size = 0;
5016 			net->partial_bytes_acked = 0;
5017 		}
5018 		asoc->total_flight = 0;
5019 		asoc->total_flight_count = 0;
5020 	}
5021 
5022 	/**********************************/
5023 	/* Now what about shutdown issues */
5024 	/**********************************/
5025 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5026 		/* nothing left on sendqueue, consider done */
5027 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5028 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5029 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5030 		}
5031 		asoc->peers_rwnd = a_rwnd;
5032 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5033 			/* SWS sender side engages */
5034 			asoc->peers_rwnd = 0;
5035 		}
5036 		/* clean up */
5037 		if ((asoc->stream_queue_cnt == 1) &&
5038 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5039 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5040 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5041 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5042 		}
5043 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5044 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5045 		    (asoc->stream_queue_cnt == 1) &&
5046 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5047 			struct mbuf *op_err;
5048 
5049 			*abort_now = 1;
5050 			/* XXX */
5051 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5052 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5053 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5054 			return;
5055 		}
5056 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5057 		    (asoc->stream_queue_cnt == 0)) {
5058 			struct sctp_nets *netp;
5059 
5060 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5061 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5062 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5063 			}
5064 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5065 			sctp_stop_timers_for_shutdown(stcb);
5066 			if (asoc->alternate) {
5067 				netp = asoc->alternate;
5068 			} else {
5069 				netp = asoc->primary_destination;
5070 			}
5071 			sctp_send_shutdown(stcb, netp);
5072 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5073 			    stcb->sctp_ep, stcb, netp);
5074 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5075 			    stcb->sctp_ep, stcb, netp);
5076 			return;
5077 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5078 		    (asoc->stream_queue_cnt == 0)) {
5079 			struct sctp_nets *netp;
5080 
5081 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5082 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5083 			sctp_stop_timers_for_shutdown(stcb);
5084 			if (asoc->alternate) {
5085 				netp = asoc->alternate;
5086 			} else {
5087 				netp = asoc->primary_destination;
5088 			}
5089 			sctp_send_shutdown_ack(stcb, netp);
5090 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5091 			    stcb->sctp_ep, stcb, netp);
5092 			return;
5093 		}
5094 	}
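	/*
	 * The shutdown handling above follows RFC 4960, Section 9.2: once
	 * both queues are empty, SHUTDOWN-PENDING moves to SHUTDOWN-SENT
	 * (send a SHUTDOWN, start the shutdown and guard timers) and
	 * SHUTDOWN-RECEIVED moves to SHUTDOWN-ACK-SENT (answer with a
	 * SHUTDOWN-ACK). A partially sent user message instead aborts the
	 * association with a "user initiated abort" cause.
	 */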
5095 	/*
5096 	 * Now here we are going to recycle net_ack for a different use...
5097 	 * HEADS UP.
5098 	 */
5099 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5100 		net->net_ack = 0;
5101 	}
5102 
5103 	/*
5104 	 * CMT DAC algorithm: if the SACK's DAC flag was 0, then no extra
5105 	 * marking is to be done. Setting this_sack_lowest_newack to the
5106 	 * cum_ack will automatically ensure that.
5107 	 */
5108 	if ((asoc->sctp_cmt_on_off > 0) &&
5109 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5110 	    (cmt_dac_flag == 0)) {
5111 		this_sack_lowest_newack = cum_ack;
5112 	}
5113 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5114 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5115 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5116 	}
5117 	/* JRS - Use the congestion control given in the CC module */
5118 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5119 
5120 	/* Now are we exiting loss recovery? */
5121 	if (will_exit_fast_recovery) {
5122 		/* Ok, we must exit fast recovery */
5123 		asoc->fast_retran_loss_recovery = 0;
5124 	}
5125 	if ((asoc->sat_t3_loss_recovery) &&
5126 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5127 		/* end satellite t3 loss recovery */
5128 		asoc->sat_t3_loss_recovery = 0;
5129 	}
5130 	/*
5131 	 * CMT Fast recovery
5132 	 */
5133 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5134 		if (net->will_exit_fast_recovery) {
5135 			/* Ok, we must exit fast recovery */
5136 			net->fast_retran_loss_recovery = 0;
5137 		}
5138 	}
5139 
5140 	/* Adjust and set the new rwnd value */
5141 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5142 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5143 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5144 	}
5145 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5146 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5147 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5148 		/* SWS sender side engages */
5149 		asoc->peers_rwnd = 0;
5150 	}
5151 	if (asoc->peers_rwnd > old_rwnd) {
5152 		win_probe_recovery = 1;
5153 	}
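	/*
	 * Worked example for the rwnd computation above (illustrative
	 * numbers only): with a_rwnd = 64000, total_flight = 12000,
	 * total_flight_count = 10 and sctp_peer_chunk_oh = 256, the new
	 * peers_rwnd is 64000 - (12000 + 10 * 256) = 49440. If the result
	 * would drop below the sender-side SWS threshold
	 * (sctp_sws_sender), it is forced to 0 to avoid silly window
	 * syndrome; sctp_sbspace_sub() also clamps an underflow to 0.
	 */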
5154 
5155 	/*
5156 	 * Now we must set up a timer for anyone with
5157 	 * outstanding data.
5158 	 */
5159 	done_once = 0;
5160 again:
5161 	j = 0;
5162 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5163 		if (win_probe_recovery && (net->window_probe)) {
5164 			win_probe_recovered = 1;
5165 			/*-
5166 			 * Find the first chunk that was used for a
5167 			 * window probe and clear the event. Put it
5168 			 * back into the send queue as if it had not
5169 			 * been sent.
5170 			 */
5171 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5172 				if (tp1->window_probe) {
5173 					sctp_window_probe_recovery(stcb, asoc, tp1);
5174 					break;
5175 				}
5176 			}
5177 		}
5178 		if (net->flight_size) {
5179 			j++;
5180 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5181 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5182 				    stcb->sctp_ep, stcb, net);
5183 			}
5184 			if (net->window_probe) {
5185 				net->window_probe = 0;
5186 			}
5187 		} else {
5188 			if (net->window_probe) {
5189 				/*
5190 				 * For window probes we must ensure that a
5191 				 * timer is still running there.
5192 				 */
5193 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5194 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5195 					    stcb->sctp_ep, stcb, net);
5196 
5197 				}
5198 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5199 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5200 				    stcb, net,
5201 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5202 			}
5203 		}
5204 	}
5205 	if ((j == 0) &&
5206 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5207 	    (asoc->sent_queue_retran_cnt == 0) &&
5208 	    (win_probe_recovered == 0) &&
5209 	    (done_once == 0)) {
5210 		/*
5211 		 * This should not happen unless all packets are
5212 		 * PR-SCTP and marked to skip, of course.
5213 		 */
5214 		if (sctp_fs_audit(asoc)) {
5215 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5216 				net->flight_size = 0;
5217 			}
5218 			asoc->total_flight = 0;
5219 			asoc->total_flight_count = 0;
5220 			asoc->sent_queue_retran_cnt = 0;
5221 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5222 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5223 					sctp_flight_size_increase(tp1);
5224 					sctp_total_flight_increase(stcb, tp1);
5225 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5226 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5227 				}
5228 			}
5229 		}
5230 		done_once = 1;
5231 		goto again;
5232 	}
5233 	/*********************************************/
5234 	/* Here we perform PR-SCTP procedures        */
5235 	/* (section 4.2)                             */
5236 	/*********************************************/
5237 	/* C1. update advancedPeerAckPoint */
5238 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5239 		asoc->advanced_peer_ack_point = cum_ack;
5240 	}
5241 	/* C2. try to further move advancedPeerAckPoint ahead */
5242 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5243 		struct sctp_tmit_chunk *lchk;
5244 		uint32_t old_adv_peer_ack_point;
5245 
5246 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5247 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5248 		/* C3. See if we need to send a Fwd-TSN */
5249 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5250 			/*
5251 			 * ISSUE with ECN, see FWD-TSN processing.
5252 			 */
5253 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5254 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5255 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5256 				    old_adv_peer_ack_point);
5257 			}
5258 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5259 				send_forward_tsn(stcb, asoc);
5260 			} else if (lchk) {
5261 				/* try to FR fwd-tsn's that get lost too */
5262 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5263 					send_forward_tsn(stcb, asoc);
5264 				}
5265 			}
5266 		}
5267 		if (lchk) {
5268 			/* Assure a timer is up */
5269 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5270 			    stcb->sctp_ep, stcb, lchk->whoTo);
5271 		}
5272 	}
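	/*
	 * Example of the C1-C3 rules above (RFC 3758, Section 3.5):
	 * suppose cum_ack = 1000 and TSNs 1001-1003 were abandoned under a
	 * PR-SCTP policy. sctp_try_advance_peer_ack_point() moves
	 * advanced_peer_ack_point to 1003; since 1003 > 1000 and the ack
	 * point moved, a FORWARD-TSN carrying 1003 is sent so the receiver
	 * can stop waiting for the abandoned TSNs.
	 */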
5273 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5274 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5275 		    a_rwnd,
5276 		    stcb->asoc.peers_rwnd,
5277 		    stcb->asoc.total_flight,
5278 		    stcb->asoc.total_output_queue_size);
5279 	}
5280 }
5281 
5282 void
5283 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5284 {
5285 	/* Copy cum-ack */
5286 	uint32_t cum_ack, a_rwnd;
5287 
5288 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5289 	/* Arrange so a_rwnd does NOT change */
5290 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5291 
5292 	/* Now call the express sack handling */
5293 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5294 }
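/*
 * For reference, a sketch of what sctp_update_acked() consumes: the
 * SHUTDOWN chunk carries only a cumulative TSN (RFC 4960, Section 3.3.8),
 * i.e. a chunk header followed by one 32-bit TSN in network byte order:
 *
 *	struct sctp_shutdown_chunk {
 *		struct sctp_chunkhdr ch;	type 7, flags 0, length 8
 *		uint32_t cumulative_tsn_ack;
 *	};
 *
 * Since a SHUTDOWN carries no window update, an a_rwnd that leaves the
 * peer's window unchanged (peers_rwnd + total_flight) is synthesized
 * before reusing the express SACK path.
 */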
5295 
5296 static void
5297 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5298     struct sctp_stream_in *strmin)
5299 {
5300 	struct sctp_queued_to_read *control, *ncontrol;
5301 	struct sctp_association *asoc;
5302 	uint32_t mid;
5303 	int need_reasm_check = 0;
5304 
5305 	asoc = &stcb->asoc;
5306 	mid = strmin->last_mid_delivered;
5307 	/*
5308 	 * First deliver anything prior to and including the message-id that
5309 	 * came in.
5310 	 */
5311 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5312 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5313 			/* this is deliverable now */
5314 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5315 				if (control->on_strm_q) {
5316 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5317 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5318 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5319 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5320 #ifdef INVARIANTS
5321 					} else {
5322 						panic("strmin: %p ctl: %p unknown %d",
5323 						    strmin, control, control->on_strm_q);
5324 #endif
5325 					}
5326 					control->on_strm_q = 0;
5327 				}
5328 				/* subtract pending on streams */
5329 				if (asoc->size_on_all_streams >= control->length) {
5330 					asoc->size_on_all_streams -= control->length;
5331 				} else {
5332 #ifdef INVARIANTS
5333 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5334 #else
5335 					asoc->size_on_all_streams = 0;
5336 #endif
5337 				}
5338 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5339 				/* deliver it to at least the delivery-q */
5340 				if (stcb->sctp_socket) {
5341 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5342 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5343 					    control,
5344 					    &stcb->sctp_socket->so_rcv,
5345 					    1, SCTP_READ_LOCK_HELD,
5346 					    SCTP_SO_NOT_LOCKED);
5347 				}
5348 			} else {
5349 				/* It's a fragmented message */
5350 				if (control->first_frag_seen) {
5351 					/*
5352 					 * Make it so this is next to
5353 					 * deliver; we restore it later.
5354 					 */
5355 					strmin->last_mid_delivered = control->mid - 1;
5356 					need_reasm_check = 1;
5357 					break;
5358 				}
5359 			}
5360 		} else {
5361 			/* no more delivery now. */
5362 			break;
5363 		}
5364 	}
5365 	if (need_reasm_check) {
5366 		int ret;
5367 
5368 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5369 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5370 			/* Restore the next to deliver unless we are ahead */
5371 			strmin->last_mid_delivered = mid;
5372 		}
5373 		if (ret == 0) {
5374 			/* Left the front Partial one on */
5375 			return;
5376 		}
5377 		need_reasm_check = 0;
5378 	}
5379 	/*
5380 	 * Now we must deliver things in the queue the normal way, if any
5381 	 * are now ready.
5382 	 */
5383 	mid = strmin->last_mid_delivered + 1;
5384 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5385 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5386 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5387 				/* this is deliverable now */
5388 				if (control->on_strm_q) {
5389 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5390 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5391 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5392 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5393 #ifdef INVARIANTS
5394 					} else {
5395 						panic("strmin: %p ctl: %p unknown %d",
5396 						    strmin, control, control->on_strm_q);
5397 #endif
5398 					}
5399 					control->on_strm_q = 0;
5400 				}
5401 				/* subtract pending on streams */
5402 				if (asoc->size_on_all_streams >= control->length) {
5403 					asoc->size_on_all_streams -= control->length;
5404 				} else {
5405 #ifdef INVARIANTS
5406 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5407 #else
5408 					asoc->size_on_all_streams = 0;
5409 #endif
5410 				}
5411 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5412 				/* deliver it to at least the delivery-q */
5413 				strmin->last_mid_delivered = control->mid;
5414 				if (stcb->sctp_socket) {
5415 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5416 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5417 					    control,
5418 					    &stcb->sctp_socket->so_rcv, 1,
5419 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5420 
5421 				}
5422 				mid = strmin->last_mid_delivered + 1;
5423 			} else {
5424 				/* It's a fragmented message */
5425 				if (control->first_frag_seen) {
5426 					/*
5427 					 * Make it so this is next to
5428 					 * deliver
5429 					 */
5430 					strmin->last_mid_delivered = control->mid - 1;
5431 					need_reasm_check = 1;
5432 					break;
5433 				}
5434 			}
5435 		} else {
5436 			break;
5437 		}
5438 	}
5439 	if (need_reasm_check) {
5440 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5441 	}
5442 }
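/*
 * Illustrative sketch, assuming the comparison macros follow the usual
 * serial-number arithmetic in sctp_constants.h: with I-DATA support the
 * message-id (MID) is a full 32-bit serial number, otherwise it is the
 * classic 16-bit SSN, so a comparison such as SCTP_MID_GE() used above is
 * conceptually
 *
 *	mid_ge(idata_supported, a, b) ==
 *	    idata_supported ? serial32_ge(a, b)
 *	                    : serial16_ge((uint16_t)a, (uint16_t)b)
 *
 * where serialN_ge(a, b) means (a - b) mod 2^N < 2^(N-1).
 */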
5443 
5444 
5445 
5446 static void
5447 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5448     struct sctp_association *asoc,
5449     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5450 {
5451 	struct sctp_queued_to_read *control;
5452 	struct sctp_stream_in *strm;
5453 	struct sctp_tmit_chunk *chk, *nchk;
5454 	int cnt_removed = 0;
5455 
5456 	/*
5457 	 * For now, large messages held on the stream reassembly queue that
5458 	 * are complete will be tossed too. We could in theory do more work
5459 	 * to spin through and stop after dumping one message, i.e. on seeing
5460 	 * the start of a new message at the head, and call the delivery
5461 	 * function to see if it can be delivered. But for now we just dump
5462 	 * everything on the queue.
5463 	 */
5464 	strm = &asoc->strmin[stream];
5465 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5466 	if (control == NULL) {
5467 		/* Not found */
5468 		return;
5469 	}
5470 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5471 		return;
5472 	}
5473 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5474 		/* Purge hanging chunks */
5475 		if (!asoc->idata_supported && (ordered == 0)) {
5476 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5477 				break;
5478 			}
5479 		}
5480 		cnt_removed++;
5481 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5482 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5483 			asoc->size_on_reasm_queue -= chk->send_size;
5484 		} else {
5485 #ifdef INVARIANTS
5486 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5487 #else
5488 			asoc->size_on_reasm_queue = 0;
5489 #endif
5490 		}
5491 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5492 		if (chk->data) {
5493 			sctp_m_freem(chk->data);
5494 			chk->data = NULL;
5495 		}
5496 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5497 	}
5498 	if (!TAILQ_EMPTY(&control->reasm)) {
5499 		/* This has to be old data, unordered */
5500 		if (control->data) {
5501 			sctp_m_freem(control->data);
5502 			control->data = NULL;
5503 		}
5504 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5505 		chk = TAILQ_FIRST(&control->reasm);
5506 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5507 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5508 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5509 			    chk, SCTP_READ_LOCK_HELD);
5510 		}
5511 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5512 		return;
5513 	}
5514 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5515 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5516 		if (asoc->size_on_all_streams >= control->length) {
5517 			asoc->size_on_all_streams -= control->length;
5518 		} else {
5519 #ifdef INVARIANTS
5520 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5521 #else
5522 			asoc->size_on_all_streams = 0;
5523 #endif
5524 		}
5525 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5526 		control->on_strm_q = 0;
5527 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5528 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5529 		control->on_strm_q = 0;
5530 #ifdef INVARIANTS
5531 	} else if (control->on_strm_q) {
5532 		panic("strm: %p ctl: %p unknown %d",
5533 		    strm, control, control->on_strm_q);
5534 #endif
5535 	}
5536 	control->on_strm_q = 0;
5537 	if (control->on_read_q == 0) {
5538 		sctp_free_remote_addr(control->whoFrom);
5539 		if (control->data) {
5540 			sctp_m_freem(control->data);
5541 			control->data = NULL;
5542 		}
5543 		sctp_free_a_readq(stcb, control);
5544 	}
5545 }
5546 
5547 void
5548 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5549     struct sctp_forward_tsn_chunk *fwd,
5550     int *abort_flag, struct mbuf *m, int offset)
5551 {
5552 	/* The pr-sctp fwd tsn */
5553 	/*
5554 	 * Here we will perform all the data receiver side steps for
5555 	 * processing FwdTSN, as required by the pr-sctp draft:
5556 	 *
5557 	 * Assume we get FwdTSN(x):
5558 	 *
5559 	 * 1) update local cumTSN to x, 2) try to further advance cumTSN
5560 	 * to x plus any others we have, 3) examine and update the
5561 	 * re-ordering queues on pr-in-streams, 4) clean up the
5562 	 * re-assembly queue, and 5) send a SACK to report where we are.
5563 	 */
5564 	struct sctp_association *asoc;
5565 	uint32_t new_cum_tsn, gap;
5566 	unsigned int i, fwd_sz, m_size;
5567 	uint32_t str_seq;
5568 	struct sctp_stream_in *strm;
5569 	struct sctp_queued_to_read *control, *sv;
5570 
5571 	asoc = &stcb->asoc;
5572 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5573 		SCTPDBG(SCTP_DEBUG_INDATA1,
5574 		    "Bad size too small/big fwd-tsn\n");
5575 		return;
5576 	}
5577 	m_size = (stcb->asoc.mapping_array_size << 3);
5578 	/*************************************************************/
5579 	/* 1. Here we update local cumTSN and shift the bitmap array */
5580 	/*************************************************************/
5581 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5582 
5583 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5584 		/* Already got there ... */
5585 		return;
5586 	}
5587 	/*
5588 	 * now we know the new TSN is more advanced, let's find the actual
5589 	 * gap
5590 	 */
5591 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5592 	asoc->cumulative_tsn = new_cum_tsn;
5593 	if (gap >= m_size) {
5594 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5595 			struct mbuf *op_err;
5596 			char msg[SCTP_DIAG_INFO_LEN];
5597 
5598 			/*
5599 			 * Out of range (of single-byte chunks in the rwnd we
5600 			 * give out). This must be an attacker.
5601 			 */
5602 			*abort_flag = 1;
5603 			snprintf(msg, sizeof(msg),
5604 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5605 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5606 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5607 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5608 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5609 			return;
5610 		}
5611 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5612 
5613 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5614 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5615 		asoc->highest_tsn_inside_map = new_cum_tsn;
5616 
5617 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5618 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5619 
5620 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5621 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5622 		}
5623 	} else {
5624 		SCTP_TCB_LOCK_ASSERT(stcb);
5625 		for (i = 0; i <= gap; i++) {
5626 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5627 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5628 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5629 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5630 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5631 				}
5632 			}
5633 		}
5634 	}
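	/*
	 * Worked example for the gap computation above, assuming
	 * SCTP_CALC_TSN_TO_GAP() is a mod-2^32 subtraction of the mapping
	 * array base: with mapping_array_base_tsn = 0x00000100 and
	 * new_cum_tsn = 0x00000105 the gap is 5; with base = 0xfffffffe
	 * and new_cum_tsn = 0x00000001 the gap is 3, i.e. the slide works
	 * across a TSN wrap.
	 */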
5635 	/*************************************************************/
5636 	/* 2. Clear up re-assembly queue                             */
5637 	/*************************************************************/
5638 
5639 	/* This is now done as part of clearing up the stream/seq */
5640 	if (asoc->idata_supported == 0) {
5641 		uint16_t sid;
5642 
5643 		/* Flush all the un-ordered data based on cum-tsn */
5644 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5645 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5646 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5647 		}
5648 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5649 	}
5650 	/*******************************************************/
5651 	/* 3. Update the PR-stream re-ordering queues and fix  */
5652 	/* delivery issues as needed.                          */
5653 	/*******************************************************/
5654 	fwd_sz -= sizeof(*fwd);
5655 	if (m && fwd_sz) {
5656 		/* New method. */
5657 		unsigned int num_str;
5658 		uint32_t mid, cur_mid;
5659 		uint16_t sid;
5660 		uint16_t ordered, flags;
5661 		struct sctp_strseq *stseq, strseqbuf;
5662 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5663 
5664 		offset += sizeof(*fwd);
5665 
5666 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5667 		if (asoc->idata_supported) {
5668 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5669 		} else {
5670 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5671 		}
5672 		for (i = 0; i < num_str; i++) {
5673 			if (asoc->idata_supported) {
5674 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5675 				    sizeof(struct sctp_strseq_mid),
5676 				    (uint8_t *)&strseqbuf_m);
5677 				offset += sizeof(struct sctp_strseq_mid);
5678 				if (stseq_m == NULL) {
5679 					break;
5680 				}
5681 				sid = ntohs(stseq_m->sid);
5682 				mid = ntohl(stseq_m->mid);
5683 				flags = ntohs(stseq_m->flags);
5684 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5685 					ordered = 0;
5686 				} else {
5687 					ordered = 1;
5688 				}
5689 			} else {
5690 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5691 				    sizeof(struct sctp_strseq),
5692 				    (uint8_t *)&strseqbuf);
5693 				offset += sizeof(struct sctp_strseq);
5694 				if (stseq == NULL) {
5695 					break;
5696 				}
5697 				sid = ntohs(stseq->sid);
5698 				mid = (uint32_t)ntohs(stseq->ssn);
5699 				ordered = 1;
5700 			}
5701 			/* Convert */
5702 
5703 			/* now process */
5704 
5705 			/*
5706 			 * Ok we now look for the stream/seq on the read
5707 			 * queue where it's not all delivered. If we find it
5708 			 * we transmute the read entry into a PDI_ABORTED.
5709 			 */
5710 			if (sid >= asoc->streamincnt) {
5711 				/* screwed up streams, stop!  */
5712 				break;
5713 			}
5714 			if ((asoc->str_of_pdapi == sid) &&
5715 			    (asoc->ssn_of_pdapi == mid)) {
5716 				/*
5717 				 * If this is the one we were partially
5718 				 * delivering now then we no longer are.
5719 				 * Note this will change with the reassembly
5720 				 * re-write.
5721 				 */
5722 				asoc->fragmented_delivery_inprogress = 0;
5723 			}
5724 			strm = &asoc->strmin[sid];
5725 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5726 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5727 			}
5728 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5729 				if ((control->sinfo_stream == sid) &&
5730 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5731 					str_seq = (sid << 16) | (0x0000ffff & mid);
5732 					control->pdapi_aborted = 1;
5733 					sv = stcb->asoc.control_pdapi;
5734 					control->end_added = 1;
5735 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5736 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5737 						if (asoc->size_on_all_streams >= control->length) {
5738 							asoc->size_on_all_streams -= control->length;
5739 						} else {
5740 #ifdef INVARIANTS
5741 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5742 #else
5743 							asoc->size_on_all_streams = 0;
5744 #endif
5745 						}
5746 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5747 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5748 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5749 #ifdef INVARIANTS
5750 					} else if (control->on_strm_q) {
5751 						panic("strm: %p ctl: %p unknown %d",
5752 						    strm, control, control->on_strm_q);
5753 #endif
5754 					}
5755 					control->on_strm_q = 0;
5756 					stcb->asoc.control_pdapi = control;
5757 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5758 					    stcb,
5759 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5760 					    (void *)&str_seq,
5761 					    SCTP_SO_NOT_LOCKED);
5762 					stcb->asoc.control_pdapi = sv;
5763 					break;
5764 				} else if ((control->sinfo_stream == sid) &&
5765 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5766 					/* We are past our victim SSN */
5767 					break;
5768 				}
5769 			}
5770 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5771 				/* Update the sequence number */
5772 				strm->last_mid_delivered = mid;
5773 			}
5774 			/* now kick the stream the new way */
5775 			/* sa_ignore NO_NULL_CHK */
5776 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5777 		}
5778 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5779 	}
5780 	/*
5781 	 * Now slide things forward.
5782 	 */
5783 	sctp_slide_mapping_arrays(stcb);
5784 }
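/*
 * For reference, a sketch of the FORWARD-TSN wire format consumed above,
 * consistent with the field accesses in sctp_handle_forward_tsn() (RFC
 * 3758, Section 3.2; the I-FORWARD-TSN variant used when I-DATA is
 * supported carries a 32-bit MID plus flags instead of the 16-bit SSN;
 * see sctp_header.h for the authoritative definitions):
 *
 *	struct sctp_forward_tsn_chunk {
 *		struct sctp_chunkhdr ch;	chunk header
 *		uint32_t new_cumulative_tsn;	network byte order
 *	};					followed by stream entries:
 *	struct sctp_strseq {			FORWARD-TSN entry
 *		uint16_t sid;			stream identifier
 *		uint16_t ssn;			stream sequence number
 *	};
 *	struct sctp_strseq_mid {		I-FORWARD-TSN entry
 *		uint16_t sid;
 *		uint16_t flags;			PR_SCTP_UNORDERED_FLAG etc.
 *		uint32_t mid;			32-bit message identifier
 *	};
 */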
5785