xref: /freebsd/sys/netinet/sctp_indata.c (revision b13788e396c2b24f88697e7d4a74bab429ef4d0c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send that is and will be sending it .. for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int lock_held);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-2-m socket. Since
86 	 * the sb_cc is the count that everyone as put up. When we re-write
87 	 * sctp_soreceive then we will fix this so that ONLY this
88 	 * associations data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * take out what has NOT been put on socket queue and we yet hold
109 	 * for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even it is 0. SWS engaged
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 		read_queue_e->do_not_ref_stcb = 1;
169 	}
170 failed_build:
171 	return (read_queue_e);
172 }
173 
174 struct mbuf *
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176 {
177 	struct sctp_extrcvinfo *seinfo;
178 	struct sctp_sndrcvinfo *outinfo;
179 	struct sctp_rcvinfo *rcvinfo;
180 	struct sctp_nxtinfo *nxtinfo;
181 	struct cmsghdr *cmh;
182 	struct mbuf *ret;
183 	int len;
184 	int use_extended;
185 	int provide_nxt;
186 
187 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 		/* user does not want any ancillary data */
191 		return (NULL);
192 	}
193 
194 	len = 0;
195 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
197 	}
198 	seinfo = (struct sctp_extrcvinfo *)sinfo;
199 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
201 		provide_nxt = 1;
202 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 	} else {
204 		provide_nxt = 0;
205 	}
206 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
208 			use_extended = 1;
209 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
210 		} else {
211 			use_extended = 0;
212 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
213 		}
214 	} else {
215 		use_extended = 0;
216 	}
217 
218 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
219 	if (ret == NULL) {
220 		/* No space */
221 		return (ret);
222 	}
223 	SCTP_BUF_LEN(ret) = 0;
224 
225 	/* We need a CMSG header followed by the struct */
226 	cmh = mtod(ret, struct cmsghdr *);
227 	/*
228 	 * Make sure that there is no un-initialized padding between the
229 	 * cmsg header and cmsg data and after the cmsg data.
230 	 */
231 	memset(cmh, 0, len);
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 		cmh->cmsg_level = IPPROTO_SCTP;
234 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 		cmh->cmsg_type = SCTP_RCVINFO;
236 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 		rcvinfo->rcv_context = sinfo->sinfo_context;
244 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
245 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
247 	}
248 	if (provide_nxt) {
249 		cmh->cmsg_level = IPPROTO_SCTP;
250 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 		cmh->cmsg_type = SCTP_NXTINFO;
252 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 		nxtinfo->nxt_flags = 0;
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
260 		}
261 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
263 		}
264 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
269 	}
270 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 		cmh->cmsg_level = IPPROTO_SCTP;
272 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
273 		if (use_extended) {
274 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 			cmh->cmsg_type = SCTP_EXTRCV;
276 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
278 		} else {
279 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 			cmh->cmsg_type = SCTP_SNDRCV;
281 			*outinfo = *sinfo;
282 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
283 		}
284 	}
285 	return (ret);
286 }
287 
288 
289 static void
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
291 {
292 	uint32_t gap, i, cumackp1;
293 	int fnd = 0;
294 	int in_r = 0, in_nr = 0;
295 
296 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
297 		return;
298 	}
299 	cumackp1 = asoc->cumulative_tsn + 1;
300 	if (SCTP_TSN_GT(cumackp1, tsn)) {
301 		/*
302 		 * this tsn is behind the cum ack and thus we don't need to
303 		 * worry about it being moved from one to the other.
304 		 */
305 		return;
306 	}
307 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 	if ((in_r == 0) && (in_nr == 0)) {
311 #ifdef INVARIANTS
312 		panic("Things are really messed up now");
313 #else
314 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 		sctp_print_mapping_array(asoc);
316 #endif
317 	}
318 	if (in_nr == 0)
319 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
320 	if (in_r)
321 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 		asoc->highest_tsn_inside_nr_map = tsn;
324 	}
325 	if (tsn == asoc->highest_tsn_inside_map) {
326 		/* We must back down to see what the new highest is */
327 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 				asoc->highest_tsn_inside_map = i;
331 				fnd = 1;
332 				break;
333 			}
334 		}
335 		if (!fnd) {
336 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
337 		}
338 	}
339 }
340 
341 static int
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343     struct sctp_association *asoc,
344     struct sctp_queued_to_read *control)
345 {
346 	struct sctp_queued_to_read *at;
347 	struct sctp_readhead *q;
348 	uint8_t flags, unordered;
349 
350 	flags = (control->sinfo_flags >> 8);
351 	unordered = flags & SCTP_DATA_UNORDERED;
352 	if (unordered) {
353 		q = &strm->uno_inqueue;
354 		if (asoc->idata_supported == 0) {
355 			if (!TAILQ_EMPTY(q)) {
356 				/*
357 				 * Only one stream can be here in old style
358 				 * -- abort
359 				 */
360 				return (-1);
361 			}
362 			TAILQ_INSERT_TAIL(q, control, next_instrm);
363 			control->on_strm_q = SCTP_ON_UNORDERED;
364 			return (0);
365 		}
366 	} else {
367 		q = &strm->inqueue;
368 	}
369 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 		control->end_added = 1;
371 		control->first_frag_seen = 1;
372 		control->last_frag_seen = 1;
373 	}
374 	if (TAILQ_EMPTY(q)) {
375 		/* Empty queue */
376 		TAILQ_INSERT_HEAD(q, control, next_instrm);
377 		if (unordered) {
378 			control->on_strm_q = SCTP_ON_UNORDERED;
379 		} else {
380 			control->on_strm_q = SCTP_ON_ORDERED;
381 		}
382 		return (0);
383 	} else {
384 		TAILQ_FOREACH(at, q, next_instrm) {
385 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
386 				/*
387 				 * one in queue is bigger than the new one,
388 				 * insert before this one
389 				 */
390 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
391 				if (unordered) {
392 					control->on_strm_q = SCTP_ON_UNORDERED;
393 				} else {
394 					control->on_strm_q = SCTP_ON_ORDERED;
395 				}
396 				break;
397 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
398 				/*
399 				 * Gak, He sent me a duplicate msg id
400 				 * number?? return -1 to abort.
401 				 */
402 				return (-1);
403 			} else {
404 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
405 					/*
406 					 * We are at the end, insert it
407 					 * after this one
408 					 */
409 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 						sctp_log_strm_del(control, at,
411 						    SCTP_STR_LOG_FROM_INSERT_TL);
412 					}
413 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
414 					if (unordered) {
415 						control->on_strm_q = SCTP_ON_UNORDERED;
416 					} else {
417 						control->on_strm_q = SCTP_ON_ORDERED;
418 					}
419 					break;
420 				}
421 			}
422 		}
423 	}
424 	return (0);
425 }
426 
427 static void
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429     struct sctp_queued_to_read *control,
430     struct sctp_tmit_chunk *chk,
431     int *abort_flag, int opspot)
432 {
433 	char msg[SCTP_DIAG_INFO_LEN];
434 	struct mbuf *oper;
435 
436 	if (stcb->asoc.idata_supported) {
437 		if (snprintf(msg, sizeof(msg),
438 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
439 		    opspot,
440 		    control->fsn_included,
441 		    chk->rec.data.tsn,
442 		    chk->rec.data.sid,
443 		    chk->rec.data.fsn, chk->rec.data.mid) < 0) {
444 			msg[0] = '\0';
445 		}
446 	} else {
447 		if (snprintf(msg, sizeof(msg),
448 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
449 		    opspot,
450 		    control->fsn_included,
451 		    chk->rec.data.tsn,
452 		    chk->rec.data.sid,
453 		    chk->rec.data.fsn,
454 		    (uint16_t)chk->rec.data.mid) < 0) {
455 			msg[0] = '\0';
456 		}
457 	}
458 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
459 	sctp_m_freem(chk->data);
460 	chk->data = NULL;
461 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
462 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
463 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
464 	*abort_flag = 1;
465 }
466 
467 static void
468 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
469 {
470 	/*
471 	 * The control could not be placed and must be cleaned.
472 	 */
473 	struct sctp_tmit_chunk *chk, *nchk;
474 
475 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
476 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
477 		if (chk->data)
478 			sctp_m_freem(chk->data);
479 		chk->data = NULL;
480 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
481 	}
482 	sctp_free_remote_addr(control->whoFrom);
483 	if (control->data) {
484 		sctp_m_freem(control->data);
485 		control->data = NULL;
486 	}
487 	sctp_free_a_readq(stcb, control);
488 }
489 
490 /*
491  * Queue the chunk either right into the socket buffer if it is the next one
492  * to go OR put it in the correct place in the delivery queue.  If we do
493  * append to the so_buf, keep doing so until we are out of order as
494  * long as the control's entered are non-fragmented.
495  */
496 static void
497 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
498     struct sctp_association *asoc,
499     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
500 {
501 	/*
502 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
503 	 * all the data in one stream this could happen quite rapidly. One
504 	 * could use the TSN to keep track of things, but this scheme breaks
505 	 * down in the other type of stream usage that could occur. Send a
506 	 * single msg to stream 0, send 4Billion messages to stream 1, now
507 	 * send a message to stream 0. You have a situation where the TSN
508 	 * has wrapped but not in the stream. Is this worth worrying about
509 	 * or should we just change our queue sort at the bottom to be by
510 	 * TSN.
511 	 *
512 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
513 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
514 	 * assignment this could happen... and I don't see how this would be
515 	 * a violation. So for now I am undecided an will leave the sort by
516 	 * SSN alone. Maybe a hybred approach is the answer
517 	 *
518 	 */
519 	struct sctp_queued_to_read *at;
520 	int queue_needed;
521 	uint32_t nxt_todel;
522 	struct mbuf *op_err;
523 	struct sctp_stream_in *strm;
524 	char msg[SCTP_DIAG_INFO_LEN];
525 
526 	strm = &asoc->strmin[control->sinfo_stream];
527 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
528 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
529 	}
530 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
531 		/* The incoming sseq is behind where we last delivered? */
532 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
533 		    strm->last_mid_delivered, control->mid);
534 		/*
535 		 * throw it in the stream so it gets cleaned up in
536 		 * association destruction
537 		 */
538 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
539 		if (asoc->idata_supported) {
540 			if (snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
541 			    strm->last_mid_delivered, control->sinfo_tsn,
542 			    control->sinfo_stream, control->mid) < 0) {
543 				msg[0] = '\0';
544 			}
545 		} else {
546 			if (snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
547 			    (uint16_t)strm->last_mid_delivered,
548 			    control->sinfo_tsn,
549 			    control->sinfo_stream,
550 			    (uint16_t)control->mid) < 0) {
551 				msg[0] = '\0';
552 			}
553 		}
554 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
555 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
556 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
557 		*abort_flag = 1;
558 		return;
559 
560 	}
561 	queue_needed = 1;
562 	asoc->size_on_all_streams += control->length;
563 	sctp_ucount_incr(asoc->cnt_on_all_streams);
564 	nxt_todel = strm->last_mid_delivered + 1;
565 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
566 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
567 		struct socket *so;
568 
569 		so = SCTP_INP_SO(stcb->sctp_ep);
570 		atomic_add_int(&stcb->asoc.refcnt, 1);
571 		SCTP_TCB_UNLOCK(stcb);
572 		SCTP_SOCKET_LOCK(so, 1);
573 		SCTP_TCB_LOCK(stcb);
574 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
575 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
576 			SCTP_SOCKET_UNLOCK(so, 1);
577 			return;
578 		}
579 #endif
580 		/* can be delivered right away? */
581 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
582 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
583 		}
584 		/* EY it wont be queued if it could be delivered directly */
585 		queue_needed = 0;
586 		if (asoc->size_on_all_streams >= control->length) {
587 			asoc->size_on_all_streams -= control->length;
588 		} else {
589 #ifdef INVARIANTS
590 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
591 #else
592 			asoc->size_on_all_streams = 0;
593 #endif
594 		}
595 		sctp_ucount_decr(asoc->cnt_on_all_streams);
596 		strm->last_mid_delivered++;
597 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
598 		sctp_add_to_readq(stcb->sctp_ep, stcb,
599 		    control,
600 		    &stcb->sctp_socket->so_rcv, 1,
601 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
602 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
603 			/* all delivered */
604 			nxt_todel = strm->last_mid_delivered + 1;
605 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
606 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
607 				if (control->on_strm_q == SCTP_ON_ORDERED) {
608 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
609 					if (asoc->size_on_all_streams >= control->length) {
610 						asoc->size_on_all_streams -= control->length;
611 					} else {
612 #ifdef INVARIANTS
613 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
614 #else
615 						asoc->size_on_all_streams = 0;
616 #endif
617 					}
618 					sctp_ucount_decr(asoc->cnt_on_all_streams);
619 #ifdef INVARIANTS
620 				} else {
621 					panic("Huh control: %p is on_strm_q: %d",
622 					    control, control->on_strm_q);
623 #endif
624 				}
625 				control->on_strm_q = 0;
626 				strm->last_mid_delivered++;
627 				/*
628 				 * We ignore the return of deliver_data here
629 				 * since we always can hold the chunk on the
630 				 * d-queue. And we have a finite number that
631 				 * can be delivered from the strq.
632 				 */
633 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
634 					sctp_log_strm_del(control, NULL,
635 					    SCTP_STR_LOG_FROM_IMMED_DEL);
636 				}
637 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
638 				sctp_add_to_readq(stcb->sctp_ep, stcb,
639 				    control,
640 				    &stcb->sctp_socket->so_rcv, 1,
641 				    SCTP_READ_LOCK_NOT_HELD,
642 				    SCTP_SO_LOCKED);
643 				continue;
644 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
645 				*need_reasm = 1;
646 			}
647 			break;
648 		}
649 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
650 		SCTP_SOCKET_UNLOCK(so, 1);
651 #endif
652 	}
653 	if (queue_needed) {
654 		/*
655 		 * Ok, we did not deliver this guy, find the correct place
656 		 * to put it on the queue.
657 		 */
658 		if (sctp_place_control_in_stream(strm, asoc, control)) {
659 			if (snprintf(msg, sizeof(msg),
660 			    "Queue to str MID: %u duplicate", control->mid) < 0) {
661 				msg[0] = '\0';
662 			}
663 			sctp_clean_up_control(stcb, control);
664 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
665 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
666 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
667 			*abort_flag = 1;
668 		}
669 	}
670 }
671 
672 
673 static void
674 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
675 {
676 	struct mbuf *m, *prev = NULL;
677 	struct sctp_tcb *stcb;
678 
679 	stcb = control->stcb;
680 	control->held_length = 0;
681 	control->length = 0;
682 	m = control->data;
683 	while (m) {
684 		if (SCTP_BUF_LEN(m) == 0) {
685 			/* Skip mbufs with NO length */
686 			if (prev == NULL) {
687 				/* First one */
688 				control->data = sctp_m_free(m);
689 				m = control->data;
690 			} else {
691 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
692 				m = SCTP_BUF_NEXT(prev);
693 			}
694 			if (m == NULL) {
695 				control->tail_mbuf = prev;
696 			}
697 			continue;
698 		}
699 		prev = m;
700 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
701 		if (control->on_read_q) {
702 			/*
703 			 * On read queue so we must increment the SB stuff,
704 			 * we assume caller has done any locks of SB.
705 			 */
706 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
707 		}
708 		m = SCTP_BUF_NEXT(m);
709 	}
710 	if (prev) {
711 		control->tail_mbuf = prev;
712 	}
713 }
714 
715 static void
716 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
717 {
718 	struct mbuf *prev = NULL;
719 	struct sctp_tcb *stcb;
720 
721 	stcb = control->stcb;
722 	if (stcb == NULL) {
723 #ifdef INVARIANTS
724 		panic("Control broken");
725 #else
726 		return;
727 #endif
728 	}
729 	if (control->tail_mbuf == NULL) {
730 		/* TSNH */
731 		sctp_m_freem(control->data);
732 		control->data = m;
733 		sctp_setup_tail_pointer(control);
734 		return;
735 	}
736 	control->tail_mbuf->m_next = m;
737 	while (m) {
738 		if (SCTP_BUF_LEN(m) == 0) {
739 			/* Skip mbufs with NO length */
740 			if (prev == NULL) {
741 				/* First one */
742 				control->tail_mbuf->m_next = sctp_m_free(m);
743 				m = control->tail_mbuf->m_next;
744 			} else {
745 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
746 				m = SCTP_BUF_NEXT(prev);
747 			}
748 			if (m == NULL) {
749 				control->tail_mbuf = prev;
750 			}
751 			continue;
752 		}
753 		prev = m;
754 		if (control->on_read_q) {
755 			/*
756 			 * On read queue so we must increment the SB stuff,
757 			 * we assume caller has done any locks of SB.
758 			 */
759 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
760 		}
761 		*added += SCTP_BUF_LEN(m);
762 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
763 		m = SCTP_BUF_NEXT(m);
764 	}
765 	if (prev) {
766 		control->tail_mbuf = prev;
767 	}
768 }
769 
770 static void
771 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
772 {
773 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
774 	nc->sinfo_stream = control->sinfo_stream;
775 	nc->mid = control->mid;
776 	TAILQ_INIT(&nc->reasm);
777 	nc->top_fsn = control->top_fsn;
778 	nc->mid = control->mid;
779 	nc->sinfo_flags = control->sinfo_flags;
780 	nc->sinfo_ppid = control->sinfo_ppid;
781 	nc->sinfo_context = control->sinfo_context;
782 	nc->fsn_included = 0xffffffff;
783 	nc->sinfo_tsn = control->sinfo_tsn;
784 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
785 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
786 	nc->whoFrom = control->whoFrom;
787 	atomic_add_int(&nc->whoFrom->ref_count, 1);
788 	nc->stcb = control->stcb;
789 	nc->port_from = control->port_from;
790 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
791 }
792 
793 static void
794 sctp_reset_a_control(struct sctp_queued_to_read *control,
795     struct sctp_inpcb *inp, uint32_t tsn)
796 {
797 	control->fsn_included = tsn;
798 	if (control->on_read_q) {
799 		/*
800 		 * We have to purge it from there, hopefully this will work
801 		 * :-)
802 		 */
803 		TAILQ_REMOVE(&inp->read_queue, control, next);
804 		control->on_read_q = 0;
805 	}
806 }
807 
808 static int
809 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
810     struct sctp_association *asoc,
811     struct sctp_stream_in *strm,
812     struct sctp_queued_to_read *control,
813     uint32_t pd_point,
814     int inp_read_lock_held)
815 {
816 	/*
817 	 * Special handling for the old un-ordered data chunk. All the
818 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
819 	 * to see if we have it all. If you return one, no other control
820 	 * entries on the un-ordered queue will be looked at. In theory
821 	 * there should be no others entries in reality, unless the guy is
822 	 * sending both unordered NDATA and unordered DATA...
823 	 */
824 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
825 	uint32_t fsn;
826 	struct sctp_queued_to_read *nc;
827 	int cnt_added;
828 
829 	if (control->first_frag_seen == 0) {
830 		/* Nothing we can do, we have not seen the first piece yet */
831 		return (1);
832 	}
833 	/* Collapse any we can */
834 	cnt_added = 0;
835 restart:
836 	fsn = control->fsn_included + 1;
837 	/* Now what can we add? */
838 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
839 		if (chk->rec.data.fsn == fsn) {
840 			/* Ok lets add it */
841 			sctp_alloc_a_readq(stcb, nc);
842 			if (nc == NULL) {
843 				break;
844 			}
845 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
846 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
847 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
848 			fsn++;
849 			cnt_added++;
850 			chk = NULL;
851 			if (control->end_added) {
852 				/* We are done */
853 				if (!TAILQ_EMPTY(&control->reasm)) {
854 					/*
855 					 * Ok we have to move anything left
856 					 * on the control queue to a new
857 					 * control.
858 					 */
859 					sctp_build_readq_entry_from_ctl(nc, control);
860 					tchk = TAILQ_FIRST(&control->reasm);
861 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
862 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
863 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
864 							asoc->size_on_reasm_queue -= tchk->send_size;
865 						} else {
866 #ifdef INVARIANTS
867 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
868 #else
869 							asoc->size_on_reasm_queue = 0;
870 #endif
871 						}
872 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
873 						nc->first_frag_seen = 1;
874 						nc->fsn_included = tchk->rec.data.fsn;
875 						nc->data = tchk->data;
876 						nc->sinfo_ppid = tchk->rec.data.ppid;
877 						nc->sinfo_tsn = tchk->rec.data.tsn;
878 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
879 						tchk->data = NULL;
880 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
881 						sctp_setup_tail_pointer(nc);
882 						tchk = TAILQ_FIRST(&control->reasm);
883 					}
884 					/* Spin the rest onto the queue */
885 					while (tchk) {
886 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
887 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
888 						tchk = TAILQ_FIRST(&control->reasm);
889 					}
890 					/*
891 					 * Now lets add it to the queue
892 					 * after removing control
893 					 */
894 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
895 					nc->on_strm_q = SCTP_ON_UNORDERED;
896 					if (control->on_strm_q) {
897 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
898 						control->on_strm_q = 0;
899 					}
900 				}
901 				if (control->pdapi_started) {
902 					strm->pd_api_started = 0;
903 					control->pdapi_started = 0;
904 				}
905 				if (control->on_strm_q) {
906 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
907 					control->on_strm_q = 0;
908 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
909 				}
910 				if (control->on_read_q == 0) {
911 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
912 					    &stcb->sctp_socket->so_rcv, control->end_added,
913 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
914 				}
915 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
916 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
917 					/*
918 					 * Switch to the new guy and
919 					 * continue
920 					 */
921 					control = nc;
922 					goto restart;
923 				} else {
924 					if (nc->on_strm_q == 0) {
925 						sctp_free_a_readq(stcb, nc);
926 					}
927 				}
928 				return (1);
929 			} else {
930 				sctp_free_a_readq(stcb, nc);
931 			}
932 		} else {
933 			/* Can't add more */
934 			break;
935 		}
936 	}
937 	if (cnt_added && strm->pd_api_started) {
938 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
939 	}
940 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
941 		strm->pd_api_started = 1;
942 		control->pdapi_started = 1;
943 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
944 		    &stcb->sctp_socket->so_rcv, control->end_added,
945 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
946 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
947 		return (0);
948 	} else {
949 		return (1);
950 	}
951 }
952 
953 static void
954 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
955     struct sctp_association *asoc,
956     struct sctp_queued_to_read *control,
957     struct sctp_tmit_chunk *chk,
958     int *abort_flag)
959 {
960 	struct sctp_tmit_chunk *at;
961 	int inserted;
962 
963 	/*
964 	 * Here we need to place the chunk into the control structure sorted
965 	 * in the correct order.
966 	 */
967 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
968 		/* Its the very first one. */
969 		SCTPDBG(SCTP_DEBUG_XXX,
970 		    "chunk is a first fsn: %u becomes fsn_included\n",
971 		    chk->rec.data.fsn);
972 		at = TAILQ_FIRST(&control->reasm);
973 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
974 			/*
975 			 * The first chunk in the reassembly is a smaller
976 			 * TSN than this one, even though this has a first,
977 			 * it must be from a subsequent msg.
978 			 */
979 			goto place_chunk;
980 		}
981 		if (control->first_frag_seen) {
982 			/*
983 			 * In old un-ordered we can reassembly on one
984 			 * control multiple messages. As long as the next
985 			 * FIRST is greater then the old first (TSN i.e. FSN
986 			 * wise)
987 			 */
988 			struct mbuf *tdata;
989 			uint32_t tmp;
990 
991 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
992 				/*
993 				 * Easy way the start of a new guy beyond
994 				 * the lowest
995 				 */
996 				goto place_chunk;
997 			}
998 			if ((chk->rec.data.fsn == control->fsn_included) ||
999 			    (control->pdapi_started)) {
1000 				/*
1001 				 * Ok this should not happen, if it does we
1002 				 * started the pd-api on the higher TSN
1003 				 * (since the equals part is a TSN failure
1004 				 * it must be that).
1005 				 *
1006 				 * We are completly hosed in that case since
1007 				 * I have no way to recover. This really
1008 				 * will only happen if we can get more TSN's
1009 				 * higher before the pd-api-point.
1010 				 */
1011 				sctp_abort_in_reasm(stcb, control, chk,
1012 				    abort_flag,
1013 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1014 
1015 				return;
1016 			}
1017 			/*
1018 			 * Ok we have two firsts and the one we just got is
1019 			 * smaller than the one we previously placed.. yuck!
1020 			 * We must swap them out.
1021 			 */
1022 			/* swap the mbufs */
1023 			tdata = control->data;
1024 			control->data = chk->data;
1025 			chk->data = tdata;
1026 			/* Save the lengths */
1027 			chk->send_size = control->length;
1028 			/* Recompute length of control and tail pointer */
1029 			sctp_setup_tail_pointer(control);
1030 			/* Fix the FSN included */
1031 			tmp = control->fsn_included;
1032 			control->fsn_included = chk->rec.data.fsn;
1033 			chk->rec.data.fsn = tmp;
1034 			/* Fix the TSN included */
1035 			tmp = control->sinfo_tsn;
1036 			control->sinfo_tsn = chk->rec.data.tsn;
1037 			chk->rec.data.tsn = tmp;
1038 			/* Fix the PPID included */
1039 			tmp = control->sinfo_ppid;
1040 			control->sinfo_ppid = chk->rec.data.ppid;
1041 			chk->rec.data.ppid = tmp;
1042 			/* Fix tail pointer */
1043 			goto place_chunk;
1044 		}
1045 		control->first_frag_seen = 1;
1046 		control->fsn_included = chk->rec.data.fsn;
1047 		control->top_fsn = chk->rec.data.fsn;
1048 		control->sinfo_tsn = chk->rec.data.tsn;
1049 		control->sinfo_ppid = chk->rec.data.ppid;
1050 		control->data = chk->data;
1051 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1052 		chk->data = NULL;
1053 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1054 		sctp_setup_tail_pointer(control);
1055 		return;
1056 	}
1057 place_chunk:
1058 	inserted = 0;
1059 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1060 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1061 			/*
1062 			 * This one in queue is bigger than the new one,
1063 			 * insert the new one before at.
1064 			 */
1065 			asoc->size_on_reasm_queue += chk->send_size;
1066 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1067 			inserted = 1;
1068 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1069 			break;
1070 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1071 			/*
1072 			 * They sent a duplicate fsn number. This really
1073 			 * should not happen since the FSN is a TSN and it
1074 			 * should have been dropped earlier.
1075 			 */
1076 			sctp_abort_in_reasm(stcb, control, chk,
1077 			    abort_flag,
1078 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1079 			return;
1080 		}
1081 
1082 	}
1083 	if (inserted == 0) {
1084 		/* Its at the end */
1085 		asoc->size_on_reasm_queue += chk->send_size;
1086 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1087 		control->top_fsn = chk->rec.data.fsn;
1088 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1089 	}
1090 }
1091 
1092 static int
1093 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1094     struct sctp_stream_in *strm, int inp_read_lock_held)
1095 {
1096 	/*
1097 	 * Given a stream, strm, see if any of the SSN's on it that are
1098 	 * fragmented are ready to deliver. If so go ahead and place them on
1099 	 * the read queue. In so placing if we have hit the end, then we
1100 	 * need to remove them from the stream's queue.
1101 	 */
1102 	struct sctp_queued_to_read *control, *nctl = NULL;
1103 	uint32_t next_to_del;
1104 	uint32_t pd_point;
1105 	int ret = 0;
1106 
1107 	if (stcb->sctp_socket) {
1108 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1109 		    stcb->sctp_ep->partial_delivery_point);
1110 	} else {
1111 		pd_point = stcb->sctp_ep->partial_delivery_point;
1112 	}
1113 	control = TAILQ_FIRST(&strm->uno_inqueue);
1114 
1115 	if ((control != NULL) &&
1116 	    (asoc->idata_supported == 0)) {
1117 		/* Special handling needed for "old" data format */
1118 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1119 			goto done_un;
1120 		}
1121 	}
1122 	if (strm->pd_api_started) {
1123 		/* Can't add more */
1124 		return (0);
1125 	}
1126 	while (control) {
1127 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1128 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1129 		nctl = TAILQ_NEXT(control, next_instrm);
1130 		if (control->end_added) {
1131 			/* We just put the last bit on */
1132 			if (control->on_strm_q) {
1133 #ifdef INVARIANTS
1134 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1135 					panic("Huh control: %p on_q: %d -- not unordered?",
1136 					    control, control->on_strm_q);
1137 				}
1138 #endif
1139 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1140 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1141 				control->on_strm_q = 0;
1142 			}
1143 			if (control->on_read_q == 0) {
1144 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1145 				    control,
1146 				    &stcb->sctp_socket->so_rcv, control->end_added,
1147 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1148 			}
1149 		} else {
1150 			/* Can we do a PD-API for this un-ordered guy? */
1151 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1152 				strm->pd_api_started = 1;
1153 				control->pdapi_started = 1;
1154 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1155 				    control,
1156 				    &stcb->sctp_socket->so_rcv, control->end_added,
1157 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1158 
1159 				break;
1160 			}
1161 		}
1162 		control = nctl;
1163 	}
1164 done_un:
1165 	control = TAILQ_FIRST(&strm->inqueue);
1166 	if (strm->pd_api_started) {
1167 		/* Can't add more */
1168 		return (0);
1169 	}
1170 	if (control == NULL) {
1171 		return (ret);
1172 	}
1173 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1174 		/*
1175 		 * Ok the guy at the top was being partially delivered
1176 		 * completed, so we remove it. Note the pd_api flag was
1177 		 * taken off when the chunk was merged on in
1178 		 * sctp_queue_data_for_reasm below.
1179 		 */
1180 		nctl = TAILQ_NEXT(control, next_instrm);
1181 		SCTPDBG(SCTP_DEBUG_XXX,
1182 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1183 		    control, control->end_added, control->mid,
1184 		    control->top_fsn, control->fsn_included,
1185 		    strm->last_mid_delivered);
1186 		if (control->end_added) {
1187 			if (control->on_strm_q) {
1188 #ifdef INVARIANTS
1189 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1190 					panic("Huh control: %p on_q: %d -- not ordered?",
1191 					    control, control->on_strm_q);
1192 				}
1193 #endif
1194 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1195 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1196 				if (asoc->size_on_all_streams >= control->length) {
1197 					asoc->size_on_all_streams -= control->length;
1198 				} else {
1199 #ifdef INVARIANTS
1200 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1201 #else
1202 					asoc->size_on_all_streams = 0;
1203 #endif
1204 				}
1205 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1206 				control->on_strm_q = 0;
1207 			}
1208 			if (strm->pd_api_started && control->pdapi_started) {
1209 				control->pdapi_started = 0;
1210 				strm->pd_api_started = 0;
1211 			}
1212 			if (control->on_read_q == 0) {
1213 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1214 				    control,
1215 				    &stcb->sctp_socket->so_rcv, control->end_added,
1216 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1217 			}
1218 			control = nctl;
1219 		}
1220 	}
1221 	if (strm->pd_api_started) {
1222 		/*
1223 		 * Can't add more must have gotten an un-ordered above being
1224 		 * partially delivered.
1225 		 */
1226 		return (0);
1227 	}
1228 deliver_more:
1229 	next_to_del = strm->last_mid_delivered + 1;
1230 	if (control) {
1231 		SCTPDBG(SCTP_DEBUG_XXX,
1232 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1233 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1234 		    next_to_del);
1235 		nctl = TAILQ_NEXT(control, next_instrm);
1236 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1237 		    (control->first_frag_seen)) {
1238 			int done;
1239 
1240 			/* Ok we can deliver it onto the stream. */
1241 			if (control->end_added) {
1242 				/* We are done with it afterwards */
1243 				if (control->on_strm_q) {
1244 #ifdef INVARIANTS
1245 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1246 						panic("Huh control: %p on_q: %d -- not ordered?",
1247 						    control, control->on_strm_q);
1248 					}
1249 #endif
1250 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1251 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1252 					if (asoc->size_on_all_streams >= control->length) {
1253 						asoc->size_on_all_streams -= control->length;
1254 					} else {
1255 #ifdef INVARIANTS
1256 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1257 #else
1258 						asoc->size_on_all_streams = 0;
1259 #endif
1260 					}
1261 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1262 					control->on_strm_q = 0;
1263 				}
1264 				ret++;
1265 			}
1266 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1267 				/*
1268 				 * A singleton now slipping through - mark
1269 				 * it non-revokable too
1270 				 */
1271 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1272 			} else if (control->end_added == 0) {
1273 				/*
1274 				 * Check if we can defer adding until its
1275 				 * all there
1276 				 */
1277 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1278 					/*
1279 					 * Don't need it or cannot add more
1280 					 * (one being delivered that way)
1281 					 */
1282 					goto out;
1283 				}
1284 			}
1285 			done = (control->end_added) && (control->last_frag_seen);
1286 			if (control->on_read_q == 0) {
1287 				if (!done) {
1288 					if (asoc->size_on_all_streams >= control->length) {
1289 						asoc->size_on_all_streams -= control->length;
1290 					} else {
1291 #ifdef INVARIANTS
1292 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1293 #else
1294 						asoc->size_on_all_streams = 0;
1295 #endif
1296 					}
1297 					strm->pd_api_started = 1;
1298 					control->pdapi_started = 1;
1299 				}
1300 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1301 				    control,
1302 				    &stcb->sctp_socket->so_rcv, control->end_added,
1303 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1304 			}
1305 			strm->last_mid_delivered = next_to_del;
1306 			if (done) {
1307 				control = nctl;
1308 				goto deliver_more;
1309 			}
1310 		}
1311 	}
1312 out:
1313 	return (ret);
1314 }
1315 
1316 
1317 uint32_t
1318 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1319     struct sctp_stream_in *strm,
1320     struct sctp_tcb *stcb, struct sctp_association *asoc,
1321     struct sctp_tmit_chunk *chk, int hold_rlock)
1322 {
1323 	/*
1324 	 * Given a control and a chunk, merge the data from the chk onto the
1325 	 * control and free up the chunk resources.
1326 	 */
1327 	uint32_t added = 0;
1328 	int i_locked = 0;
1329 
1330 	if (control->on_read_q && (hold_rlock == 0)) {
1331 		/*
1332 		 * Its being pd-api'd so we must do some locks.
1333 		 */
1334 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1335 		i_locked = 1;
1336 	}
1337 	if (control->data == NULL) {
1338 		control->data = chk->data;
1339 		sctp_setup_tail_pointer(control);
1340 	} else {
1341 		sctp_add_to_tail_pointer(control, chk->data, &added);
1342 	}
1343 	control->fsn_included = chk->rec.data.fsn;
1344 	asoc->size_on_reasm_queue -= chk->send_size;
1345 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1346 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1347 	chk->data = NULL;
1348 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1349 		control->first_frag_seen = 1;
1350 		control->sinfo_tsn = chk->rec.data.tsn;
1351 		control->sinfo_ppid = chk->rec.data.ppid;
1352 	}
1353 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1354 		/* Its complete */
1355 		if ((control->on_strm_q) && (control->on_read_q)) {
1356 			if (control->pdapi_started) {
1357 				control->pdapi_started = 0;
1358 				strm->pd_api_started = 0;
1359 			}
1360 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1361 				/* Unordered */
1362 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1363 				control->on_strm_q = 0;
1364 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1365 				/* Ordered */
1366 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1367 				/*
1368 				 * Don't need to decrement
1369 				 * size_on_all_streams, since control is on
1370 				 * the read queue.
1371 				 */
1372 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1373 				control->on_strm_q = 0;
1374 #ifdef INVARIANTS
1375 			} else if (control->on_strm_q) {
1376 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1377 				    control->on_strm_q);
1378 #endif
1379 			}
1380 		}
1381 		control->end_added = 1;
1382 		control->last_frag_seen = 1;
1383 	}
1384 	if (i_locked) {
1385 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1386 	}
1387 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1388 	return (added);
1389 }
1390 
1391 /*
1392  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1393  * queue, see if anthing can be delivered. If so pull it off (or as much as
1394  * we can. If we run out of space then we must dump what we can and set the
1395  * appropriate flag to say we queued what we could.
1396  */
1397 static void
1398 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1399     struct sctp_queued_to_read *control,
1400     struct sctp_tmit_chunk *chk,
1401     int created_control,
1402     int *abort_flag, uint32_t tsn)
1403 {
1404 	uint32_t next_fsn;
1405 	struct sctp_tmit_chunk *at, *nat;
1406 	struct sctp_stream_in *strm;
1407 	int do_wakeup, unordered;
1408 	uint32_t lenadded;
1409 
1410 	strm = &asoc->strmin[control->sinfo_stream];
1411 	/*
1412 	 * For old un-ordered data chunks.
1413 	 */
1414 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1415 		unordered = 1;
1416 	} else {
1417 		unordered = 0;
1418 	}
1419 	/* Must be added to the stream-in queue */
1420 	if (created_control) {
1421 		if (unordered == 0) {
1422 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1423 		}
1424 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1425 			/* Duplicate SSN? */
1426 			sctp_abort_in_reasm(stcb, control, chk,
1427 			    abort_flag,
1428 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1429 			sctp_clean_up_control(stcb, control);
1430 			return;
1431 		}
1432 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1433 			/*
1434 			 * Ok we created this control and now lets validate
1435 			 * that its legal i.e. there is a B bit set, if not
1436 			 * and we have up to the cum-ack then its invalid.
1437 			 */
1438 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1439 				sctp_abort_in_reasm(stcb, control, chk,
1440 				    abort_flag,
1441 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1442 				return;
1443 			}
1444 		}
1445 	}
1446 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1447 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1448 		return;
1449 	}
1450 	/*
1451 	 * Ok we must queue the chunk into the reasembly portion: o if its
1452 	 * the first it goes to the control mbuf. o if its not first but the
1453 	 * next in sequence it goes to the control, and each succeeding one
1454 	 * in order also goes. o if its not in order we place it on the list
1455 	 * in its place.
1456 	 */
1457 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1458 		/* Its the very first one. */
1459 		SCTPDBG(SCTP_DEBUG_XXX,
1460 		    "chunk is a first fsn: %u becomes fsn_included\n",
1461 		    chk->rec.data.fsn);
1462 		if (control->first_frag_seen) {
1463 			/*
1464 			 * Error on senders part, they either sent us two
1465 			 * data chunks with FIRST, or they sent two
1466 			 * un-ordered chunks that were fragmented at the
1467 			 * same time in the same stream.
1468 			 */
1469 			sctp_abort_in_reasm(stcb, control, chk,
1470 			    abort_flag,
1471 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1472 			return;
1473 		}
1474 		control->first_frag_seen = 1;
1475 		control->sinfo_ppid = chk->rec.data.ppid;
1476 		control->sinfo_tsn = chk->rec.data.tsn;
1477 		control->fsn_included = chk->rec.data.fsn;
1478 		control->data = chk->data;
1479 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1480 		chk->data = NULL;
1481 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1482 		sctp_setup_tail_pointer(control);
1483 		asoc->size_on_all_streams += control->length;
1484 	} else {
1485 		/* Place the chunk in our list */
1486 		int inserted = 0;
1487 
1488 		if (control->last_frag_seen == 0) {
1489 			/* Still willing to raise highest FSN seen */
1490 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1491 				SCTPDBG(SCTP_DEBUG_XXX,
1492 				    "We have a new top_fsn: %u\n",
1493 				    chk->rec.data.fsn);
1494 				control->top_fsn = chk->rec.data.fsn;
1495 			}
1496 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1497 				SCTPDBG(SCTP_DEBUG_XXX,
1498 				    "The last fsn is now in place fsn: %u\n",
1499 				    chk->rec.data.fsn);
1500 				control->last_frag_seen = 1;
1501 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1502 					SCTPDBG(SCTP_DEBUG_XXX,
1503 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1504 					    chk->rec.data.fsn,
1505 					    control->top_fsn);
1506 					sctp_abort_in_reasm(stcb, control, chk,
1507 					    abort_flag,
1508 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1509 					return;
1510 				}
1511 			}
1512 			if (asoc->idata_supported || control->first_frag_seen) {
1513 				/*
1514 				 * For IDATA we always check since we know
1515 				 * that the first fragment is 0. For old
1516 				 * DATA we have to receive the first before
1517 				 * we know the first FSN (which is the TSN).
1518 				 */
1519 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1520 					/*
1521 					 * We have already delivered up to
1522 					 * this so its a dup
1523 					 */
1524 					sctp_abort_in_reasm(stcb, control, chk,
1525 					    abort_flag,
1526 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1527 					return;
1528 				}
1529 			}
1530 		} else {
1531 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1532 				/* Second last? huh? */
1533 				SCTPDBG(SCTP_DEBUG_XXX,
1534 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1535 				    chk->rec.data.fsn, control->top_fsn);
1536 				sctp_abort_in_reasm(stcb, control,
1537 				    chk, abort_flag,
1538 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1539 				return;
1540 			}
1541 			if (asoc->idata_supported || control->first_frag_seen) {
1542 				/*
1543 				 * For IDATA we always check since we know
1544 				 * that the first fragment is 0. For old
1545 				 * DATA we have to receive the first before
1546 				 * we know the first FSN (which is the TSN).
1547 				 */
1548 
1549 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1550 					/*
1551 					 * We have already delivered up to
1552 					 * this so its a dup
1553 					 */
1554 					SCTPDBG(SCTP_DEBUG_XXX,
1555 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1556 					    chk->rec.data.fsn, control->fsn_included);
1557 					sctp_abort_in_reasm(stcb, control, chk,
1558 					    abort_flag,
1559 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1560 					return;
1561 				}
1562 			}
1563 			/*
1564 			 * validate not beyond top FSN if we have seen last
1565 			 * one
1566 			 */
1567 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1568 				SCTPDBG(SCTP_DEBUG_XXX,
1569 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1570 				    chk->rec.data.fsn,
1571 				    control->top_fsn);
1572 				sctp_abort_in_reasm(stcb, control, chk,
1573 				    abort_flag,
1574 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1575 				return;
1576 			}
1577 		}
1578 		/*
1579 		 * If we reach here, we need to place the new chunk in the
1580 		 * reassembly for this control.
1581 		 */
1582 		SCTPDBG(SCTP_DEBUG_XXX,
1583 		    "chunk is a not first fsn: %u needs to be inserted\n",
1584 		    chk->rec.data.fsn);
1585 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1586 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1587 				/*
1588 				 * This one in queue is bigger than the new
1589 				 * one, insert the new one before at.
1590 				 */
1591 				SCTPDBG(SCTP_DEBUG_XXX,
1592 				    "Insert it before fsn: %u\n",
1593 				    at->rec.data.fsn);
1594 				asoc->size_on_reasm_queue += chk->send_size;
1595 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1596 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1597 				inserted = 1;
1598 				break;
1599 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1600 				/*
1601 				 * Gak, He sent me a duplicate str seq
1602 				 * number
1603 				 */
1604 				/*
1605 				 * foo bar, I guess I will just free this
1606 				 * new guy, should we abort too? FIX ME
1607 				 * MAYBE? Or it COULD be that the SSN's have
1608 				 * wrapped. Maybe I should compare to TSN
1609 				 * somehow... sigh for now just blow away
1610 				 * the chunk!
1611 				 */
1612 				SCTPDBG(SCTP_DEBUG_XXX,
1613 				    "Duplicate to fsn: %u -- abort\n",
1614 				    at->rec.data.fsn);
1615 				sctp_abort_in_reasm(stcb, control,
1616 				    chk, abort_flag,
1617 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1618 				return;
1619 			}
1620 		}
1621 		if (inserted == 0) {
1622 			/* Goes on the end */
1623 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1624 			    chk->rec.data.fsn);
1625 			asoc->size_on_reasm_queue += chk->send_size;
1626 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1627 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1628 		}
1629 	}
1630 	/*
1631 	 * Ok lets see if we can suck any up into the control structure that
1632 	 * are in seq if it makes sense.
1633 	 */
1634 	do_wakeup = 0;
1635 	/*
1636 	 * If the first fragment has not been seen there is no sense in
1637 	 * looking.
1638 	 */
1639 	if (control->first_frag_seen) {
1640 		next_fsn = control->fsn_included + 1;
1641 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1642 			if (at->rec.data.fsn == next_fsn) {
1643 				/* We can add this one now to the control */
1644 				SCTPDBG(SCTP_DEBUG_XXX,
1645 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1646 				    control, at,
1647 				    at->rec.data.fsn,
1648 				    next_fsn, control->fsn_included);
1649 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1650 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1651 				if (control->on_read_q) {
1652 					do_wakeup = 1;
1653 				} else {
1654 					/*
1655 					 * We only add to the
1656 					 * size-on-all-streams if its not on
1657 					 * the read q. The read q flag will
1658 					 * cause a sballoc so its accounted
1659 					 * for there.
1660 					 */
1661 					asoc->size_on_all_streams += lenadded;
1662 				}
1663 				next_fsn++;
1664 				if (control->end_added && control->pdapi_started) {
1665 					if (strm->pd_api_started) {
1666 						strm->pd_api_started = 0;
1667 						control->pdapi_started = 0;
1668 					}
1669 					if (control->on_read_q == 0) {
1670 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1671 						    control,
1672 						    &stcb->sctp_socket->so_rcv, control->end_added,
1673 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1674 					}
1675 					break;
1676 				}
1677 			} else {
1678 				break;
1679 			}
1680 		}
1681 	}
1682 	if (do_wakeup) {
1683 		/* Need to wakeup the reader */
1684 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1685 	}
1686 }
1687 
1688 static struct sctp_queued_to_read *
1689 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1690 {
1691 	struct sctp_queued_to_read *control;
1692 
1693 	if (ordered) {
1694 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1695 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1696 				break;
1697 			}
1698 		}
1699 	} else {
1700 		if (idata_supported) {
1701 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1702 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1703 					break;
1704 				}
1705 			}
1706 		} else {
1707 			control = TAILQ_FIRST(&strm->uno_inqueue);
1708 		}
1709 	}
1710 	return (control);
1711 }
1712 
1713 static int
1714 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1715     struct mbuf **m, int offset, int chk_length,
1716     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1717     int *break_flag, int last_chunk, uint8_t chk_type)
1718 {
1719 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1720 	uint32_t tsn, fsn, gap, mid;
1721 	struct mbuf *dmbuf;
1722 	int the_len;
1723 	int need_reasm_check = 0;
1724 	uint16_t sid;
1725 	struct mbuf *op_err;
1726 	char msg[SCTP_DIAG_INFO_LEN];
1727 	struct sctp_queued_to_read *control, *ncontrol;
1728 	uint32_t ppid;
1729 	uint8_t chk_flags;
1730 	struct sctp_stream_reset_list *liste;
1731 	int ordered;
1732 	size_t clen;
1733 	int created_control = 0;
1734 
1735 	if (chk_type == SCTP_IDATA) {
1736 		struct sctp_idata_chunk *chunk, chunk_buf;
1737 
1738 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1739 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1740 		chk_flags = chunk->ch.chunk_flags;
1741 		clen = sizeof(struct sctp_idata_chunk);
1742 		tsn = ntohl(chunk->dp.tsn);
1743 		sid = ntohs(chunk->dp.sid);
1744 		mid = ntohl(chunk->dp.mid);
1745 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1746 			fsn = 0;
1747 			ppid = chunk->dp.ppid_fsn.ppid;
1748 		} else {
1749 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1750 			ppid = 0xffffffff;	/* Use as an invalid value. */
1751 		}
1752 	} else {
1753 		struct sctp_data_chunk *chunk, chunk_buf;
1754 
1755 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1756 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1757 		chk_flags = chunk->ch.chunk_flags;
1758 		clen = sizeof(struct sctp_data_chunk);
1759 		tsn = ntohl(chunk->dp.tsn);
1760 		sid = ntohs(chunk->dp.sid);
1761 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1762 		fsn = tsn;
1763 		ppid = chunk->dp.ppid;
1764 	}
1765 	if ((size_t)chk_length == clen) {
1766 		/*
1767 		 * Need to send an abort since we had a empty data chunk.
1768 		 */
1769 		op_err = sctp_generate_no_user_data_cause(tsn);
1770 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1771 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1772 		*abort_flag = 1;
1773 		return (0);
1774 	}
1775 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1776 		asoc->send_sack = 1;
1777 	}
1778 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1779 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1780 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1781 	}
1782 	if (stcb == NULL) {
1783 		return (0);
1784 	}
1785 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1786 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1787 		/* It is a duplicate */
1788 		SCTP_STAT_INCR(sctps_recvdupdata);
1789 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1790 			/* Record a dup for the next outbound sack */
1791 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1792 			asoc->numduptsns++;
1793 		}
1794 		asoc->send_sack = 1;
1795 		return (0);
1796 	}
1797 	/* Calculate the number of TSN's between the base and this TSN */
1798 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1799 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1800 		/* Can't hold the bit in the mapping at max array, toss it */
1801 		return (0);
1802 	}
1803 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1804 		SCTP_TCB_LOCK_ASSERT(stcb);
1805 		if (sctp_expand_mapping_array(asoc, gap)) {
1806 			/* Can't expand, drop it */
1807 			return (0);
1808 		}
1809 	}
1810 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1811 		*high_tsn = tsn;
1812 	}
1813 	/* See if we have received this one already */
1814 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1815 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1816 		SCTP_STAT_INCR(sctps_recvdupdata);
1817 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1818 			/* Record a dup for the next outbound sack */
1819 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1820 			asoc->numduptsns++;
1821 		}
1822 		asoc->send_sack = 1;
1823 		return (0);
1824 	}
1825 	/*
1826 	 * Check to see about the GONE flag, duplicates would cause a sack
1827 	 * to be sent up above
1828 	 */
1829 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1830 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1831 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1832 		/*
1833 		 * wait a minute, this guy is gone, there is no longer a
1834 		 * receiver. Send peer an ABORT!
1835 		 */
1836 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1837 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1838 		*abort_flag = 1;
1839 		return (0);
1840 	}
1841 	/*
1842 	 * Now before going further we see if there is room. If NOT then we
1843 	 * MAY let one through only IF this TSN is the one we are waiting
1844 	 * for on a partial delivery API.
1845 	 */
1846 
1847 	/* Is the stream valid? */
1848 	if (sid >= asoc->streamincnt) {
1849 		struct sctp_error_invalid_stream *cause;
1850 
1851 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1852 		    0, M_NOWAIT, 1, MT_DATA);
1853 		if (op_err != NULL) {
1854 			/* add some space up front so prepend will work well */
1855 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1856 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1857 			/*
1858 			 * Error causes are just param's and this one has
1859 			 * two back to back phdr, one with the error type
1860 			 * and size, the other with the streamid and a rsvd
1861 			 */
1862 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1863 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1864 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1865 			cause->stream_id = htons(sid);
1866 			cause->reserved = htons(0);
1867 			sctp_queue_op_err(stcb, op_err);
1868 		}
1869 		SCTP_STAT_INCR(sctps_badsid);
1870 		SCTP_TCB_LOCK_ASSERT(stcb);
1871 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1872 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1873 			asoc->highest_tsn_inside_nr_map = tsn;
1874 		}
1875 		if (tsn == (asoc->cumulative_tsn + 1)) {
1876 			/* Update cum-ack */
1877 			asoc->cumulative_tsn = tsn;
1878 		}
1879 		return (0);
1880 	}
1881 	/*
1882 	 * If its a fragmented message, lets see if we can find the control
1883 	 * on the reassembly queues.
1884 	 */
1885 	if ((chk_type == SCTP_IDATA) &&
1886 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1887 	    (fsn == 0)) {
1888 		/*
1889 		 * The first *must* be fsn 0, and other (middle/end) pieces
1890 		 * can *not* be fsn 0. XXX: This can happen in case of a
1891 		 * wrap around. Ignore is for now.
1892 		 */
1893 		if (snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags) < 0) {
1894 			msg[0] = '\0';
1895 		}
1896 		goto err_out;
1897 	}
1898 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1899 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1900 	    chk_flags, control);
1901 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1902 		/* See if we can find the re-assembly entity */
1903 		if (control != NULL) {
1904 			/* We found something, does it belong? */
1905 			if (ordered && (mid != control->mid)) {
1906 				if (snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid) < 0) {
1907 					msg[0] = '\0';
1908 				}
1909 		err_out:
1910 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1911 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1912 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1913 				*abort_flag = 1;
1914 				return (0);
1915 			}
1916 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1917 				/*
1918 				 * We can't have a switched order with an
1919 				 * unordered chunk
1920 				 */
1921 				if (snprintf(msg, sizeof(msg),
1922 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1923 				    tsn) < 0) {
1924 					msg[0] = '\0';
1925 				}
1926 				goto err_out;
1927 			}
1928 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1929 				/*
1930 				 * We can't have a switched unordered with a
1931 				 * ordered chunk
1932 				 */
1933 				if (snprintf(msg, sizeof(msg),
1934 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1935 				    tsn) < 0) {
1936 					msg[0] = '\0';
1937 				}
1938 				goto err_out;
1939 			}
1940 		}
1941 	} else {
1942 		/*
1943 		 * Its a complete segment. Lets validate we don't have a
1944 		 * re-assembly going on with the same Stream/Seq (for
1945 		 * ordered) or in the same Stream for unordered.
1946 		 */
1947 		if (control != NULL) {
1948 			if (ordered || asoc->idata_supported) {
1949 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1950 				    chk_flags, mid);
1951 				if (snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid) < 0) {
1952 					msg[0] = '\0';
1953 				}
1954 				goto err_out;
1955 			} else {
1956 				if ((tsn == control->fsn_included + 1) &&
1957 				    (control->end_added == 0)) {
1958 					if (snprintf(msg, sizeof(msg),
1959 					    "Illegal message sequence, missing end for MID: %8.8x",
1960 					    control->fsn_included) < 0) {
1961 						msg[0] = '\0';
1962 					}
1963 					goto err_out;
1964 				} else {
1965 					control = NULL;
1966 				}
1967 			}
1968 		}
1969 	}
1970 	/* now do the tests */
1971 	if (((asoc->cnt_on_all_streams +
1972 	    asoc->cnt_on_reasm_queue +
1973 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1974 	    (((int)asoc->my_rwnd) <= 0)) {
1975 		/*
1976 		 * When we have NO room in the rwnd we check to make sure
1977 		 * the reader is doing its job...
1978 		 */
1979 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1980 			/* some to read, wake-up */
1981 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1982 			struct socket *so;
1983 
1984 			so = SCTP_INP_SO(stcb->sctp_ep);
1985 			atomic_add_int(&stcb->asoc.refcnt, 1);
1986 			SCTP_TCB_UNLOCK(stcb);
1987 			SCTP_SOCKET_LOCK(so, 1);
1988 			SCTP_TCB_LOCK(stcb);
1989 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1990 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1991 				/* assoc was freed while we were unlocked */
1992 				SCTP_SOCKET_UNLOCK(so, 1);
1993 				return (0);
1994 			}
1995 #endif
1996 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1998 			SCTP_SOCKET_UNLOCK(so, 1);
1999 #endif
2000 		}
2001 		/* now is it in the mapping array of what we have accepted? */
2002 		if (chk_type == SCTP_DATA) {
2003 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
2004 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2005 				/* Nope not in the valid range dump it */
2006 		dump_packet:
2007 				sctp_set_rwnd(stcb, asoc);
2008 				if ((asoc->cnt_on_all_streams +
2009 				    asoc->cnt_on_reasm_queue +
2010 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
2011 					SCTP_STAT_INCR(sctps_datadropchklmt);
2012 				} else {
2013 					SCTP_STAT_INCR(sctps_datadroprwnd);
2014 				}
2015 				*break_flag = 1;
2016 				return (0);
2017 			}
2018 		} else {
2019 			if (control == NULL) {
2020 				goto dump_packet;
2021 			}
2022 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
2023 				goto dump_packet;
2024 			}
2025 		}
2026 	}
2027 #ifdef SCTP_ASOCLOG_OF_TSNS
2028 	SCTP_TCB_LOCK_ASSERT(stcb);
2029 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2030 		asoc->tsn_in_at = 0;
2031 		asoc->tsn_in_wrapped = 1;
2032 	}
2033 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2034 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2035 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2036 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2037 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2038 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2039 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2040 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2041 	asoc->tsn_in_at++;
2042 #endif
2043 	/*
2044 	 * Before we continue lets validate that we are not being fooled by
2045 	 * an evil attacker. We can only have Nk chunks based on our TSN
2046 	 * spread allowed by the mapping array N * 8 bits, so there is no
2047 	 * way our stream sequence numbers could have wrapped. We of course
2048 	 * only validate the FIRST fragment so the bit must be set.
2049 	 */
2050 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2051 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2052 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2053 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2054 		/* The incoming sseq is behind where we last delivered? */
2055 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2056 		    mid, asoc->strmin[sid].last_mid_delivered);
2057 
2058 		if (asoc->idata_supported) {
2059 			if (snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2060 			    asoc->strmin[sid].last_mid_delivered,
2061 			    tsn,
2062 			    sid,
2063 			    mid) < 0) {
2064 				msg[0] = '\0';
2065 			}
2066 		} else {
2067 			if (snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2068 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2069 			    tsn,
2070 			    sid,
2071 			    (uint16_t)mid) < 0) {
2072 				msg[0] = '\0';
2073 			}
2074 		}
2075 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2076 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2077 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2078 		*abort_flag = 1;
2079 		return (0);
2080 	}
2081 	if (chk_type == SCTP_IDATA) {
2082 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2083 	} else {
2084 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2085 	}
2086 	if (last_chunk == 0) {
2087 		if (chk_type == SCTP_IDATA) {
2088 			dmbuf = SCTP_M_COPYM(*m,
2089 			    (offset + sizeof(struct sctp_idata_chunk)),
2090 			    the_len, M_NOWAIT);
2091 		} else {
2092 			dmbuf = SCTP_M_COPYM(*m,
2093 			    (offset + sizeof(struct sctp_data_chunk)),
2094 			    the_len, M_NOWAIT);
2095 		}
2096 #ifdef SCTP_MBUF_LOGGING
2097 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2098 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2099 		}
2100 #endif
2101 	} else {
2102 		/* We can steal the last chunk */
2103 		int l_len;
2104 
2105 		dmbuf = *m;
2106 		/* lop off the top part */
2107 		if (chk_type == SCTP_IDATA) {
2108 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2109 		} else {
2110 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2111 		}
2112 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2113 			l_len = SCTP_BUF_LEN(dmbuf);
2114 		} else {
2115 			/*
2116 			 * need to count up the size hopefully does not hit
2117 			 * this to often :-0
2118 			 */
2119 			struct mbuf *lat;
2120 
2121 			l_len = 0;
2122 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2123 				l_len += SCTP_BUF_LEN(lat);
2124 			}
2125 		}
2126 		if (l_len > the_len) {
2127 			/* Trim the end round bytes off  too */
2128 			m_adj(dmbuf, -(l_len - the_len));
2129 		}
2130 	}
2131 	if (dmbuf == NULL) {
2132 		SCTP_STAT_INCR(sctps_nomem);
2133 		return (0);
2134 	}
2135 	/*
2136 	 * Now no matter what, we need a control, get one if we don't have
2137 	 * one (we may have gotten it above when we found the message was
2138 	 * fragmented
2139 	 */
2140 	if (control == NULL) {
2141 		sctp_alloc_a_readq(stcb, control);
2142 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2143 		    ppid,
2144 		    sid,
2145 		    chk_flags,
2146 		    NULL, fsn, mid);
2147 		if (control == NULL) {
2148 			SCTP_STAT_INCR(sctps_nomem);
2149 			return (0);
2150 		}
2151 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2152 			struct mbuf *mm;
2153 
2154 			control->data = dmbuf;
2155 			control->tail_mbuf = NULL;
2156 			for (mm = control->data; mm; mm = mm->m_next) {
2157 				control->length += SCTP_BUF_LEN(mm);
2158 				if (SCTP_BUF_NEXT(mm) == NULL) {
2159 					control->tail_mbuf = mm;
2160 				}
2161 			}
2162 			control->end_added = 1;
2163 			control->last_frag_seen = 1;
2164 			control->first_frag_seen = 1;
2165 			control->fsn_included = fsn;
2166 			control->top_fsn = fsn;
2167 		}
2168 		created_control = 1;
2169 	}
2170 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2171 	    chk_flags, ordered, mid, control);
2172 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2173 	    TAILQ_EMPTY(&asoc->resetHead) &&
2174 	    ((ordered == 0) ||
2175 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2176 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2177 		/* Candidate for express delivery */
2178 		/*
2179 		 * Its not fragmented, No PD-API is up, Nothing in the
2180 		 * delivery queue, Its un-ordered OR ordered and the next to
2181 		 * deliver AND nothing else is stuck on the stream queue,
2182 		 * And there is room for it in the socket buffer. Lets just
2183 		 * stuff it up the buffer....
2184 		 */
2185 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2186 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2187 			asoc->highest_tsn_inside_nr_map = tsn;
2188 		}
2189 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2190 		    control, mid);
2191 
2192 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2193 		    control, &stcb->sctp_socket->so_rcv,
2194 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2195 
2196 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2197 			/* for ordered, bump what we delivered */
2198 			asoc->strmin[sid].last_mid_delivered++;
2199 		}
2200 		SCTP_STAT_INCR(sctps_recvexpress);
2201 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2202 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2203 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2204 		}
2205 		control = NULL;
2206 		goto finish_express_del;
2207 	}
2208 
2209 	/* Now will we need a chunk too? */
2210 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2211 		sctp_alloc_a_chunk(stcb, chk);
2212 		if (chk == NULL) {
2213 			/* No memory so we drop the chunk */
2214 			SCTP_STAT_INCR(sctps_nomem);
2215 			if (last_chunk == 0) {
2216 				/* we copied it, free the copy */
2217 				sctp_m_freem(dmbuf);
2218 			}
2219 			return (0);
2220 		}
2221 		chk->rec.data.tsn = tsn;
2222 		chk->no_fr_allowed = 0;
2223 		chk->rec.data.fsn = fsn;
2224 		chk->rec.data.mid = mid;
2225 		chk->rec.data.sid = sid;
2226 		chk->rec.data.ppid = ppid;
2227 		chk->rec.data.context = stcb->asoc.context;
2228 		chk->rec.data.doing_fast_retransmit = 0;
2229 		chk->rec.data.rcv_flags = chk_flags;
2230 		chk->asoc = asoc;
2231 		chk->send_size = the_len;
2232 		chk->whoTo = net;
2233 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2234 		    chk,
2235 		    control, mid);
2236 		atomic_add_int(&net->ref_count, 1);
2237 		chk->data = dmbuf;
2238 	}
2239 	/* Set the appropriate TSN mark */
2240 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2241 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2242 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2243 			asoc->highest_tsn_inside_nr_map = tsn;
2244 		}
2245 	} else {
2246 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2247 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2248 			asoc->highest_tsn_inside_map = tsn;
2249 		}
2250 	}
2251 	/* Now is it complete (i.e. not fragmented)? */
2252 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2253 		/*
2254 		 * Special check for when streams are resetting. We could be
2255 		 * more smart about this and check the actual stream to see
2256 		 * if it is not being reset.. that way we would not create a
2257 		 * HOLB when amongst streams being reset and those not being
2258 		 * reset.
2259 		 *
2260 		 */
2261 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2262 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2263 			/*
2264 			 * yep its past where we need to reset... go ahead
2265 			 * and queue it.
2266 			 */
2267 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2268 				/* first one on */
2269 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2270 			} else {
2271 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2272 				unsigned char inserted = 0;
2273 
2274 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2275 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2276 
2277 						continue;
2278 					} else {
2279 						/* found it */
2280 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2281 						inserted = 1;
2282 						break;
2283 					}
2284 				}
2285 				if (inserted == 0) {
2286 					/*
2287 					 * must be put at end, use prevP
2288 					 * (all setup from loop) to setup
2289 					 * nextP.
2290 					 */
2291 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2292 				}
2293 			}
2294 			goto finish_express_del;
2295 		}
2296 		if (chk_flags & SCTP_DATA_UNORDERED) {
2297 			/* queue directly into socket buffer */
2298 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2299 			    control, mid);
2300 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2301 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2302 			    control,
2303 			    &stcb->sctp_socket->so_rcv, 1,
2304 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2305 
2306 		} else {
2307 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2308 			    mid);
2309 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2310 			if (*abort_flag) {
2311 				if (last_chunk) {
2312 					*m = NULL;
2313 				}
2314 				return (0);
2315 			}
2316 		}
2317 		goto finish_express_del;
2318 	}
2319 	/* If we reach here its a reassembly */
2320 	need_reasm_check = 1;
2321 	SCTPDBG(SCTP_DEBUG_XXX,
2322 	    "Queue data to stream for reasm control: %p MID: %u\n",
2323 	    control, mid);
2324 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2325 	if (*abort_flag) {
2326 		/*
2327 		 * the assoc is now gone and chk was put onto the reasm
2328 		 * queue, which has all been freed.
2329 		 */
2330 		if (last_chunk) {
2331 			*m = NULL;
2332 		}
2333 		return (0);
2334 	}
2335 finish_express_del:
2336 	/* Here we tidy up things */
2337 	if (tsn == (asoc->cumulative_tsn + 1)) {
2338 		/* Update cum-ack */
2339 		asoc->cumulative_tsn = tsn;
2340 	}
2341 	if (last_chunk) {
2342 		*m = NULL;
2343 	}
2344 	if (ordered) {
2345 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2346 	} else {
2347 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2348 	}
2349 	SCTP_STAT_INCR(sctps_recvdata);
2350 	/* Set it present please */
2351 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2352 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2353 	}
2354 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2355 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2356 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2357 	}
2358 	if (need_reasm_check) {
2359 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2360 		need_reasm_check = 0;
2361 	}
2362 	/* check the special flag for stream resets */
2363 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2364 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2365 		/*
2366 		 * we have finished working through the backlogged TSN's now
2367 		 * time to reset streams. 1: call reset function. 2: free
2368 		 * pending_reply space 3: distribute any chunks in
2369 		 * pending_reply_queue.
2370 		 */
2371 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2372 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2373 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2374 		SCTP_FREE(liste, SCTP_M_STRESET);
2375 		/* sa_ignore FREED_MEMORY */
2376 		liste = TAILQ_FIRST(&asoc->resetHead);
2377 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2378 			/* All can be removed */
2379 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2380 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2381 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2382 				if (*abort_flag) {
2383 					return (0);
2384 				}
2385 				if (need_reasm_check) {
2386 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2387 					need_reasm_check = 0;
2388 				}
2389 			}
2390 		} else {
2391 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2392 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2393 					break;
2394 				}
2395 				/*
2396 				 * if control->sinfo_tsn is <= liste->tsn we
2397 				 * can process it which is the NOT of
2398 				 * control->sinfo_tsn > liste->tsn
2399 				 */
2400 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2401 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2402 				if (*abort_flag) {
2403 					return (0);
2404 				}
2405 				if (need_reasm_check) {
2406 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2407 					need_reasm_check = 0;
2408 				}
2409 			}
2410 		}
2411 	}
2412 	return (1);
2413 }
2414 
2415 static const int8_t sctp_map_lookup_tab[256] = {
2416 	0, 1, 0, 2, 0, 1, 0, 3,
2417 	0, 1, 0, 2, 0, 1, 0, 4,
2418 	0, 1, 0, 2, 0, 1, 0, 3,
2419 	0, 1, 0, 2, 0, 1, 0, 5,
2420 	0, 1, 0, 2, 0, 1, 0, 3,
2421 	0, 1, 0, 2, 0, 1, 0, 4,
2422 	0, 1, 0, 2, 0, 1, 0, 3,
2423 	0, 1, 0, 2, 0, 1, 0, 6,
2424 	0, 1, 0, 2, 0, 1, 0, 3,
2425 	0, 1, 0, 2, 0, 1, 0, 4,
2426 	0, 1, 0, 2, 0, 1, 0, 3,
2427 	0, 1, 0, 2, 0, 1, 0, 5,
2428 	0, 1, 0, 2, 0, 1, 0, 3,
2429 	0, 1, 0, 2, 0, 1, 0, 4,
2430 	0, 1, 0, 2, 0, 1, 0, 3,
2431 	0, 1, 0, 2, 0, 1, 0, 7,
2432 	0, 1, 0, 2, 0, 1, 0, 3,
2433 	0, 1, 0, 2, 0, 1, 0, 4,
2434 	0, 1, 0, 2, 0, 1, 0, 3,
2435 	0, 1, 0, 2, 0, 1, 0, 5,
2436 	0, 1, 0, 2, 0, 1, 0, 3,
2437 	0, 1, 0, 2, 0, 1, 0, 4,
2438 	0, 1, 0, 2, 0, 1, 0, 3,
2439 	0, 1, 0, 2, 0, 1, 0, 6,
2440 	0, 1, 0, 2, 0, 1, 0, 3,
2441 	0, 1, 0, 2, 0, 1, 0, 4,
2442 	0, 1, 0, 2, 0, 1, 0, 3,
2443 	0, 1, 0, 2, 0, 1, 0, 5,
2444 	0, 1, 0, 2, 0, 1, 0, 3,
2445 	0, 1, 0, 2, 0, 1, 0, 4,
2446 	0, 1, 0, 2, 0, 1, 0, 3,
2447 	0, 1, 0, 2, 0, 1, 0, 8
2448 };
2449 
2450 
2451 void
2452 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2453 {
2454 	/*
2455 	 * Now we also need to check the mapping array in a couple of ways.
2456 	 * 1) Did we move the cum-ack point?
2457 	 *
2458 	 * When you first glance at this you might think that all entries
2459 	 * that make up the position of the cum-ack would be in the
2460 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2461 	 * deliverable. Thats true with one exception, when its a fragmented
2462 	 * message we may not deliver the data until some threshold (or all
2463 	 * of it) is in place. So we must OR the nr_mapping_array and
2464 	 * mapping_array to get a true picture of the cum-ack.
2465 	 */
2466 	struct sctp_association *asoc;
2467 	int at;
2468 	uint8_t val;
2469 	int slide_from, slide_end, lgap, distance;
2470 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2471 
2472 	asoc = &stcb->asoc;
2473 
2474 	old_cumack = asoc->cumulative_tsn;
2475 	old_base = asoc->mapping_array_base_tsn;
2476 	old_highest = asoc->highest_tsn_inside_map;
2477 	/*
2478 	 * We could probably improve this a small bit by calculating the
2479 	 * offset of the current cum-ack as the starting point.
2480 	 */
2481 	at = 0;
2482 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2483 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2484 		if (val == 0xff) {
2485 			at += 8;
2486 		} else {
2487 			/* there is a 0 bit */
2488 			at += sctp_map_lookup_tab[val];
2489 			break;
2490 		}
2491 	}
2492 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2493 
2494 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2495 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2496 #ifdef INVARIANTS
2497 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2498 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2499 #else
2500 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2501 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2502 		sctp_print_mapping_array(asoc);
2503 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2504 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2505 		}
2506 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2507 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2508 #endif
2509 	}
2510 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2511 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2512 	} else {
2513 		highest_tsn = asoc->highest_tsn_inside_map;
2514 	}
2515 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2516 		/* The complete array was completed by a single FR */
2517 		/* highest becomes the cum-ack */
2518 		int clr;
2519 #ifdef INVARIANTS
2520 		unsigned int i;
2521 #endif
2522 
2523 		/* clear the array */
2524 		clr = ((at + 7) >> 3);
2525 		if (clr > asoc->mapping_array_size) {
2526 			clr = asoc->mapping_array_size;
2527 		}
2528 		memset(asoc->mapping_array, 0, clr);
2529 		memset(asoc->nr_mapping_array, 0, clr);
2530 #ifdef INVARIANTS
2531 		for (i = 0; i < asoc->mapping_array_size; i++) {
2532 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2533 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2534 				sctp_print_mapping_array(asoc);
2535 			}
2536 		}
2537 #endif
2538 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2539 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2540 	} else if (at >= 8) {
2541 		/* we can slide the mapping array down */
2542 		/* slide_from holds where we hit the first NON 0xff byte */
2543 
2544 		/*
2545 		 * now calculate the ceiling of the move using our highest
2546 		 * TSN value
2547 		 */
2548 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2549 		slide_end = (lgap >> 3);
2550 		if (slide_end < slide_from) {
2551 			sctp_print_mapping_array(asoc);
2552 #ifdef INVARIANTS
2553 			panic("impossible slide");
2554 #else
2555 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2556 			    lgap, slide_end, slide_from, at);
2557 			return;
2558 #endif
2559 		}
2560 		if (slide_end > asoc->mapping_array_size) {
2561 #ifdef INVARIANTS
2562 			panic("would overrun buffer");
2563 #else
2564 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2565 			    asoc->mapping_array_size, slide_end);
2566 			slide_end = asoc->mapping_array_size;
2567 #endif
2568 		}
2569 		distance = (slide_end - slide_from) + 1;
2570 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2571 			sctp_log_map(old_base, old_cumack, old_highest,
2572 			    SCTP_MAP_PREPARE_SLIDE);
2573 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2574 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2575 		}
2576 		if (distance + slide_from > asoc->mapping_array_size ||
2577 		    distance < 0) {
2578 			/*
2579 			 * Here we do NOT slide forward the array so that
2580 			 * hopefully when more data comes in to fill it up
2581 			 * we will be able to slide it forward. Really I
2582 			 * don't think this should happen :-0
2583 			 */
2584 
2585 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2586 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2587 				    (uint32_t)asoc->mapping_array_size,
2588 				    SCTP_MAP_SLIDE_NONE);
2589 			}
2590 		} else {
2591 			int ii;
2592 
2593 			for (ii = 0; ii < distance; ii++) {
2594 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2595 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2596 
2597 			}
2598 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2599 				asoc->mapping_array[ii] = 0;
2600 				asoc->nr_mapping_array[ii] = 0;
2601 			}
2602 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2603 				asoc->highest_tsn_inside_map += (slide_from << 3);
2604 			}
2605 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2606 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2607 			}
2608 			asoc->mapping_array_base_tsn += (slide_from << 3);
2609 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2610 				sctp_log_map(asoc->mapping_array_base_tsn,
2611 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2612 				    SCTP_MAP_SLIDE_RESULT);
2613 			}
2614 		}
2615 	}
2616 }
2617 
2618 void
2619 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2620 {
2621 	struct sctp_association *asoc;
2622 	uint32_t highest_tsn;
2623 	int is_a_gap;
2624 
2625 	sctp_slide_mapping_arrays(stcb);
2626 	asoc = &stcb->asoc;
2627 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2628 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2629 	} else {
2630 		highest_tsn = asoc->highest_tsn_inside_map;
2631 	}
2632 	/* Is there a gap now? */
2633 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2634 
2635 	/*
2636 	 * Now we need to see if we need to queue a sack or just start the
2637 	 * timer (if allowed).
2638 	 */
2639 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2640 		/*
2641 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2642 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2643 		 * SACK
2644 		 */
2645 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2646 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2647 			    stcb->sctp_ep, stcb, NULL,
2648 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2649 		}
2650 		sctp_send_shutdown(stcb,
2651 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2652 		if (is_a_gap) {
2653 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2654 		}
2655 	} else {
2656 		/*
2657 		 * CMT DAC algorithm: increase number of packets received
2658 		 * since last ack
2659 		 */
2660 		stcb->asoc.cmt_dac_pkts_rcvd++;
2661 
2662 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2663 							 * SACK */
2664 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2665 							 * longer is one */
2666 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2667 		    (is_a_gap) ||	/* is still a gap */
2668 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2669 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2670 		    ) {
2671 
2672 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2673 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2674 			    (stcb->asoc.send_sack == 0) &&
2675 			    (stcb->asoc.numduptsns == 0) &&
2676 			    (stcb->asoc.delayed_ack) &&
2677 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2678 
2679 				/*
2680 				 * CMT DAC algorithm: With CMT, delay acks
2681 				 * even in the face of
2682 				 *
2683 				 * reordering. Therefore, if acks that do
2684 				 * not have to be sent because of the above
2685 				 * reasons, will be delayed. That is, acks
2686 				 * that would have been sent due to gap
2687 				 * reports will be delayed with DAC. Start
2688 				 * the delayed ack timer.
2689 				 */
2690 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2691 				    stcb->sctp_ep, stcb, NULL);
2692 			} else {
2693 				/*
2694 				 * Ok we must build a SACK since the timer
2695 				 * is pending, we got our first packet OR
2696 				 * there are gaps or duplicates.
2697 				 */
2698 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2699 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2700 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2701 			}
2702 		} else {
2703 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2704 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2705 				    stcb->sctp_ep, stcb, NULL);
2706 			}
2707 		}
2708 	}
2709 }
2710 
2711 int
2712 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2713     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2714     struct sctp_nets *net, uint32_t *high_tsn)
2715 {
2716 	struct sctp_chunkhdr *ch, chunk_buf;
2717 	struct sctp_association *asoc;
2718 	int num_chunks = 0;	/* number of control chunks processed */
2719 	int stop_proc = 0;
2720 	int break_flag, last_chunk;
2721 	int abort_flag = 0, was_a_gap;
2722 	struct mbuf *m;
2723 	uint32_t highest_tsn;
2724 	uint16_t chk_length;
2725 
2726 	/* set the rwnd */
2727 	sctp_set_rwnd(stcb, &stcb->asoc);
2728 
2729 	m = *mm;
2730 	SCTP_TCB_LOCK_ASSERT(stcb);
2731 	asoc = &stcb->asoc;
2732 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2733 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2734 	} else {
2735 		highest_tsn = asoc->highest_tsn_inside_map;
2736 	}
2737 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2738 	/*
2739 	 * setup where we got the last DATA packet from for any SACK that
2740 	 * may need to go out. Don't bump the net. This is done ONLY when a
2741 	 * chunk is assigned.
2742 	 */
2743 	asoc->last_data_chunk_from = net;
2744 
2745 	/*-
2746 	 * Now before we proceed we must figure out if this is a wasted
2747 	 * cluster... i.e. it is a small packet sent in and yet the driver
2748 	 * underneath allocated a full cluster for it. If so we must copy it
2749 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2750 	 * with cluster starvation. Note for __Panda__ we don't do this
2751 	 * since it has clusters all the way down to 64 bytes.
2752 	 */
2753 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2754 		/* we only handle mbufs that are singletons.. not chains */
2755 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2756 		if (m) {
2757 			/* ok lets see if we can copy the data up */
2758 			caddr_t *from, *to;
2759 
2760 			/* get the pointers and copy */
2761 			to = mtod(m, caddr_t *);
2762 			from = mtod((*mm), caddr_t *);
2763 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2764 			/* copy the length and free up the old */
2765 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2766 			sctp_m_freem(*mm);
2767 			/* success, back copy */
2768 			*mm = m;
2769 		} else {
2770 			/* We are in trouble in the mbuf world .. yikes */
2771 			m = *mm;
2772 		}
2773 	}
2774 	/* get pointer to the first chunk header */
2775 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2776 	    sizeof(struct sctp_chunkhdr),
2777 	    (uint8_t *)&chunk_buf);
2778 	if (ch == NULL) {
2779 		return (1);
2780 	}
2781 	/*
2782 	 * process all DATA chunks...
2783 	 */
2784 	*high_tsn = asoc->cumulative_tsn;
2785 	break_flag = 0;
2786 	asoc->data_pkts_seen++;
2787 	while (stop_proc == 0) {
2788 		/* validate chunk length */
2789 		chk_length = ntohs(ch->chunk_length);
2790 		if (length - *offset < chk_length) {
2791 			/* all done, mutulated chunk */
2792 			stop_proc = 1;
2793 			continue;
2794 		}
2795 		if ((asoc->idata_supported == 1) &&
2796 		    (ch->chunk_type == SCTP_DATA)) {
2797 			struct mbuf *op_err;
2798 			char msg[SCTP_DIAG_INFO_LEN];
2799 
2800 			if (snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated") < 0) {
2801 				msg[0] = '\0';
2802 			}
2803 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2804 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2805 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2806 			return (2);
2807 		}
2808 		if ((asoc->idata_supported == 0) &&
2809 		    (ch->chunk_type == SCTP_IDATA)) {
2810 			struct mbuf *op_err;
2811 			char msg[SCTP_DIAG_INFO_LEN];
2812 
2813 			if (snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated") < 0) {
2814 				msg[0] = '\0';
2815 			}
2816 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2817 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2818 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2819 			return (2);
2820 		}
2821 		if ((ch->chunk_type == SCTP_DATA) ||
2822 		    (ch->chunk_type == SCTP_IDATA)) {
2823 			uint16_t clen;
2824 
2825 			if (ch->chunk_type == SCTP_DATA) {
2826 				clen = sizeof(struct sctp_data_chunk);
2827 			} else {
2828 				clen = sizeof(struct sctp_idata_chunk);
2829 			}
2830 			if (chk_length < clen) {
2831 				/*
2832 				 * Need to send an abort since we had a
2833 				 * invalid data chunk.
2834 				 */
2835 				struct mbuf *op_err;
2836 				char msg[SCTP_DIAG_INFO_LEN];
2837 
2838 				if (snprintf(msg, sizeof(msg), "%s chunk of length %u",
2839 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2840 				    chk_length) < 0) {
2841 					msg[0] = '\0';
2842 				}
2843 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2844 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2845 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2846 				return (2);
2847 			}
2848 #ifdef SCTP_AUDITING_ENABLED
2849 			sctp_audit_log(0xB1, 0);
2850 #endif
2851 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2852 				last_chunk = 1;
2853 			} else {
2854 				last_chunk = 0;
2855 			}
2856 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2857 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2858 			    last_chunk, ch->chunk_type)) {
2859 				num_chunks++;
2860 			}
2861 			if (abort_flag)
2862 				return (2);
2863 
2864 			if (break_flag) {
2865 				/*
2866 				 * Set because of out of rwnd space and no
2867 				 * drop rep space left.
2868 				 */
2869 				stop_proc = 1;
2870 				continue;
2871 			}
2872 		} else {
2873 			/* not a data chunk in the data region */
2874 			switch (ch->chunk_type) {
2875 			case SCTP_INITIATION:
2876 			case SCTP_INITIATION_ACK:
2877 			case SCTP_SELECTIVE_ACK:
2878 			case SCTP_NR_SELECTIVE_ACK:
2879 			case SCTP_HEARTBEAT_REQUEST:
2880 			case SCTP_HEARTBEAT_ACK:
2881 			case SCTP_ABORT_ASSOCIATION:
2882 			case SCTP_SHUTDOWN:
2883 			case SCTP_SHUTDOWN_ACK:
2884 			case SCTP_OPERATION_ERROR:
2885 			case SCTP_COOKIE_ECHO:
2886 			case SCTP_COOKIE_ACK:
2887 			case SCTP_ECN_ECHO:
2888 			case SCTP_ECN_CWR:
2889 			case SCTP_SHUTDOWN_COMPLETE:
2890 			case SCTP_AUTHENTICATION:
2891 			case SCTP_ASCONF_ACK:
2892 			case SCTP_PACKET_DROPPED:
2893 			case SCTP_STREAM_RESET:
2894 			case SCTP_FORWARD_CUM_TSN:
2895 			case SCTP_ASCONF:
2896 				{
2897 					/*
2898 					 * Now, what do we do with KNOWN
2899 					 * chunks that are NOT in the right
2900 					 * place?
2901 					 *
2902 					 * For now, I do nothing but ignore
2903 					 * them. We may later want to add
2904 					 * sysctl stuff to switch out and do
2905 					 * either an ABORT() or possibly
2906 					 * process them.
2907 					 */
2908 					struct mbuf *op_err;
2909 					char msg[SCTP_DIAG_INFO_LEN];
2910 
2911 					if (snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2912 					    ch->chunk_type) < 0) {
2913 						msg[0] = '\0';
2914 					}
2915 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2916 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2917 					return (2);
2918 				}
2919 			default:
2920 				/*
2921 				 * Unknown chunk type: use bit rules after
2922 				 * checking length
2923 				 */
2924 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2925 					/*
2926 					 * Need to send an abort since we
2927 					 * had a invalid chunk.
2928 					 */
2929 					struct mbuf *op_err;
2930 					char msg[SCTP_DIAG_INFO_LEN];
2931 
2932 					if (snprintf(msg, sizeof(msg), "Chunk of length %u", chk_length) < 0) {
2933 						msg[0] = '\0';
2934 					}
2935 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2936 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2937 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2938 					return (2);
2939 				}
2940 				if (ch->chunk_type & 0x40) {
2941 					/* Add a error report to the queue */
2942 					struct mbuf *op_err;
2943 					struct sctp_gen_error_cause *cause;
2944 
2945 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2946 					    0, M_NOWAIT, 1, MT_DATA);
2947 					if (op_err != NULL) {
2948 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2949 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2950 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2951 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2952 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2953 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2954 							sctp_queue_op_err(stcb, op_err);
2955 						} else {
2956 							sctp_m_freem(op_err);
2957 						}
2958 					}
2959 				}
2960 				if ((ch->chunk_type & 0x80) == 0) {
2961 					/* discard the rest of this packet */
2962 					stop_proc = 1;
2963 				}	/* else skip this bad chunk and
2964 					 * continue... */
2965 				break;
2966 			}	/* switch of chunk type */
2967 		}
2968 		*offset += SCTP_SIZE32(chk_length);
2969 		if ((*offset >= length) || stop_proc) {
2970 			/* no more data left in the mbuf chain */
2971 			stop_proc = 1;
2972 			continue;
2973 		}
2974 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2975 		    sizeof(struct sctp_chunkhdr),
2976 		    (uint8_t *)&chunk_buf);
2977 		if (ch == NULL) {
2978 			*offset = length;
2979 			stop_proc = 1;
2980 			continue;
2981 		}
2982 	}
2983 	if (break_flag) {
2984 		/*
2985 		 * we need to report rwnd overrun drops.
2986 		 */
2987 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2988 	}
2989 	if (num_chunks) {
2990 		/*
2991 		 * Did we get data, if so update the time for auto-close and
2992 		 * give peer credit for being alive.
2993 		 */
2994 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2995 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2996 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2997 			    stcb->asoc.overall_error_count,
2998 			    0,
2999 			    SCTP_FROM_SCTP_INDATA,
3000 			    __LINE__);
3001 		}
3002 		stcb->asoc.overall_error_count = 0;
3003 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3004 	}
3005 	/* now service all of the reassm queue if needed */
3006 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
3007 		/* Assure that we ack right away */
3008 		stcb->asoc.send_sack = 1;
3009 	}
3010 	/* Start a sack timer or QUEUE a SACK for sending */
3011 	sctp_sack_check(stcb, was_a_gap);
3012 	return (0);
3013 }
3014 
3015 static int
3016 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
3017     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
3018     int *num_frs,
3019     uint32_t *biggest_newly_acked_tsn,
3020     uint32_t *this_sack_lowest_newack,
3021     int *rto_ok)
3022 {
3023 	struct sctp_tmit_chunk *tp1;
3024 	unsigned int theTSN;
3025 	int j, wake_him = 0, circled = 0;
3026 
3027 	/* Recover the tp1 we last saw */
3028 	tp1 = *p_tp1;
3029 	if (tp1 == NULL) {
3030 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3031 	}
3032 	for (j = frag_strt; j <= frag_end; j++) {
3033 		theTSN = j + last_tsn;
3034 		while (tp1) {
3035 			if (tp1->rec.data.doing_fast_retransmit)
3036 				(*num_frs) += 1;
3037 
3038 			/*-
3039 			 * CMT: CUCv2 algorithm. For each TSN being
3040 			 * processed from the sent queue, track the
3041 			 * next expected pseudo-cumack, or
3042 			 * rtx_pseudo_cumack, if required. Separate
3043 			 * cumack trackers for first transmissions,
3044 			 * and retransmissions.
3045 			 */
3046 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3047 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
3048 			    (tp1->snd_count == 1)) {
3049 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3050 				tp1->whoTo->find_pseudo_cumack = 0;
3051 			}
3052 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3053 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3054 			    (tp1->snd_count > 1)) {
3055 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3056 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3057 			}
3058 			if (tp1->rec.data.tsn == theTSN) {
3059 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3060 					/*-
3061 					 * must be held until
3062 					 * cum-ack passes
3063 					 */
3064 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3065 						/*-
3066 						 * If it is less than RESEND, it is
3067 						 * now no-longer in flight.
3068 						 * Higher values may already be set
3069 						 * via previous Gap Ack Blocks...
3070 						 * i.e. ACKED or RESEND.
3071 						 */
3072 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3073 						    *biggest_newly_acked_tsn)) {
3074 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3075 						}
3076 						/*-
3077 						 * CMT: SFR algo (and HTNA) - set
3078 						 * saw_newack to 1 for dest being
3079 						 * newly acked. update
3080 						 * this_sack_highest_newack if
3081 						 * appropriate.
3082 						 */
3083 						if (tp1->rec.data.chunk_was_revoked == 0)
3084 							tp1->whoTo->saw_newack = 1;
3085 
3086 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3087 						    tp1->whoTo->this_sack_highest_newack)) {
3088 							tp1->whoTo->this_sack_highest_newack =
3089 							    tp1->rec.data.tsn;
3090 						}
3091 						/*-
3092 						 * CMT DAC algo: also update
3093 						 * this_sack_lowest_newack
3094 						 */
3095 						if (*this_sack_lowest_newack == 0) {
3096 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3097 								sctp_log_sack(*this_sack_lowest_newack,
3098 								    last_tsn,
3099 								    tp1->rec.data.tsn,
3100 								    0,
3101 								    0,
3102 								    SCTP_LOG_TSN_ACKED);
3103 							}
3104 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3105 						}
3106 						/*-
3107 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3108 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3109 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3110 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3111 						 * Separate pseudo_cumack trackers for first transmissions and
3112 						 * retransmissions.
3113 						 */
3114 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3115 							if (tp1->rec.data.chunk_was_revoked == 0) {
3116 								tp1->whoTo->new_pseudo_cumack = 1;
3117 							}
3118 							tp1->whoTo->find_pseudo_cumack = 1;
3119 						}
3120 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3121 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3122 						}
3123 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3124 							if (tp1->rec.data.chunk_was_revoked == 0) {
3125 								tp1->whoTo->new_pseudo_cumack = 1;
3126 							}
3127 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3128 						}
3129 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3130 							sctp_log_sack(*biggest_newly_acked_tsn,
3131 							    last_tsn,
3132 							    tp1->rec.data.tsn,
3133 							    frag_strt,
3134 							    frag_end,
3135 							    SCTP_LOG_TSN_ACKED);
3136 						}
3137 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3138 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3139 							    tp1->whoTo->flight_size,
3140 							    tp1->book_size,
3141 							    (uint32_t)(uintptr_t)tp1->whoTo,
3142 							    tp1->rec.data.tsn);
3143 						}
3144 						sctp_flight_size_decrease(tp1);
3145 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3146 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3147 							    tp1);
3148 						}
3149 						sctp_total_flight_decrease(stcb, tp1);
3150 
3151 						tp1->whoTo->net_ack += tp1->send_size;
3152 						if (tp1->snd_count < 2) {
3153 							/*-
3154 							 * True non-retransmitted chunk
3155 							 */
3156 							tp1->whoTo->net_ack2 += tp1->send_size;
3157 
3158 							/*-
3159 							 * update RTO too ?
3160 							 */
3161 							if (tp1->do_rtt) {
3162 								if (*rto_ok &&
3163 								    sctp_calculate_rto(stcb,
3164 								    &stcb->asoc,
3165 								    tp1->whoTo,
3166 								    &tp1->sent_rcv_time,
3167 								    SCTP_RTT_FROM_DATA)) {
3168 									*rto_ok = 0;
3169 								}
3170 								if (tp1->whoTo->rto_needed == 0) {
3171 									tp1->whoTo->rto_needed = 1;
3172 								}
3173 								tp1->do_rtt = 0;
3174 							}
3175 						}
3176 
3177 					}
3178 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3179 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3180 						    stcb->asoc.this_sack_highest_gap)) {
3181 							stcb->asoc.this_sack_highest_gap =
3182 							    tp1->rec.data.tsn;
3183 						}
3184 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3185 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3186 #ifdef SCTP_AUDITING_ENABLED
3187 							sctp_audit_log(0xB2,
3188 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3189 #endif
3190 						}
3191 					}
3192 					/*-
3193 					 * All chunks NOT UNSENT fall through here and are marked
3194 					 * (leave PR-SCTP ones that are to skip alone though)
3195 					 */
3196 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3197 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3198 						tp1->sent = SCTP_DATAGRAM_MARKED;
3199 					}
3200 					if (tp1->rec.data.chunk_was_revoked) {
3201 						/* deflate the cwnd */
3202 						tp1->whoTo->cwnd -= tp1->book_size;
3203 						tp1->rec.data.chunk_was_revoked = 0;
3204 					}
3205 					/* NR Sack code here */
3206 					if (nr_sacking &&
3207 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3208 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3209 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3210 #ifdef INVARIANTS
3211 						} else {
3212 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3213 #endif
3214 						}
3215 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3216 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3217 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3218 							stcb->asoc.trigger_reset = 1;
3219 						}
3220 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3221 						if (tp1->data) {
3222 							/*
3223 							 * sa_ignore
3224 							 * NO_NULL_CHK
3225 							 */
3226 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3227 							sctp_m_freem(tp1->data);
3228 							tp1->data = NULL;
3229 						}
3230 						wake_him++;
3231 					}
3232 				}
3233 				break;
3234 			}	/* if (tp1->tsn == theTSN) */
3235 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3236 				break;
3237 			}
3238 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3239 			if ((tp1 == NULL) && (circled == 0)) {
3240 				circled++;
3241 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3242 			}
3243 		}		/* end while (tp1) */
3244 		if (tp1 == NULL) {
3245 			circled = 0;
3246 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3247 		}
3248 		/* In case the fragments were not in order we must reset */
3249 	}			/* end for (j = fragStart */
3250 	*p_tp1 = tp1;
3251 	return (wake_him);	/* Return value only used for nr-sack */
3252 }
3253 
3254 
3255 static int
3256 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3257     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3258     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3259     int num_seg, int num_nr_seg, int *rto_ok)
3260 {
3261 	struct sctp_gap_ack_block *frag, block;
3262 	struct sctp_tmit_chunk *tp1;
3263 	int i;
3264 	int num_frs = 0;
3265 	int chunk_freed;
3266 	int non_revocable;
3267 	uint16_t frag_strt, frag_end, prev_frag_end;
3268 
3269 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3270 	prev_frag_end = 0;
3271 	chunk_freed = 0;
3272 
3273 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3274 		if (i == num_seg) {
3275 			prev_frag_end = 0;
3276 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3277 		}
3278 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3279 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3280 		*offset += sizeof(block);
3281 		if (frag == NULL) {
3282 			return (chunk_freed);
3283 		}
3284 		frag_strt = ntohs(frag->start);
3285 		frag_end = ntohs(frag->end);
3286 
3287 		if (frag_strt > frag_end) {
3288 			/* This gap report is malformed, skip it. */
3289 			continue;
3290 		}
3291 		if (frag_strt <= prev_frag_end) {
3292 			/* This gap report is not in order, so restart. */
3293 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3294 		}
3295 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3296 			*biggest_tsn_acked = last_tsn + frag_end;
3297 		}
3298 		if (i < num_seg) {
3299 			non_revocable = 0;
3300 		} else {
3301 			non_revocable = 1;
3302 		}
3303 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3304 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3305 		    this_sack_lowest_newack, rto_ok)) {
3306 			chunk_freed = 1;
3307 		}
3308 		prev_frag_end = frag_end;
3309 	}
3310 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3311 		if (num_frs)
3312 			sctp_log_fr(*biggest_tsn_acked,
3313 			    *biggest_newly_acked_tsn,
3314 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3315 	}
3316 	return (chunk_freed);
3317 }
3318 
3319 static void
3320 sctp_check_for_revoked(struct sctp_tcb *stcb,
3321     struct sctp_association *asoc, uint32_t cumack,
3322     uint32_t biggest_tsn_acked)
3323 {
3324 	struct sctp_tmit_chunk *tp1;
3325 
3326 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3327 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3328 			/*
3329 			 * ok this guy is either ACK or MARKED. If it is
3330 			 * ACKED it has been previously acked but not this
3331 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3332 			 * again.
3333 			 */
3334 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3335 				break;
3336 			}
3337 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3338 				/* it has been revoked */
3339 				tp1->sent = SCTP_DATAGRAM_SENT;
3340 				tp1->rec.data.chunk_was_revoked = 1;
3341 				/*
3342 				 * We must add this stuff back in to assure
3343 				 * timers and such get started.
3344 				 */
3345 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3346 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3347 					    tp1->whoTo->flight_size,
3348 					    tp1->book_size,
3349 					    (uint32_t)(uintptr_t)tp1->whoTo,
3350 					    tp1->rec.data.tsn);
3351 				}
3352 				sctp_flight_size_increase(tp1);
3353 				sctp_total_flight_increase(stcb, tp1);
3354 				/*
3355 				 * We inflate the cwnd to compensate for our
3356 				 * artificial inflation of the flight_size.
3357 				 */
3358 				tp1->whoTo->cwnd += tp1->book_size;
3359 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3360 					sctp_log_sack(asoc->last_acked_seq,
3361 					    cumack,
3362 					    tp1->rec.data.tsn,
3363 					    0,
3364 					    0,
3365 					    SCTP_LOG_TSN_REVOKED);
3366 				}
3367 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3368 				/* it has been re-acked in this SACK */
3369 				tp1->sent = SCTP_DATAGRAM_ACKED;
3370 			}
3371 		}
3372 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3373 			break;
3374 	}
3375 }
3376 
3377 
3378 static void
3379 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3380     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3381 {
3382 	struct sctp_tmit_chunk *tp1;
3383 	int strike_flag = 0;
3384 	struct timeval now;
3385 	int tot_retrans = 0;
3386 	uint32_t sending_seq;
3387 	struct sctp_nets *net;
3388 	int num_dests_sacked = 0;
3389 
3390 	/*
3391 	 * select the sending_seq, this is either the next thing ready to be
3392 	 * sent but not transmitted, OR, the next seq we assign.
3393 	 */
3394 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3395 	if (tp1 == NULL) {
3396 		sending_seq = asoc->sending_seq;
3397 	} else {
3398 		sending_seq = tp1->rec.data.tsn;
3399 	}
3400 
3401 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3402 	if ((asoc->sctp_cmt_on_off > 0) &&
3403 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3404 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3405 			if (net->saw_newack)
3406 				num_dests_sacked++;
3407 		}
3408 	}
3409 	if (stcb->asoc.prsctp_supported) {
3410 		(void)SCTP_GETTIME_TIMEVAL(&now);
3411 	}
3412 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3413 		strike_flag = 0;
3414 		if (tp1->no_fr_allowed) {
3415 			/* this one had a timeout or something */
3416 			continue;
3417 		}
3418 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3420 				sctp_log_fr(biggest_tsn_newly_acked,
3421 				    tp1->rec.data.tsn,
3422 				    tp1->sent,
3423 				    SCTP_FR_LOG_CHECK_STRIKE);
3424 		}
3425 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3426 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3427 			/* done */
3428 			break;
3429 		}
3430 		if (stcb->asoc.prsctp_supported) {
3431 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3432 				/* Is it expired? */
3433 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3434 					/* Yes so drop it */
3435 					if (tp1->data != NULL) {
3436 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3437 						    SCTP_SO_NOT_LOCKED);
3438 					}
3439 					continue;
3440 				}
3441 			}
3442 
3443 		}
3444 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3445 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3446 			/* we are beyond the tsn in the sack  */
3447 			break;
3448 		}
3449 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3450 			/* either a RESEND, ACKED, or MARKED */
3451 			/* skip */
3452 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3453 				/* Continue strikin FWD-TSN chunks */
3454 				tp1->rec.data.fwd_tsn_cnt++;
3455 			}
3456 			continue;
3457 		}
3458 		/*
3459 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3460 		 */
3461 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3462 			/*
3463 			 * No new acks were receieved for data sent to this
3464 			 * dest. Therefore, according to the SFR algo for
3465 			 * CMT, no data sent to this dest can be marked for
3466 			 * FR using this SACK.
3467 			 */
3468 			continue;
3469 		} else if (tp1->whoTo &&
3470 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3471 			    tp1->whoTo->this_sack_highest_newack) &&
3472 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3473 			/*
3474 			 * CMT: New acks were receieved for data sent to
3475 			 * this dest. But no new acks were seen for data
3476 			 * sent after tp1. Therefore, according to the SFR
3477 			 * algo for CMT, tp1 cannot be marked for FR using
3478 			 * this SACK. This step covers part of the DAC algo
3479 			 * and the HTNA algo as well.
3480 			 */
3481 			continue;
3482 		}
3483 		/*
3484 		 * Here we check to see if we were have already done a FR
3485 		 * and if so we see if the biggest TSN we saw in the sack is
3486 		 * smaller than the recovery point. If so we don't strike
3487 		 * the tsn... otherwise we CAN strike the TSN.
3488 		 */
3489 		/*
3490 		 * @@@ JRI: Check for CMT if (accum_moved &&
3491 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3492 		 * 0)) {
3493 		 */
3494 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3495 			/*
3496 			 * Strike the TSN if in fast-recovery and cum-ack
3497 			 * moved.
3498 			 */
3499 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 				sctp_log_fr(biggest_tsn_newly_acked,
3501 				    tp1->rec.data.tsn,
3502 				    tp1->sent,
3503 				    SCTP_FR_LOG_STRIKE_CHUNK);
3504 			}
3505 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3506 				tp1->sent++;
3507 			}
3508 			if ((asoc->sctp_cmt_on_off > 0) &&
3509 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3510 				/*
3511 				 * CMT DAC algorithm: If SACK flag is set to
3512 				 * 0, then lowest_newack test will not pass
3513 				 * because it would have been set to the
3514 				 * cumack earlier. If not already to be
3515 				 * rtx'd, If not a mixed sack and if tp1 is
3516 				 * not between two sacked TSNs, then mark by
3517 				 * one more. NOTE that we are marking by one
3518 				 * additional time since the SACK DAC flag
3519 				 * indicates that two packets have been
3520 				 * received after this missing TSN.
3521 				 */
3522 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3523 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3524 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3525 						sctp_log_fr(16 + num_dests_sacked,
3526 						    tp1->rec.data.tsn,
3527 						    tp1->sent,
3528 						    SCTP_FR_LOG_STRIKE_CHUNK);
3529 					}
3530 					tp1->sent++;
3531 				}
3532 			}
3533 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3534 		    (asoc->sctp_cmt_on_off == 0)) {
3535 			/*
3536 			 * For those that have done a FR we must take
3537 			 * special consideration if we strike. I.e the
3538 			 * biggest_newly_acked must be higher than the
3539 			 * sending_seq at the time we did the FR.
3540 			 */
3541 			if (
3542 #ifdef SCTP_FR_TO_ALTERNATE
3543 			/*
3544 			 * If FR's go to new networks, then we must only do
3545 			 * this for singly homed asoc's. However if the FR's
3546 			 * go to the same network (Armando's work) then its
3547 			 * ok to FR multiple times.
3548 			 */
3549 			    (asoc->numnets < 2)
3550 #else
3551 			    (1)
3552 #endif
3553 			    ) {
3554 
3555 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3556 				    tp1->rec.data.fast_retran_tsn)) {
3557 					/*
3558 					 * Strike the TSN, since this ack is
3559 					 * beyond where things were when we
3560 					 * did a FR.
3561 					 */
3562 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3563 						sctp_log_fr(biggest_tsn_newly_acked,
3564 						    tp1->rec.data.tsn,
3565 						    tp1->sent,
3566 						    SCTP_FR_LOG_STRIKE_CHUNK);
3567 					}
3568 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3569 						tp1->sent++;
3570 					}
3571 					strike_flag = 1;
3572 					if ((asoc->sctp_cmt_on_off > 0) &&
3573 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3574 						/*
3575 						 * CMT DAC algorithm: If
3576 						 * SACK flag is set to 0,
3577 						 * then lowest_newack test
3578 						 * will not pass because it
3579 						 * would have been set to
3580 						 * the cumack earlier. If
3581 						 * not already to be rtx'd,
3582 						 * If not a mixed sack and
3583 						 * if tp1 is not between two
3584 						 * sacked TSNs, then mark by
3585 						 * one more. NOTE that we
3586 						 * are marking by one
3587 						 * additional time since the
3588 						 * SACK DAC flag indicates
3589 						 * that two packets have
3590 						 * been received after this
3591 						 * missing TSN.
3592 						 */
3593 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3594 						    (num_dests_sacked == 1) &&
3595 						    SCTP_TSN_GT(this_sack_lowest_newack,
3596 						    tp1->rec.data.tsn)) {
3597 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3598 								sctp_log_fr(32 + num_dests_sacked,
3599 								    tp1->rec.data.tsn,
3600 								    tp1->sent,
3601 								    SCTP_FR_LOG_STRIKE_CHUNK);
3602 							}
3603 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3604 								tp1->sent++;
3605 							}
3606 						}
3607 					}
3608 				}
3609 			}
3610 			/*
3611 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3612 			 * algo covers HTNA.
3613 			 */
3614 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3615 		    biggest_tsn_newly_acked)) {
3616 			/*
3617 			 * We don't strike these: This is the  HTNA
3618 			 * algorithm i.e. we don't strike If our TSN is
3619 			 * larger than the Highest TSN Newly Acked.
3620 			 */
3621 			;
3622 		} else {
3623 			/* Strike the TSN */
3624 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3625 				sctp_log_fr(biggest_tsn_newly_acked,
3626 				    tp1->rec.data.tsn,
3627 				    tp1->sent,
3628 				    SCTP_FR_LOG_STRIKE_CHUNK);
3629 			}
3630 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3631 				tp1->sent++;
3632 			}
3633 			if ((asoc->sctp_cmt_on_off > 0) &&
3634 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3635 				/*
3636 				 * CMT DAC algorithm: If SACK flag is set to
3637 				 * 0, then lowest_newack test will not pass
3638 				 * because it would have been set to the
3639 				 * cumack earlier. If not already to be
3640 				 * rtx'd, If not a mixed sack and if tp1 is
3641 				 * not between two sacked TSNs, then mark by
3642 				 * one more. NOTE that we are marking by one
3643 				 * additional time since the SACK DAC flag
3644 				 * indicates that two packets have been
3645 				 * received after this missing TSN.
3646 				 */
3647 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3648 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3649 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3650 						sctp_log_fr(48 + num_dests_sacked,
3651 						    tp1->rec.data.tsn,
3652 						    tp1->sent,
3653 						    SCTP_FR_LOG_STRIKE_CHUNK);
3654 					}
3655 					tp1->sent++;
3656 				}
3657 			}
3658 		}
3659 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3660 			struct sctp_nets *alt;
3661 
3662 			/* fix counts and things */
3663 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3664 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3665 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3666 				    tp1->book_size,
3667 				    (uint32_t)(uintptr_t)tp1->whoTo,
3668 				    tp1->rec.data.tsn);
3669 			}
3670 			if (tp1->whoTo) {
3671 				tp1->whoTo->net_ack++;
3672 				sctp_flight_size_decrease(tp1);
3673 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3674 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3675 					    tp1);
3676 				}
3677 			}
3678 
3679 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3680 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3681 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3682 			}
3683 			/* add back to the rwnd */
3684 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3685 
3686 			/* remove from the total flight */
3687 			sctp_total_flight_decrease(stcb, tp1);
3688 
3689 			if ((stcb->asoc.prsctp_supported) &&
3690 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3691 				/*
3692 				 * Has it been retransmitted tv_sec times? -
3693 				 * we store the retran count there.
3694 				 */
3695 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3696 					/* Yes, so drop it */
3697 					if (tp1->data != NULL) {
3698 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3699 						    SCTP_SO_NOT_LOCKED);
3700 					}
3701 					/* Make sure to flag we had a FR */
3702 					if (tp1->whoTo != NULL) {
3703 						tp1->whoTo->net_ack++;
3704 					}
3705 					continue;
3706 				}
3707 			}
3708 			/*
3709 			 * SCTP_PRINTF("OK, we are now ready to FR this
3710 			 * guy\n");
3711 			 */
3712 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3713 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3714 				    0, SCTP_FR_MARKED);
3715 			}
3716 			if (strike_flag) {
3717 				/* This is a subsequent FR */
3718 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3719 			}
3720 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3721 			if (asoc->sctp_cmt_on_off > 0) {
3722 				/*
3723 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3724 				 * If CMT is being used, then pick dest with
3725 				 * largest ssthresh for any retransmission.
3726 				 */
3727 				tp1->no_fr_allowed = 1;
3728 				alt = tp1->whoTo;
3729 				/* sa_ignore NO_NULL_CHK */
3730 				if (asoc->sctp_cmt_pf > 0) {
3731 					/*
3732 					 * JRS 5/18/07 - If CMT PF is on,
3733 					 * use the PF version of
3734 					 * find_alt_net()
3735 					 */
3736 					alt = sctp_find_alternate_net(stcb, alt, 2);
3737 				} else {
3738 					/*
3739 					 * JRS 5/18/07 - If only CMT is on,
3740 					 * use the CMT version of
3741 					 * find_alt_net()
3742 					 */
3743 					/* sa_ignore NO_NULL_CHK */
3744 					alt = sctp_find_alternate_net(stcb, alt, 1);
3745 				}
3746 				if (alt == NULL) {
3747 					alt = tp1->whoTo;
3748 				}
3749 				/*
3750 				 * CUCv2: If a different dest is picked for
3751 				 * the retransmission, then new
3752 				 * (rtx-)pseudo_cumack needs to be tracked
3753 				 * for orig dest. Let CUCv2 track new (rtx-)
3754 				 * pseudo-cumack always.
3755 				 */
3756 				if (tp1->whoTo) {
3757 					tp1->whoTo->find_pseudo_cumack = 1;
3758 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3759 				}
3760 
3761 			} else {	/* CMT is OFF */
3762 
3763 #ifdef SCTP_FR_TO_ALTERNATE
3764 				/* Can we find an alternate? */
3765 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3766 #else
3767 				/*
3768 				 * default behavior is to NOT retransmit
3769 				 * FR's to an alternate. Armando Caro's
3770 				 * paper details why.
3771 				 */
3772 				alt = tp1->whoTo;
3773 #endif
3774 			}
3775 
3776 			tp1->rec.data.doing_fast_retransmit = 1;
3777 			tot_retrans++;
3778 			/* mark the sending seq for possible subsequent FR's */
3779 			/*
3780 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3781 			 * (uint32_t)tpi->rec.data.tsn);
3782 			 */
3783 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3784 				/*
3785 				 * If the queue of send is empty then its
3786 				 * the next sequence number that will be
3787 				 * assigned so we subtract one from this to
3788 				 * get the one we last sent.
3789 				 */
3790 				tp1->rec.data.fast_retran_tsn = sending_seq;
3791 			} else {
3792 				/*
3793 				 * If there are chunks on the send queue
3794 				 * (unsent data that has made it from the
3795 				 * stream queues but not out the door, we
3796 				 * take the first one (which will have the
3797 				 * lowest TSN) and subtract one to get the
3798 				 * one we last sent.
3799 				 */
3800 				struct sctp_tmit_chunk *ttt;
3801 
3802 				ttt = TAILQ_FIRST(&asoc->send_queue);
3803 				tp1->rec.data.fast_retran_tsn =
3804 				    ttt->rec.data.tsn;
3805 			}
3806 
3807 			if (tp1->do_rtt) {
3808 				/*
3809 				 * this guy had a RTO calculation pending on
3810 				 * it, cancel it
3811 				 */
3812 				if ((tp1->whoTo != NULL) &&
3813 				    (tp1->whoTo->rto_needed == 0)) {
3814 					tp1->whoTo->rto_needed = 1;
3815 				}
3816 				tp1->do_rtt = 0;
3817 			}
3818 			if (alt != tp1->whoTo) {
3819 				/* yes, there is an alternate. */
3820 				sctp_free_remote_addr(tp1->whoTo);
3821 				/* sa_ignore FREED_MEMORY */
3822 				tp1->whoTo = alt;
3823 				atomic_add_int(&alt->ref_count, 1);
3824 			}
3825 		}
3826 	}
3827 }
3828 
3829 struct sctp_tmit_chunk *
3830 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3831     struct sctp_association *asoc)
3832 {
3833 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3834 	struct timeval now;
3835 	int now_filled = 0;
3836 
3837 	if (asoc->prsctp_supported == 0) {
3838 		return (NULL);
3839 	}
3840 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3841 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3842 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3843 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3844 			/* no chance to advance, out of here */
3845 			break;
3846 		}
3847 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3848 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3849 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3850 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3851 				    asoc->advanced_peer_ack_point,
3852 				    tp1->rec.data.tsn, 0, 0);
3853 			}
3854 		}
3855 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3856 			/*
3857 			 * We can't fwd-tsn past any that are reliable aka
3858 			 * retransmitted until the asoc fails.
3859 			 */
3860 			break;
3861 		}
3862 		if (!now_filled) {
3863 			(void)SCTP_GETTIME_TIMEVAL(&now);
3864 			now_filled = 1;
3865 		}
3866 		/*
3867 		 * now we got a chunk which is marked for another
3868 		 * retransmission to a PR-stream but has run out its chances
3869 		 * already maybe OR has been marked to skip now. Can we skip
3870 		 * it if its a resend?
3871 		 */
3872 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3873 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3874 			/*
3875 			 * Now is this one marked for resend and its time is
3876 			 * now up?
3877 			 */
3878 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3879 				/* Yes so drop it */
3880 				if (tp1->data) {
3881 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3882 					    1, SCTP_SO_NOT_LOCKED);
3883 				}
3884 			} else {
3885 				/*
3886 				 * No, we are done when hit one for resend
3887 				 * whos time as not expired.
3888 				 */
3889 				break;
3890 			}
3891 		}
3892 		/*
3893 		 * Ok now if this chunk is marked to drop it we can clean up
3894 		 * the chunk, advance our peer ack point and we can check
3895 		 * the next chunk.
3896 		 */
3897 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3898 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3899 			/* advance PeerAckPoint goes forward */
3900 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3901 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3902 				a_adv = tp1;
3903 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3904 				/* No update but we do save the chk */
3905 				a_adv = tp1;
3906 			}
3907 		} else {
3908 			/*
3909 			 * If it is still in RESEND we can advance no
3910 			 * further
3911 			 */
3912 			break;
3913 		}
3914 	}
3915 	return (a_adv);
3916 }
3917 
3918 static int
3919 sctp_fs_audit(struct sctp_association *asoc)
3920 {
3921 	struct sctp_tmit_chunk *chk;
3922 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3923 	int ret;
3924 #ifndef INVARIANTS
3925 	int entry_flight, entry_cnt;
3926 #endif
3927 
3928 	ret = 0;
3929 #ifndef INVARIANTS
3930 	entry_flight = asoc->total_flight;
3931 	entry_cnt = asoc->total_flight_count;
3932 #endif
3933 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3934 		return (0);
3935 
3936 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3937 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3938 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3939 			    chk->rec.data.tsn,
3940 			    chk->send_size,
3941 			    chk->snd_count);
3942 			inflight++;
3943 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3944 			resend++;
3945 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3946 			inbetween++;
3947 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3948 			above++;
3949 		} else {
3950 			acked++;
3951 		}
3952 	}
3953 
3954 	if ((inflight > 0) || (inbetween > 0)) {
3955 #ifdef INVARIANTS
3956 		panic("Flight size-express incorrect? \n");
3957 #else
3958 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3959 		    entry_flight, entry_cnt);
3960 
3961 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3962 		    inflight, inbetween, resend, above, acked);
3963 		ret = 1;
3964 #endif
3965 	}
3966 	return (ret);
3967 }
3968 
3969 
3970 static void
3971 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3972     struct sctp_association *asoc,
3973     struct sctp_tmit_chunk *tp1)
3974 {
3975 	tp1->window_probe = 0;
3976 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3977 		/* TSN's skipped we do NOT move back. */
3978 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3979 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3980 		    tp1->book_size,
3981 		    (uint32_t)(uintptr_t)tp1->whoTo,
3982 		    tp1->rec.data.tsn);
3983 		return;
3984 	}
3985 	/* First setup this by shrinking flight */
3986 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3987 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3988 		    tp1);
3989 	}
3990 	sctp_flight_size_decrease(tp1);
3991 	sctp_total_flight_decrease(stcb, tp1);
3992 	/* Now mark for resend */
3993 	tp1->sent = SCTP_DATAGRAM_RESEND;
3994 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3995 
3996 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3997 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3998 		    tp1->whoTo->flight_size,
3999 		    tp1->book_size,
4000 		    (uint32_t)(uintptr_t)tp1->whoTo,
4001 		    tp1->rec.data.tsn);
4002 	}
4003 }
4004 
4005 void
4006 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4007     uint32_t rwnd, int *abort_now, int ecne_seen)
4008 {
4009 	struct sctp_nets *net;
4010 	struct sctp_association *asoc;
4011 	struct sctp_tmit_chunk *tp1, *tp2;
4012 	uint32_t old_rwnd;
4013 	int win_probe_recovery = 0;
4014 	int win_probe_recovered = 0;
4015 	int j, done_once = 0;
4016 	int rto_ok = 1;
4017 	uint32_t send_s;
4018 
4019 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4020 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4021 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4022 	}
4023 	SCTP_TCB_LOCK_ASSERT(stcb);
4024 #ifdef SCTP_ASOCLOG_OF_TSNS
4025 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4026 	stcb->asoc.cumack_log_at++;
4027 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4028 		stcb->asoc.cumack_log_at = 0;
4029 	}
4030 #endif
4031 	asoc = &stcb->asoc;
4032 	old_rwnd = asoc->peers_rwnd;
4033 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
4034 		/* old ack */
4035 		return;
4036 	} else if (asoc->last_acked_seq == cumack) {
4037 		/* Window update sack */
4038 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4039 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4040 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4041 			/* SWS sender side engages */
4042 			asoc->peers_rwnd = 0;
4043 		}
4044 		if (asoc->peers_rwnd > old_rwnd) {
4045 			goto again;
4046 		}
4047 		return;
4048 	}
4049 
4050 	/* First setup for CC stuff */
4051 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4052 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4053 			/* Drag along the window_tsn for cwr's */
4054 			net->cwr_window_tsn = cumack;
4055 		}
4056 		net->prev_cwnd = net->cwnd;
4057 		net->net_ack = 0;
4058 		net->net_ack2 = 0;
4059 
4060 		/*
4061 		 * CMT: Reset CUC and Fast recovery algo variables before
4062 		 * SACK processing
4063 		 */
4064 		net->new_pseudo_cumack = 0;
4065 		net->will_exit_fast_recovery = 0;
4066 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4067 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4068 		}
4069 	}
4070 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4071 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4072 		    sctpchunk_listhead);
4073 		send_s = tp1->rec.data.tsn + 1;
4074 	} else {
4075 		send_s = asoc->sending_seq;
4076 	}
4077 	if (SCTP_TSN_GE(cumack, send_s)) {
4078 		struct mbuf *op_err;
4079 		char msg[SCTP_DIAG_INFO_LEN];
4080 
4081 		*abort_now = 1;
4082 		/* XXX */
4083 		if (snprintf(msg, sizeof(msg),
4084 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4085 		    cumack, send_s) < 0) {
4086 			msg[0] = '\0';
4087 		}
4088 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4089 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4090 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4091 		return;
4092 	}
4093 	asoc->this_sack_highest_gap = cumack;
4094 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4095 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4096 		    stcb->asoc.overall_error_count,
4097 		    0,
4098 		    SCTP_FROM_SCTP_INDATA,
4099 		    __LINE__);
4100 	}
4101 	stcb->asoc.overall_error_count = 0;
4102 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4103 		/* process the new consecutive TSN first */
4104 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4105 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4106 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4107 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4108 				}
4109 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4110 					/*
4111 					 * If it is less than ACKED, it is
4112 					 * now no-longer in flight. Higher
4113 					 * values may occur during marking
4114 					 */
4115 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4116 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4117 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4118 							    tp1->whoTo->flight_size,
4119 							    tp1->book_size,
4120 							    (uint32_t)(uintptr_t)tp1->whoTo,
4121 							    tp1->rec.data.tsn);
4122 						}
4123 						sctp_flight_size_decrease(tp1);
4124 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4125 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4126 							    tp1);
4127 						}
4128 						/* sa_ignore NO_NULL_CHK */
4129 						sctp_total_flight_decrease(stcb, tp1);
4130 					}
4131 					tp1->whoTo->net_ack += tp1->send_size;
4132 					if (tp1->snd_count < 2) {
4133 						/*
4134 						 * True non-retransmitted
4135 						 * chunk
4136 						 */
4137 						tp1->whoTo->net_ack2 +=
4138 						    tp1->send_size;
4139 
4140 						/* update RTO too? */
4141 						if (tp1->do_rtt) {
4142 							if (rto_ok &&
4143 							    sctp_calculate_rto(stcb,
4144 							    &stcb->asoc,
4145 							    tp1->whoTo,
4146 							    &tp1->sent_rcv_time,
4147 							    SCTP_RTT_FROM_DATA)) {
4148 								rto_ok = 0;
4149 							}
4150 							if (tp1->whoTo->rto_needed == 0) {
4151 								tp1->whoTo->rto_needed = 1;
4152 							}
4153 							tp1->do_rtt = 0;
4154 						}
4155 					}
4156 					/*
4157 					 * CMT: CUCv2 algorithm. From the
4158 					 * cumack'd TSNs, for each TSN being
4159 					 * acked for the first time, set the
4160 					 * following variables for the
4161 					 * corresp destination.
4162 					 * new_pseudo_cumack will trigger a
4163 					 * cwnd update.
4164 					 * find_(rtx_)pseudo_cumack will
4165 					 * trigger search for the next
4166 					 * expected (rtx-)pseudo-cumack.
4167 					 */
4168 					tp1->whoTo->new_pseudo_cumack = 1;
4169 					tp1->whoTo->find_pseudo_cumack = 1;
4170 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4171 
4172 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4173 						/* sa_ignore NO_NULL_CHK */
4174 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4175 					}
4176 				}
4177 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4178 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4179 				}
4180 				if (tp1->rec.data.chunk_was_revoked) {
4181 					/* deflate the cwnd */
4182 					tp1->whoTo->cwnd -= tp1->book_size;
4183 					tp1->rec.data.chunk_was_revoked = 0;
4184 				}
4185 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4186 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4187 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4188 #ifdef INVARIANTS
4189 					} else {
4190 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4191 #endif
4192 					}
4193 				}
4194 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4195 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4196 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4197 					asoc->trigger_reset = 1;
4198 				}
4199 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4200 				if (tp1->data) {
4201 					/* sa_ignore NO_NULL_CHK */
4202 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4203 					sctp_m_freem(tp1->data);
4204 					tp1->data = NULL;
4205 				}
4206 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4207 					sctp_log_sack(asoc->last_acked_seq,
4208 					    cumack,
4209 					    tp1->rec.data.tsn,
4210 					    0,
4211 					    0,
4212 					    SCTP_LOG_FREE_SENT);
4213 				}
4214 				asoc->sent_queue_cnt--;
4215 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4216 			} else {
4217 				break;
4218 			}
4219 		}
4220 
4221 	}
4222 	/* sa_ignore NO_NULL_CHK */
4223 	if (stcb->sctp_socket) {
4224 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4225 		struct socket *so;
4226 
4227 #endif
4228 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4229 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4230 			/* sa_ignore NO_NULL_CHK */
4231 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4232 		}
4233 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4234 		so = SCTP_INP_SO(stcb->sctp_ep);
4235 		atomic_add_int(&stcb->asoc.refcnt, 1);
4236 		SCTP_TCB_UNLOCK(stcb);
4237 		SCTP_SOCKET_LOCK(so, 1);
4238 		SCTP_TCB_LOCK(stcb);
4239 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4240 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4241 			/* assoc was freed while we were unlocked */
4242 			SCTP_SOCKET_UNLOCK(so, 1);
4243 			return;
4244 		}
4245 #endif
4246 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4247 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4248 		SCTP_SOCKET_UNLOCK(so, 1);
4249 #endif
4250 	} else {
4251 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4252 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4253 		}
4254 	}
4255 
4256 	/* JRS - Use the congestion control given in the CC module */
4257 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4258 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4259 			if (net->net_ack2 > 0) {
4260 				/*
4261 				 * Karn's rule applies to clearing error
4262 				 * count, this is optional.
4263 				 */
4264 				net->error_count = 0;
4265 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4266 					/* addr came good */
4267 					net->dest_state |= SCTP_ADDR_REACHABLE;
4268 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4269 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4270 				}
4271 				if (net == stcb->asoc.primary_destination) {
4272 					if (stcb->asoc.alternate) {
4273 						/*
4274 						 * release the alternate,
4275 						 * primary is good
4276 						 */
4277 						sctp_free_remote_addr(stcb->asoc.alternate);
4278 						stcb->asoc.alternate = NULL;
4279 					}
4280 				}
4281 				if (net->dest_state & SCTP_ADDR_PF) {
4282 					net->dest_state &= ~SCTP_ADDR_PF;
4283 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4284 					    stcb->sctp_ep, stcb, net,
4285 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4286 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4287 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4288 					/* Done with this net */
4289 					net->net_ack = 0;
4290 				}
4291 				/* restore any doubled timers */
4292 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4293 				if (net->RTO < stcb->asoc.minrto) {
4294 					net->RTO = stcb->asoc.minrto;
4295 				}
4296 				if (net->RTO > stcb->asoc.maxrto) {
4297 					net->RTO = stcb->asoc.maxrto;
4298 				}
4299 			}
4300 		}
4301 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4302 	}
4303 	asoc->last_acked_seq = cumack;
4304 
4305 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4306 		/* nothing left in-flight */
4307 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4308 			net->flight_size = 0;
4309 			net->partial_bytes_acked = 0;
4310 		}
4311 		asoc->total_flight = 0;
4312 		asoc->total_flight_count = 0;
4313 	}
4314 
4315 	/* RWND update */
4316 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4317 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4318 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4319 		/* SWS sender side engages */
4320 		asoc->peers_rwnd = 0;
4321 	}
4322 	if (asoc->peers_rwnd > old_rwnd) {
4323 		win_probe_recovery = 1;
4324 	}
4325 	/* Now assure a timer where data is queued at */
4326 again:
4327 	j = 0;
4328 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4329 		if (win_probe_recovery && (net->window_probe)) {
4330 			win_probe_recovered = 1;
4331 			/*
4332 			 * Find first chunk that was used with window probe
4333 			 * and clear the sent
4334 			 */
4335 			/* sa_ignore FREED_MEMORY */
4336 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4337 				if (tp1->window_probe) {
4338 					/* move back to data send queue */
4339 					sctp_window_probe_recovery(stcb, asoc, tp1);
4340 					break;
4341 				}
4342 			}
4343 		}
4344 		if (net->flight_size) {
4345 			j++;
4346 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4347 			if (net->window_probe) {
4348 				net->window_probe = 0;
4349 			}
4350 		} else {
4351 			if (net->window_probe) {
4352 				/*
4353 				 * In window probes we must assure a timer
4354 				 * is still running there
4355 				 */
4356 				net->window_probe = 0;
4357 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4358 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4359 				}
4360 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4361 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4362 				    stcb, net,
4363 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4364 			}
4365 		}
4366 	}
4367 	if ((j == 0) &&
4368 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4369 	    (asoc->sent_queue_retran_cnt == 0) &&
4370 	    (win_probe_recovered == 0) &&
4371 	    (done_once == 0)) {
4372 		/*
4373 		 * huh, this should not happen unless all packets are
4374 		 * PR-SCTP and marked to skip of course.
4375 		 */
4376 		if (sctp_fs_audit(asoc)) {
4377 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4378 				net->flight_size = 0;
4379 			}
4380 			asoc->total_flight = 0;
4381 			asoc->total_flight_count = 0;
4382 			asoc->sent_queue_retran_cnt = 0;
4383 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4384 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4385 					sctp_flight_size_increase(tp1);
4386 					sctp_total_flight_increase(stcb, tp1);
4387 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4388 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4389 				}
4390 			}
4391 		}
4392 		done_once = 1;
4393 		goto again;
4394 	}
4395 	/**********************************/
4396 	/* Now what about shutdown issues */
4397 	/**********************************/
4398 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4399 		/* nothing left on sendqueue.. consider done */
4400 		/* clean up */
4401 		if ((asoc->stream_queue_cnt == 1) &&
4402 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4403 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4404 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4405 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4406 		}
4407 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4408 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4409 		    (asoc->stream_queue_cnt == 1) &&
4410 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4411 			struct mbuf *op_err;
4412 
4413 			*abort_now = 1;
4414 			/* XXX */
4415 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4416 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4417 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4418 			return;
4419 		}
4420 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4421 		    (asoc->stream_queue_cnt == 0)) {
4422 			struct sctp_nets *netp;
4423 
4424 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4425 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4426 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4427 			}
4428 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4429 			sctp_stop_timers_for_shutdown(stcb);
4430 			if (asoc->alternate) {
4431 				netp = asoc->alternate;
4432 			} else {
4433 				netp = asoc->primary_destination;
4434 			}
4435 			sctp_send_shutdown(stcb, netp);
4436 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4437 			    stcb->sctp_ep, stcb, netp);
4438 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4439 			    stcb->sctp_ep, stcb, NULL);
4440 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4441 		    (asoc->stream_queue_cnt == 0)) {
4442 			struct sctp_nets *netp;
4443 
4444 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4445 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4446 			sctp_stop_timers_for_shutdown(stcb);
4447 			if (asoc->alternate) {
4448 				netp = asoc->alternate;
4449 			} else {
4450 				netp = asoc->primary_destination;
4451 			}
4452 			sctp_send_shutdown_ack(stcb, netp);
4453 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4454 			    stcb->sctp_ep, stcb, netp);
4455 		}
4456 	}
4457 	/*********************************************/
4458 	/* Here we perform PR-SCTP procedures        */
4459 	/* (section 4.2)                             */
4460 	/*********************************************/
4461 	/* C1. update advancedPeerAckPoint */
4462 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4463 		asoc->advanced_peer_ack_point = cumack;
4464 	}
4465 	/* PR-Sctp issues need to be addressed too */
4466 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4467 		struct sctp_tmit_chunk *lchk;
4468 		uint32_t old_adv_peer_ack_point;
4469 
4470 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4471 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4472 		/* C3. See if we need to send a Fwd-TSN */
4473 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4474 			/*
4475 			 * ISSUE with ECN, see FWD-TSN processing.
4476 			 */
4477 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4478 				send_forward_tsn(stcb, asoc);
4479 			} else if (lchk) {
4480 				/* try to FR fwd-tsn's that get lost too */
4481 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4482 					send_forward_tsn(stcb, asoc);
4483 				}
4484 			}
4485 		}
4486 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4487 			if (lchk->whoTo != NULL) {
4488 				break;
4489 			}
4490 		}
4491 		if (lchk != NULL) {
4492 			/* Assure a timer is up */
4493 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4494 			    stcb->sctp_ep, stcb, lchk->whoTo);
4495 		}
4496 	}
4497 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4498 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4499 		    rwnd,
4500 		    stcb->asoc.peers_rwnd,
4501 		    stcb->asoc.total_flight,
4502 		    stcb->asoc.total_output_queue_size);
4503 	}
4504 }
4505 
4506 void
4507 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4508     struct sctp_tcb *stcb,
4509     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4510     int *abort_now, uint8_t flags,
4511     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4512 {
4513 	struct sctp_association *asoc;
4514 	struct sctp_tmit_chunk *tp1, *tp2;
4515 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4516 	uint16_t wake_him = 0;
4517 	uint32_t send_s = 0;
4518 	long j;
4519 	int accum_moved = 0;
4520 	int will_exit_fast_recovery = 0;
4521 	uint32_t a_rwnd, old_rwnd;
4522 	int win_probe_recovery = 0;
4523 	int win_probe_recovered = 0;
4524 	struct sctp_nets *net = NULL;
4525 	int done_once;
4526 	int rto_ok = 1;
4527 	uint8_t reneged_all = 0;
4528 	uint8_t cmt_dac_flag;
4529 
4530 	/*
4531 	 * we take any chance we can to service our queues since we cannot
4532 	 * get awoken when the socket is read from :<
4533 	 */
4534 	/*
4535 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4536 	 * old sack, if so discard. 2) If there is nothing left in the send
4537 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4538 	 * too, update any rwnd change and verify no timers are running.
4539 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4540 	 * moved process these first and note that it moved. 4) Process any
4541 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4542 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4543 	 * sync up flightsizes and things, stop all timers and also check
4544 	 * for shutdown_pending state. If so then go ahead and send off the
4545 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4546 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4547 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4548 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4549 	 * if in shutdown_recv state.
4550 	 */
4551 	SCTP_TCB_LOCK_ASSERT(stcb);
4552 	/* CMT DAC algo */
4553 	this_sack_lowest_newack = 0;
4554 	SCTP_STAT_INCR(sctps_slowpath_sack);
4555 	last_tsn = cum_ack;
4556 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4557 #ifdef SCTP_ASOCLOG_OF_TSNS
4558 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4559 	stcb->asoc.cumack_log_at++;
4560 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4561 		stcb->asoc.cumack_log_at = 0;
4562 	}
4563 #endif
4564 	a_rwnd = rwnd;
4565 
4566 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4567 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4568 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4569 	}
4570 
4571 	old_rwnd = stcb->asoc.peers_rwnd;
4572 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4573 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4574 		    stcb->asoc.overall_error_count,
4575 		    0,
4576 		    SCTP_FROM_SCTP_INDATA,
4577 		    __LINE__);
4578 	}
4579 	stcb->asoc.overall_error_count = 0;
4580 	asoc = &stcb->asoc;
4581 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4582 		sctp_log_sack(asoc->last_acked_seq,
4583 		    cum_ack,
4584 		    0,
4585 		    num_seg,
4586 		    num_dup,
4587 		    SCTP_LOG_NEW_SACK);
4588 	}
4589 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4590 		uint16_t i;
4591 		uint32_t *dupdata, dblock;
4592 
4593 		for (i = 0; i < num_dup; i++) {
4594 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4595 			    sizeof(uint32_t), (uint8_t *)&dblock);
4596 			if (dupdata == NULL) {
4597 				break;
4598 			}
4599 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4600 		}
4601 	}
4602 	/* reality check */
4603 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4604 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4605 		    sctpchunk_listhead);
4606 		send_s = tp1->rec.data.tsn + 1;
4607 	} else {
4608 		tp1 = NULL;
4609 		send_s = asoc->sending_seq;
4610 	}
4611 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4612 		struct mbuf *op_err;
4613 		char msg[SCTP_DIAG_INFO_LEN];
4614 
4615 		/*
4616 		 * no way, we have not even sent this TSN out yet. Peer is
4617 		 * hopelessly messed up with us.
4618 		 */
4619 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4620 		    cum_ack, send_s);
4621 		if (tp1) {
4622 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4623 			    tp1->rec.data.tsn, (void *)tp1);
4624 		}
4625 hopeless_peer:
4626 		*abort_now = 1;
4627 		/* XXX */
4628 		if (snprintf(msg, sizeof(msg),
4629 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4630 		    cum_ack, send_s) < 0) {
4631 			msg[0] = '\0';
4632 		}
4633 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4634 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4635 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4636 		return;
4637 	}
4638 	/**********************/
4639 	/* 1) check the range */
4640 	/**********************/
4641 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4642 		/* acking something behind */
4643 		return;
4644 	}
4645 
4646 	/* update the Rwnd of the peer */
4647 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4648 	    TAILQ_EMPTY(&asoc->send_queue) &&
4649 	    (asoc->stream_queue_cnt == 0)) {
4650 		/* nothing left on send/sent and strmq */
4651 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4652 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4653 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4654 		}
4655 		asoc->peers_rwnd = a_rwnd;
4656 		if (asoc->sent_queue_retran_cnt) {
4657 			asoc->sent_queue_retran_cnt = 0;
4658 		}
4659 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4660 			/* SWS sender side engages */
4661 			asoc->peers_rwnd = 0;
4662 		}
4663 		/* stop any timers */
4664 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4665 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4666 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4667 			net->partial_bytes_acked = 0;
4668 			net->flight_size = 0;
4669 		}
4670 		asoc->total_flight = 0;
4671 		asoc->total_flight_count = 0;
4672 		return;
4673 	}
4674 	/*
4675 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4676 	 * things. The total byte count acked is tracked in netAckSz AND
4677 	 * netAck2 is used to track the total bytes acked that are un-
4678 	 * amibguious and were never retransmitted. We track these on a per
4679 	 * destination address basis.
4680 	 */
4681 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4682 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4683 			/* Drag along the window_tsn for cwr's */
4684 			net->cwr_window_tsn = cum_ack;
4685 		}
4686 		net->prev_cwnd = net->cwnd;
4687 		net->net_ack = 0;
4688 		net->net_ack2 = 0;
4689 
4690 		/*
4691 		 * CMT: Reset CUC and Fast recovery algo variables before
4692 		 * SACK processing
4693 		 */
4694 		net->new_pseudo_cumack = 0;
4695 		net->will_exit_fast_recovery = 0;
4696 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4697 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4698 		}
4699 
4700 		/*
4701 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4702 		 * to be greater than the cumack. Also reset saw_newack to 0
4703 		 * for all dests.
4704 		 */
4705 		net->saw_newack = 0;
4706 		net->this_sack_highest_newack = last_tsn;
4707 	}
4708 	/* process the new consecutive TSN first */
4709 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4710 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4711 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4712 				accum_moved = 1;
4713 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4714 					/*
4715 					 * If it is less than ACKED, it is
4716 					 * now no-longer in flight. Higher
4717 					 * values may occur during marking
4718 					 */
4719 					if ((tp1->whoTo->dest_state &
4720 					    SCTP_ADDR_UNCONFIRMED) &&
4721 					    (tp1->snd_count < 2)) {
4722 						/*
4723 						 * If there was no retran
4724 						 * and the address is
4725 						 * un-confirmed and we sent
4726 						 * there and are now
4727 						 * sacked.. its confirmed,
4728 						 * mark it so.
4729 						 */
4730 						tp1->whoTo->dest_state &=
4731 						    ~SCTP_ADDR_UNCONFIRMED;
4732 					}
4733 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4734 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4735 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4736 							    tp1->whoTo->flight_size,
4737 							    tp1->book_size,
4738 							    (uint32_t)(uintptr_t)tp1->whoTo,
4739 							    tp1->rec.data.tsn);
4740 						}
4741 						sctp_flight_size_decrease(tp1);
4742 						sctp_total_flight_decrease(stcb, tp1);
4743 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4744 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4745 							    tp1);
4746 						}
4747 					}
4748 					tp1->whoTo->net_ack += tp1->send_size;
4749 
4750 					/* CMT SFR and DAC algos */
4751 					this_sack_lowest_newack = tp1->rec.data.tsn;
4752 					tp1->whoTo->saw_newack = 1;
4753 
4754 					if (tp1->snd_count < 2) {
4755 						/*
4756 						 * True non-retransmitted
4757 						 * chunk
4758 						 */
4759 						tp1->whoTo->net_ack2 +=
4760 						    tp1->send_size;
4761 
4762 						/* update RTO too? */
4763 						if (tp1->do_rtt) {
4764 							if (rto_ok &&
4765 							    sctp_calculate_rto(stcb,
4766 							    &stcb->asoc,
4767 							    tp1->whoTo,
4768 							    &tp1->sent_rcv_time,
4769 							    SCTP_RTT_FROM_DATA)) {
4770 								rto_ok = 0;
4771 							}
4772 							if (tp1->whoTo->rto_needed == 0) {
4773 								tp1->whoTo->rto_needed = 1;
4774 							}
4775 							tp1->do_rtt = 0;
4776 						}
4777 					}
4778 					/*
4779 					 * CMT: CUCv2 algorithm. From the
4780 					 * cumack'd TSNs, for each TSN being
4781 					 * acked for the first time, set the
4782 					 * following variables for the
4783 					 * corresp destination.
4784 					 * new_pseudo_cumack will trigger a
4785 					 * cwnd update.
4786 					 * find_(rtx_)pseudo_cumack will
4787 					 * trigger search for the next
4788 					 * expected (rtx-)pseudo-cumack.
4789 					 */
4790 					tp1->whoTo->new_pseudo_cumack = 1;
4791 					tp1->whoTo->find_pseudo_cumack = 1;
4792 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4793 
4794 
4795 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4796 						sctp_log_sack(asoc->last_acked_seq,
4797 						    cum_ack,
4798 						    tp1->rec.data.tsn,
4799 						    0,
4800 						    0,
4801 						    SCTP_LOG_TSN_ACKED);
4802 					}
4803 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4804 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4805 					}
4806 				}
4807 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4808 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4809 #ifdef SCTP_AUDITING_ENABLED
4810 					sctp_audit_log(0xB3,
4811 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4812 #endif
4813 				}
4814 				if (tp1->rec.data.chunk_was_revoked) {
4815 					/* deflate the cwnd */
4816 					tp1->whoTo->cwnd -= tp1->book_size;
4817 					tp1->rec.data.chunk_was_revoked = 0;
4818 				}
4819 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4820 					tp1->sent = SCTP_DATAGRAM_ACKED;
4821 				}
4822 			}
4823 		} else {
4824 			break;
4825 		}
4826 	}
4827 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4828 	/* always set this up to cum-ack */
4829 	asoc->this_sack_highest_gap = last_tsn;
4830 
4831 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4832 
4833 		/*
4834 		 * thisSackHighestGap will increase while handling NEW
4835 		 * segments this_sack_highest_newack will increase while
4836 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4837 		 * used for CMT DAC algo. saw_newack will also change.
4838 		 */
4839 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4840 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4841 		    num_seg, num_nr_seg, &rto_ok)) {
4842 			wake_him++;
4843 		}
4844 		/*
4845 		 * validate the biggest_tsn_acked in the gap acks if strict
4846 		 * adherence is wanted.
4847 		 */
4848 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4849 			/*
4850 			 * peer is either confused or we are under attack.
4851 			 * We must abort.
4852 			 */
4853 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4854 			    biggest_tsn_acked, send_s);
4855 			goto hopeless_peer;
4856 		}
4857 	}
4858 	/*******************************************/
4859 	/* cancel ALL T3-send timer if accum moved */
4860 	/*******************************************/
4861 	if (asoc->sctp_cmt_on_off > 0) {
4862 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4863 			if (net->new_pseudo_cumack)
4864 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4865 				    stcb, net,
4866 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4867 
4868 		}
4869 	} else {
4870 		if (accum_moved) {
4871 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4872 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4873 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4874 			}
4875 		}
4876 	}
4877 	/********************************************/
4878 	/* drop the acked chunks from the sentqueue */
4879 	/********************************************/
4880 	asoc->last_acked_seq = cum_ack;
4881 
4882 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4883 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4884 			break;
4885 		}
4886 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4887 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4888 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4889 #ifdef INVARIANTS
4890 			} else {
4891 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4892 #endif
4893 			}
4894 		}
4895 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4896 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4897 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4898 			asoc->trigger_reset = 1;
4899 		}
4900 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4901 		if (PR_SCTP_ENABLED(tp1->flags)) {
4902 			if (asoc->pr_sctp_cnt != 0)
4903 				asoc->pr_sctp_cnt--;
4904 		}
4905 		asoc->sent_queue_cnt--;
4906 		if (tp1->data) {
4907 			/* sa_ignore NO_NULL_CHK */
4908 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4909 			sctp_m_freem(tp1->data);
4910 			tp1->data = NULL;
4911 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4912 				asoc->sent_queue_cnt_removeable--;
4913 			}
4914 		}
4915 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4916 			sctp_log_sack(asoc->last_acked_seq,
4917 			    cum_ack,
4918 			    tp1->rec.data.tsn,
4919 			    0,
4920 			    0,
4921 			    SCTP_LOG_FREE_SENT);
4922 		}
4923 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4924 		wake_him++;
4925 	}
4926 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4927 #ifdef INVARIANTS
4928 		panic("Warning flight size is positive and should be 0");
4929 #else
4930 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4931 		    asoc->total_flight);
4932 #endif
4933 		asoc->total_flight = 0;
4934 	}
4935 
4936 	/* sa_ignore NO_NULL_CHK */
4937 	if ((wake_him) && (stcb->sctp_socket)) {
4938 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4939 		struct socket *so;
4940 
4941 #endif
4942 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4943 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4944 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4945 		}
4946 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4947 		so = SCTP_INP_SO(stcb->sctp_ep);
4948 		atomic_add_int(&stcb->asoc.refcnt, 1);
4949 		SCTP_TCB_UNLOCK(stcb);
4950 		SCTP_SOCKET_LOCK(so, 1);
4951 		SCTP_TCB_LOCK(stcb);
4952 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4953 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4954 			/* assoc was freed while we were unlocked */
4955 			SCTP_SOCKET_UNLOCK(so, 1);
4956 			return;
4957 		}
4958 #endif
4959 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4960 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4961 		SCTP_SOCKET_UNLOCK(so, 1);
4962 #endif
4963 	} else {
4964 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4965 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4966 		}
4967 	}
4968 
4969 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4970 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4971 			/* Setup so we will exit RFC2582 fast recovery */
4972 			will_exit_fast_recovery = 1;
4973 		}
4974 	}
4975 	/*
4976 	 * Check for revoked fragments:
4977 	 *
4978 	 * if Previous sack - Had no frags then we can't have any revoked if
4979 	 * Previous sack - Had frag's then - If we now have frags aka
4980 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4981 	 * some of them. else - The peer revoked all ACKED fragments, since
4982 	 * we had some before and now we have NONE.
4983 	 */
4984 
4985 	if (num_seg) {
4986 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4987 		asoc->saw_sack_with_frags = 1;
4988 	} else if (asoc->saw_sack_with_frags) {
4989 		int cnt_revoked = 0;
4990 
4991 		/* Peer revoked all dg's marked or acked */
4992 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4993 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4994 				tp1->sent = SCTP_DATAGRAM_SENT;
4995 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4996 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4997 					    tp1->whoTo->flight_size,
4998 					    tp1->book_size,
4999 					    (uint32_t)(uintptr_t)tp1->whoTo,
5000 					    tp1->rec.data.tsn);
5001 				}
5002 				sctp_flight_size_increase(tp1);
5003 				sctp_total_flight_increase(stcb, tp1);
5004 				tp1->rec.data.chunk_was_revoked = 1;
5005 				/*
5006 				 * To ensure that this increase in
5007 				 * flightsize, which is artificial, does not
5008 				 * throttle the sender, we also increase the
5009 				 * cwnd artificially.
5010 				 */
5011 				tp1->whoTo->cwnd += tp1->book_size;
5012 				cnt_revoked++;
5013 			}
5014 		}
5015 		if (cnt_revoked) {
5016 			reneged_all = 1;
5017 		}
5018 		asoc->saw_sack_with_frags = 0;
5019 	}
5020 	if (num_nr_seg > 0)
5021 		asoc->saw_sack_with_nr_frags = 1;
5022 	else
5023 		asoc->saw_sack_with_nr_frags = 0;
5024 
5025 	/* JRS - Use the congestion control given in the CC module */
5026 	if (ecne_seen == 0) {
5027 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5028 			if (net->net_ack2 > 0) {
5029 				/*
5030 				 * Karn's rule applies to clearing error
5031 				 * count, this is optional.
5032 				 */
5033 				net->error_count = 0;
5034 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
5035 					/* addr came good */
5036 					net->dest_state |= SCTP_ADDR_REACHABLE;
5037 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
5038 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
5039 				}
5040 
5041 				if (net == stcb->asoc.primary_destination) {
5042 					if (stcb->asoc.alternate) {
5043 						/*
5044 						 * release the alternate,
5045 						 * primary is good
5046 						 */
5047 						sctp_free_remote_addr(stcb->asoc.alternate);
5048 						stcb->asoc.alternate = NULL;
5049 					}
5050 				}
5051 
5052 				if (net->dest_state & SCTP_ADDR_PF) {
5053 					net->dest_state &= ~SCTP_ADDR_PF;
5054 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5055 					    stcb->sctp_ep, stcb, net,
5056 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5057 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5058 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5059 					/* Done with this net */
5060 					net->net_ack = 0;
5061 				}
5062 				/* restore any doubled timers */
5063 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5064 				if (net->RTO < stcb->asoc.minrto) {
5065 					net->RTO = stcb->asoc.minrto;
5066 				}
5067 				if (net->RTO > stcb->asoc.maxrto) {
5068 					net->RTO = stcb->asoc.maxrto;
5069 				}
5070 			}
5071 		}
5072 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5073 	}
5074 
5075 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5076 		/* nothing left in-flight */
5077 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5078 			/* stop all timers */
5079 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5080 			    stcb, net,
5081 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5082 			net->flight_size = 0;
5083 			net->partial_bytes_acked = 0;
5084 		}
5085 		asoc->total_flight = 0;
5086 		asoc->total_flight_count = 0;
5087 	}
5088 
5089 	/**********************************/
5090 	/* Now what about shutdown issues */
5091 	/**********************************/
5092 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5093 		/* nothing left on sendqueue.. consider done */
5094 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5095 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5096 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5097 		}
5098 		asoc->peers_rwnd = a_rwnd;
5099 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5100 			/* SWS sender side engages */
5101 			asoc->peers_rwnd = 0;
5102 		}
5103 		/* clean up */
5104 		if ((asoc->stream_queue_cnt == 1) &&
5105 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5106 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5107 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5108 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5109 		}
5110 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5111 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5112 		    (asoc->stream_queue_cnt == 1) &&
5113 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5114 			struct mbuf *op_err;
5115 
5116 			*abort_now = 1;
5117 			/* XXX */
5118 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5119 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5120 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5121 			return;
5122 		}
5123 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5124 		    (asoc->stream_queue_cnt == 0)) {
5125 			struct sctp_nets *netp;
5126 
5127 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5128 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5129 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5130 			}
5131 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5132 			sctp_stop_timers_for_shutdown(stcb);
5133 			if (asoc->alternate) {
5134 				netp = asoc->alternate;
5135 			} else {
5136 				netp = asoc->primary_destination;
5137 			}
5138 			sctp_send_shutdown(stcb, netp);
5139 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5140 			    stcb->sctp_ep, stcb, netp);
5141 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5142 			    stcb->sctp_ep, stcb, NULL);
5143 			return;
5144 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5145 		    (asoc->stream_queue_cnt == 0)) {
5146 			struct sctp_nets *netp;
5147 
5148 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5149 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5150 			sctp_stop_timers_for_shutdown(stcb);
5151 			if (asoc->alternate) {
5152 				netp = asoc->alternate;
5153 			} else {
5154 				netp = asoc->primary_destination;
5155 			}
5156 			sctp_send_shutdown_ack(stcb, netp);
5157 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5158 			    stcb->sctp_ep, stcb, netp);
5159 			return;
5160 		}
5161 	}
5162 	/*
5163 	 * Now here we are going to recycle net_ack for a different use...
5164 	 * HEADS UP.
5165 	 */
5166 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5167 		net->net_ack = 0;
5168 	}
5169 
5170 	/*
5171 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5172 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5173 	 * automatically ensure that.
5174 	 */
5175 	if ((asoc->sctp_cmt_on_off > 0) &&
5176 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5177 	    (cmt_dac_flag == 0)) {
5178 		this_sack_lowest_newack = cum_ack;
5179 	}
5180 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5181 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5182 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5183 	}
5184 	/* JRS - Use the congestion control given in the CC module */
5185 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5186 
5187 	/* Now are we exiting loss recovery ? */
5188 	if (will_exit_fast_recovery) {
5189 		/* Ok, we must exit fast recovery */
5190 		asoc->fast_retran_loss_recovery = 0;
5191 	}
5192 	if ((asoc->sat_t3_loss_recovery) &&
5193 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5194 		/* end satellite t3 loss recovery */
5195 		asoc->sat_t3_loss_recovery = 0;
5196 	}
5197 	/*
5198 	 * CMT Fast recovery
5199 	 */
5200 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5201 		if (net->will_exit_fast_recovery) {
5202 			/* Ok, we must exit fast recovery */
5203 			net->fast_retran_loss_recovery = 0;
5204 		}
5205 	}
5206 
5207 	/* Adjust and set the new rwnd value */
5208 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5209 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5210 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5211 	}
5212 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5213 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5214 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5215 		/* SWS sender side engages */
5216 		asoc->peers_rwnd = 0;
5217 	}
5218 	if (asoc->peers_rwnd > old_rwnd) {
5219 		win_probe_recovery = 1;
5220 	}
5221 
5222 	/*
5223 	 * Now we must setup so we have a timer up for anyone with
5224 	 * outstanding data.
5225 	 */
5226 	done_once = 0;
5227 again:
5228 	j = 0;
5229 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5230 		if (win_probe_recovery && (net->window_probe)) {
5231 			win_probe_recovered = 1;
5232 			/*-
5233 			 * Find first chunk that was used with
5234 			 * window probe and clear the event. Put
5235 			 * it back into the send queue as if has
5236 			 * not been sent.
5237 			 */
5238 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5239 				if (tp1->window_probe) {
5240 					sctp_window_probe_recovery(stcb, asoc, tp1);
5241 					break;
5242 				}
5243 			}
5244 		}
5245 		if (net->flight_size) {
5246 			j++;
5247 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5248 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5249 				    stcb->sctp_ep, stcb, net);
5250 			}
5251 			if (net->window_probe) {
5252 				net->window_probe = 0;
5253 			}
5254 		} else {
5255 			if (net->window_probe) {
5256 				/*
5257 				 * In window probes we must assure a timer
5258 				 * is still running there
5259 				 */
5260 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5261 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5262 					    stcb->sctp_ep, stcb, net);
5263 
5264 				}
5265 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5266 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5267 				    stcb, net,
5268 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5269 			}
5270 		}
5271 	}
5272 	if ((j == 0) &&
5273 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5274 	    (asoc->sent_queue_retran_cnt == 0) &&
5275 	    (win_probe_recovered == 0) &&
5276 	    (done_once == 0)) {
5277 		/*
5278 		 * huh, this should not happen unless all packets are
5279 		 * PR-SCTP and marked to skip of course.
5280 		 */
5281 		if (sctp_fs_audit(asoc)) {
5282 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5283 				net->flight_size = 0;
5284 			}
5285 			asoc->total_flight = 0;
5286 			asoc->total_flight_count = 0;
5287 			asoc->sent_queue_retran_cnt = 0;
5288 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5289 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5290 					sctp_flight_size_increase(tp1);
5291 					sctp_total_flight_increase(stcb, tp1);
5292 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5293 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5294 				}
5295 			}
5296 		}
5297 		done_once = 1;
5298 		goto again;
5299 	}
5300 	/*********************************************/
5301 	/* Here we perform PR-SCTP procedures        */
5302 	/* (section 4.2)                             */
5303 	/*********************************************/
5304 	/* C1. update advancedPeerAckPoint */
5305 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5306 		asoc->advanced_peer_ack_point = cum_ack;
5307 	}
5308 	/* C2. try to further move advancedPeerAckPoint ahead */
5309 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5310 		struct sctp_tmit_chunk *lchk;
5311 		uint32_t old_adv_peer_ack_point;
5312 
5313 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5314 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5315 		/* C3. See if we need to send a Fwd-TSN */
5316 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5317 			/*
5318 			 * ISSUE with ECN, see FWD-TSN processing.
5319 			 */
5320 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5321 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5322 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5323 				    old_adv_peer_ack_point);
5324 			}
5325 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5326 				send_forward_tsn(stcb, asoc);
5327 			} else if (lchk) {
5328 				/* try to FR fwd-tsn's that get lost too */
5329 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5330 					send_forward_tsn(stcb, asoc);
5331 				}
5332 			}
5333 		}
5334 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5335 			if (lchk->whoTo != NULL) {
5336 				break;
5337 			}
5338 		}
5339 		if (lchk != NULL) {
5340 			/* Assure a timer is up */
5341 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5342 			    stcb->sctp_ep, stcb, lchk->whoTo);
5343 		}
5344 	}
5345 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5346 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5347 		    a_rwnd,
5348 		    stcb->asoc.peers_rwnd,
5349 		    stcb->asoc.total_flight,
5350 		    stcb->asoc.total_output_queue_size);
5351 	}
5352 }
5353 
5354 void
5355 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5356 {
5357 	/* Copy cum-ack */
5358 	uint32_t cum_ack, a_rwnd;
5359 
5360 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5361 	/* Arrange so a_rwnd does NOT change */
5362 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5363 
5364 	/* Now call the express sack handling */
5365 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5366 }
5367 
5368 static void
5369 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5370     struct sctp_stream_in *strmin)
5371 {
5372 	struct sctp_queued_to_read *control, *ncontrol;
5373 	struct sctp_association *asoc;
5374 	uint32_t mid;
5375 	int need_reasm_check = 0;
5376 
5377 	asoc = &stcb->asoc;
5378 	mid = strmin->last_mid_delivered;
5379 	/*
5380 	 * First deliver anything prior to and including the stream no that
5381 	 * came in.
5382 	 */
5383 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5384 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5385 			/* this is deliverable now */
5386 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5387 				if (control->on_strm_q) {
5388 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5389 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5390 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5391 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5392 #ifdef INVARIANTS
5393 					} else {
5394 						panic("strmin: %p ctl: %p unknown %d",
5395 						    strmin, control, control->on_strm_q);
5396 #endif
5397 					}
5398 					control->on_strm_q = 0;
5399 				}
5400 				/* subtract pending on streams */
5401 				if (asoc->size_on_all_streams >= control->length) {
5402 					asoc->size_on_all_streams -= control->length;
5403 				} else {
5404 #ifdef INVARIANTS
5405 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5406 #else
5407 					asoc->size_on_all_streams = 0;
5408 #endif
5409 				}
5410 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5411 				/* deliver it to at least the delivery-q */
5412 				if (stcb->sctp_socket) {
5413 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5414 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5415 					    control,
5416 					    &stcb->sctp_socket->so_rcv,
5417 					    1, SCTP_READ_LOCK_HELD,
5418 					    SCTP_SO_NOT_LOCKED);
5419 				}
5420 			} else {
5421 				/* Its a fragmented message */
5422 				if (control->first_frag_seen) {
5423 					/*
5424 					 * Make it so this is next to
5425 					 * deliver, we restore later
5426 					 */
5427 					strmin->last_mid_delivered = control->mid - 1;
5428 					need_reasm_check = 1;
5429 					break;
5430 				}
5431 			}
5432 		} else {
5433 			/* no more delivery now. */
5434 			break;
5435 		}
5436 	}
5437 	if (need_reasm_check) {
5438 		int ret;
5439 
5440 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5441 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5442 			/* Restore the next to deliver unless we are ahead */
5443 			strmin->last_mid_delivered = mid;
5444 		}
5445 		if (ret == 0) {
5446 			/* Left the front Partial one on */
5447 			return;
5448 		}
5449 		need_reasm_check = 0;
5450 	}
5451 	/*
5452 	 * now we must deliver things in queue the normal way  if any are
5453 	 * now ready.
5454 	 */
5455 	mid = strmin->last_mid_delivered + 1;
5456 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5457 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5458 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5459 				/* this is deliverable now */
5460 				if (control->on_strm_q) {
5461 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5462 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5463 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5464 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5465 #ifdef INVARIANTS
5466 					} else {
5467 						panic("strmin: %p ctl: %p unknown %d",
5468 						    strmin, control, control->on_strm_q);
5469 #endif
5470 					}
5471 					control->on_strm_q = 0;
5472 				}
5473 				/* subtract pending on streams */
5474 				if (asoc->size_on_all_streams >= control->length) {
5475 					asoc->size_on_all_streams -= control->length;
5476 				} else {
5477 #ifdef INVARIANTS
5478 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5479 #else
5480 					asoc->size_on_all_streams = 0;
5481 #endif
5482 				}
5483 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5484 				/* deliver it to at least the delivery-q */
5485 				strmin->last_mid_delivered = control->mid;
5486 				if (stcb->sctp_socket) {
5487 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5488 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5489 					    control,
5490 					    &stcb->sctp_socket->so_rcv, 1,
5491 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5492 
5493 				}
5494 				mid = strmin->last_mid_delivered + 1;
5495 			} else {
5496 				/* Its a fragmented message */
5497 				if (control->first_frag_seen) {
5498 					/*
5499 					 * Make it so this is next to
5500 					 * deliver
5501 					 */
5502 					strmin->last_mid_delivered = control->mid - 1;
5503 					need_reasm_check = 1;
5504 					break;
5505 				}
5506 			}
5507 		} else {
5508 			break;
5509 		}
5510 	}
5511 	if (need_reasm_check) {
5512 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5513 	}
5514 }
5515 
5516 
5517 
5518 static void
5519 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5520     struct sctp_association *asoc,
5521     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5522 {
5523 	struct sctp_queued_to_read *control;
5524 	struct sctp_stream_in *strm;
5525 	struct sctp_tmit_chunk *chk, *nchk;
5526 	int cnt_removed = 0;
5527 
5528 	/*
5529 	 * For now large messages held on the stream reasm that are complete
5530 	 * will be tossed too. We could in theory do more work to spin
5531 	 * through and stop after dumping one msg aka seeing the start of a
5532 	 * new msg at the head, and call the delivery function... to see if
5533 	 * it can be delivered... But for now we just dump everything on the
5534 	 * queue.
5535 	 */
5536 	strm = &asoc->strmin[stream];
5537 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5538 	if (control == NULL) {
5539 		/* Not found */
5540 		return;
5541 	}
5542 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5543 		return;
5544 	}
5545 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5546 		/* Purge hanging chunks */
5547 		if (!asoc->idata_supported && (ordered == 0)) {
5548 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5549 				break;
5550 			}
5551 		}
5552 		cnt_removed++;
5553 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5554 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5555 			asoc->size_on_reasm_queue -= chk->send_size;
5556 		} else {
5557 #ifdef INVARIANTS
5558 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5559 #else
5560 			asoc->size_on_reasm_queue = 0;
5561 #endif
5562 		}
5563 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5564 		if (chk->data) {
5565 			sctp_m_freem(chk->data);
5566 			chk->data = NULL;
5567 		}
5568 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5569 	}
5570 	if (!TAILQ_EMPTY(&control->reasm)) {
5571 		/* This has to be old data, unordered */
5572 		if (control->data) {
5573 			sctp_m_freem(control->data);
5574 			control->data = NULL;
5575 		}
5576 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5577 		chk = TAILQ_FIRST(&control->reasm);
5578 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5579 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5580 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5581 			    chk, SCTP_READ_LOCK_HELD);
5582 		}
5583 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5584 		return;
5585 	}
5586 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5587 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5588 		if (asoc->size_on_all_streams >= control->length) {
5589 			asoc->size_on_all_streams -= control->length;
5590 		} else {
5591 #ifdef INVARIANTS
5592 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5593 #else
5594 			asoc->size_on_all_streams = 0;
5595 #endif
5596 		}
5597 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5598 		control->on_strm_q = 0;
5599 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5600 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5601 		control->on_strm_q = 0;
5602 #ifdef INVARIANTS
5603 	} else if (control->on_strm_q) {
5604 		panic("strm: %p ctl: %p unknown %d",
5605 		    strm, control, control->on_strm_q);
5606 #endif
5607 	}
5608 	control->on_strm_q = 0;
5609 	if (control->on_read_q == 0) {
5610 		sctp_free_remote_addr(control->whoFrom);
5611 		if (control->data) {
5612 			sctp_m_freem(control->data);
5613 			control->data = NULL;
5614 		}
5615 		sctp_free_a_readq(stcb, control);
5616 	}
5617 }
5618 
5619 void
5620 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5621     struct sctp_forward_tsn_chunk *fwd,
5622     int *abort_flag, struct mbuf *m, int offset)
5623 {
5624 	/* The pr-sctp fwd tsn */
5625 	/*
5626 	 * here we will perform all the data receiver side steps for
5627 	 * processing FwdTSN, as required in by pr-sctp draft:
5628 	 *
5629 	 * Assume we get FwdTSN(x):
5630 	 *
5631 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5632 	 * + others we have 3) examine and update re-ordering queue on
5633 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5634 	 * report where we are.
5635 	 */
5636 	struct sctp_association *asoc;
5637 	uint32_t new_cum_tsn, gap;
5638 	unsigned int i, fwd_sz, m_size;
5639 	uint32_t str_seq;
5640 	struct sctp_stream_in *strm;
5641 	struct sctp_queued_to_read *control, *sv;
5642 
5643 	asoc = &stcb->asoc;
5644 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5645 		SCTPDBG(SCTP_DEBUG_INDATA1,
5646 		    "Bad size too small/big fwd-tsn\n");
5647 		return;
5648 	}
5649 	m_size = (stcb->asoc.mapping_array_size << 3);
5650 	/*************************************************************/
5651 	/* 1. Here we update local cumTSN and shift the bitmap array */
5652 	/*************************************************************/
5653 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5654 
5655 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5656 		/* Already got there ... */
5657 		return;
5658 	}
5659 	/*
5660 	 * now we know the new TSN is more advanced, let's find the actual
5661 	 * gap
5662 	 */
5663 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5664 	asoc->cumulative_tsn = new_cum_tsn;
5665 	if (gap >= m_size) {
5666 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5667 			struct mbuf *op_err;
5668 			char msg[SCTP_DIAG_INFO_LEN];
5669 
5670 			/*
5671 			 * out of range (of single byte chunks in the rwnd I
5672 			 * give out). This must be an attacker.
5673 			 */
5674 			*abort_flag = 1;
5675 			if (snprintf(msg, sizeof(msg),
5676 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5677 			    new_cum_tsn, asoc->highest_tsn_inside_map) < 0) {
5678 				msg[0] = '\0';
5679 			}
5680 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5681 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5682 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5683 			return;
5684 		}
5685 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5686 
5687 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5688 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5689 		asoc->highest_tsn_inside_map = new_cum_tsn;
5690 
5691 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5692 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5693 
5694 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5695 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5696 		}
5697 	} else {
5698 		SCTP_TCB_LOCK_ASSERT(stcb);
5699 		for (i = 0; i <= gap; i++) {
5700 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5701 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5702 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5703 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5704 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5705 				}
5706 			}
5707 		}
5708 	}
5709 	/*************************************************************/
5710 	/* 2. Clear up re-assembly queue                             */
5711 	/*************************************************************/
5712 
5713 	/* This is now done as part of clearing up the stream/seq */
5714 	if (asoc->idata_supported == 0) {
5715 		uint16_t sid;
5716 
5717 		/* Flush all the un-ordered data based on cum-tsn */
5718 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5719 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5720 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5721 		}
5722 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5723 	}
5724 	/*******************************************************/
5725 	/* 3. Update the PR-stream re-ordering queues and fix  */
5726 	/* delivery issues as needed.                       */
5727 	/*******************************************************/
5728 	fwd_sz -= sizeof(*fwd);
5729 	if (m && fwd_sz) {
5730 		/* New method. */
5731 		unsigned int num_str;
5732 		uint32_t mid, cur_mid;
5733 		uint16_t sid;
5734 		uint16_t ordered, flags;
5735 		struct sctp_strseq *stseq, strseqbuf;
5736 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5737 
5738 		offset += sizeof(*fwd);
5739 
5740 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5741 		if (asoc->idata_supported) {
5742 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5743 		} else {
5744 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5745 		}
5746 		for (i = 0; i < num_str; i++) {
5747 			if (asoc->idata_supported) {
5748 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5749 				    sizeof(struct sctp_strseq_mid),
5750 				    (uint8_t *)&strseqbuf_m);
5751 				offset += sizeof(struct sctp_strseq_mid);
5752 				if (stseq_m == NULL) {
5753 					break;
5754 				}
5755 				sid = ntohs(stseq_m->sid);
5756 				mid = ntohl(stseq_m->mid);
5757 				flags = ntohs(stseq_m->flags);
5758 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5759 					ordered = 0;
5760 				} else {
5761 					ordered = 1;
5762 				}
5763 			} else {
5764 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5765 				    sizeof(struct sctp_strseq),
5766 				    (uint8_t *)&strseqbuf);
5767 				offset += sizeof(struct sctp_strseq);
5768 				if (stseq == NULL) {
5769 					break;
5770 				}
5771 				sid = ntohs(stseq->sid);
5772 				mid = (uint32_t)ntohs(stseq->ssn);
5773 				ordered = 1;
5774 			}
5775 			/* Convert */
5776 
5777 			/* now process */
5778 
5779 			/*
5780 			 * Ok we now look for the stream/seq on the read
5781 			 * queue where its not all delivered. If we find it
5782 			 * we transmute the read entry into a PDI_ABORTED.
5783 			 */
5784 			if (sid >= asoc->streamincnt) {
5785 				/* screwed up streams, stop!  */
5786 				break;
5787 			}
5788 			if ((asoc->str_of_pdapi == sid) &&
5789 			    (asoc->ssn_of_pdapi == mid)) {
5790 				/*
5791 				 * If this is the one we were partially
5792 				 * delivering now then we no longer are.
5793 				 * Note this will change with the reassembly
5794 				 * re-write.
5795 				 */
5796 				asoc->fragmented_delivery_inprogress = 0;
5797 			}
5798 			strm = &asoc->strmin[sid];
5799 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5800 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5801 			}
5802 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5803 				if ((control->sinfo_stream == sid) &&
5804 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5805 					str_seq = (sid << 16) | (0x0000ffff & mid);
5806 					control->pdapi_aborted = 1;
5807 					sv = stcb->asoc.control_pdapi;
5808 					control->end_added = 1;
5809 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5810 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5811 						if (asoc->size_on_all_streams >= control->length) {
5812 							asoc->size_on_all_streams -= control->length;
5813 						} else {
5814 #ifdef INVARIANTS
5815 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5816 #else
5817 							asoc->size_on_all_streams = 0;
5818 #endif
5819 						}
5820 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5821 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5822 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5823 #ifdef INVARIANTS
5824 					} else if (control->on_strm_q) {
5825 						panic("strm: %p ctl: %p unknown %d",
5826 						    strm, control, control->on_strm_q);
5827 #endif
5828 					}
5829 					control->on_strm_q = 0;
5830 					stcb->asoc.control_pdapi = control;
5831 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5832 					    stcb,
5833 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5834 					    (void *)&str_seq,
5835 					    SCTP_SO_NOT_LOCKED);
5836 					stcb->asoc.control_pdapi = sv;
5837 					break;
5838 				} else if ((control->sinfo_stream == sid) &&
5839 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5840 					/* We are past our victim SSN */
5841 					break;
5842 				}
5843 			}
5844 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5845 				/* Update the sequence number */
5846 				strm->last_mid_delivered = mid;
5847 			}
5848 			/* now kick the stream the new way */
5849 			/* sa_ignore NO_NULL_CHK */
5850 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5851 		}
5852 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5853 	}
5854 	/*
5855 	 * Now slide thing forward.
5856 	 */
5857 	sctp_slide_mapping_arrays(stcb);
5858 }
5859