xref: /freebsd/sys/netinet/sctp_indata.c (revision 2938ecc85c29202824e83d65af5c3a4fb7b3e5fb)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is) so it will be bundled with the outbound data.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}

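/*
 * Build the ancillary-data mbuf (SCTP_RCVINFO, SCTP_NXTINFO, and/or
 * SCTP_SNDRCV/SCTP_EXTRCV cmsgs) for a received message, based on which
 * receive events the application enabled on the endpoint. Returns NULL if
 * no ancillary data was requested or no mbuf could be allocated.
 */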
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}

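/*
 * Move a TSN from the renegable mapping array to the non-renegable one
 * once it can no longer be revoked (e.g. it has been handed up towards
 * the application). Also walks back highest_tsn_inside_map if this TSN
 * was the current maximum there. A no-op unless sctp_do_drain is
 * enabled, since without draining we never renege on received data.
 */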
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}

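/*
 * Insert a control into the proper per-stream queue (ordered or
 * unordered), keeping the queue sorted by MID. Returns 0 on success and
 * -1 if the MID is a duplicate (or, without I-DATA support, if a second
 * unordered control shows up), in which case the caller aborts the
 * association.
 */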
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate MID. Return
				 * -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

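/*
 * Abort the association because reassembly hit an unrecoverable
 * inconsistency (duplicate or impossible FSN, etc.). Frees the offending
 * chunk, sends an operational error naming the spot (opspot) where the
 * problem was detected, and sets *abort_flag for the caller.
 */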
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_rcv buffer, keep doing so until we hit one that is out
 * of order, as long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

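/*
 * Recompute control->length and control->tail_mbuf by walking the mbuf
 * chain hanging off control->data, pruning zero-length mbufs along the
 * way. If the control is already on the read queue, the socket-buffer
 * accounting is charged for each mbuf as well.
 */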
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed locks on the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

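/*
 * Append the mbuf chain m to the end of the control's data, again
 * pruning zero-length mbufs and updating the tail pointer, length, and
 * (if the control is on the read queue) the socket-buffer accounting.
 * *added is incremented by the number of bytes actually appended.
 */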
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed locks on the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

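/*
 * Reset a control so it can be reused for further reassembly: record the
 * given TSN as the last FSN included and, if the control had already been
 * queued to the application, pull it back off the read queue.
 */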
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

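/*
 * Place a chunk of old-style unordered data into the control, sorted by
 * FSN. A FIRST fragment either starts the control or, if one was already
 * seen, may have to swap places with the data already collected (two
 * messages can share the control in old unordered mode). Anything that
 * cannot be merged yet is inserted into the control's reasm queue.
 */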
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one, even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered mode we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed; yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must do some locking.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
1380 static void
1381 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1382     struct sctp_queued_to_read *control,
1383     struct sctp_tmit_chunk *chk,
1384     int created_control,
1385     int *abort_flag, uint32_t tsn)
1386 {
1387 	uint32_t next_fsn;
1388 	struct sctp_tmit_chunk *at, *nat;
1389 	struct sctp_stream_in *strm;
1390 	int do_wakeup, unordered;
1391 	uint32_t lenadded;
1392 
1393 	strm = &asoc->strmin[control->sinfo_stream];
1394 	/*
1395 	 * For old un-ordered data chunks.
1396 	 */
1397 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1398 		unordered = 1;
1399 	} else {
1400 		unordered = 0;
1401 	}
1402 	/* Must be added to the stream-in queue */
1403 	if (created_control) {
1404 		if ((unordered == 0) || (asoc->idata_supported)) {
1405 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1406 		}
1407 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1408 			/* Duplicate SSN? */
1409 			sctp_abort_in_reasm(stcb, control, chk,
1410 			    abort_flag,
1411 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1412 			sctp_clean_up_control(stcb, control);
1413 			return;
1414 		}
1415 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1416 			/*
1417 			 * Ok we created this control and now lets validate
1418 			 * that its legal i.e. there is a B bit set, if not
1419 			 * and we have up to the cum-ack then its invalid.
1420 			 */
1421 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1422 				sctp_abort_in_reasm(stcb, control, chk,
1423 				    abort_flag,
1424 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1425 				return;
1426 			}
1427 		}
1428 	}
1429 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1430 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1431 		return;
1432 	}
1433 	/*
1434 	 * Ok we must queue the chunk into the reasembly portion: o if its
1435 	 * the first it goes to the control mbuf. o if its not first but the
1436 	 * next in sequence it goes to the control, and each succeeding one
1437 	 * in order also goes. o if its not in order we place it on the list
1438 	 * in its place.
1439 	 */
1440 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1441 		/* Its the very first one. */
1442 		SCTPDBG(SCTP_DEBUG_XXX,
1443 		    "chunk is a first fsn: %u becomes fsn_included\n",
1444 		    chk->rec.data.fsn);
1445 		if (control->first_frag_seen) {
1446 			/*
1447 			 * Error on senders part, they either sent us two
1448 			 * data chunks with FIRST, or they sent two
1449 			 * un-ordered chunks that were fragmented at the
1450 			 * same time in the same stream.
1451 			 */
1452 			sctp_abort_in_reasm(stcb, control, chk,
1453 			    abort_flag,
1454 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1455 			return;
1456 		}
1457 		control->first_frag_seen = 1;
1458 		control->sinfo_ppid = chk->rec.data.ppid;
1459 		control->sinfo_tsn = chk->rec.data.tsn;
1460 		control->fsn_included = chk->rec.data.fsn;
1461 		control->data = chk->data;
1462 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1463 		chk->data = NULL;
1464 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1465 		sctp_setup_tail_pointer(control);
1466 		asoc->size_on_all_streams += control->length;
1467 	} else {
1468 		/* Place the chunk in our list */
1469 		int inserted = 0;
1470 
1471 		if (control->last_frag_seen == 0) {
1472 			/* Still willing to raise highest FSN seen */
1473 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1474 				SCTPDBG(SCTP_DEBUG_XXX,
1475 				    "We have a new top_fsn: %u\n",
1476 				    chk->rec.data.fsn);
1477 				control->top_fsn = chk->rec.data.fsn;
1478 			}
1479 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1480 				SCTPDBG(SCTP_DEBUG_XXX,
1481 				    "The last fsn is now in place fsn: %u\n",
1482 				    chk->rec.data.fsn);
1483 				control->last_frag_seen = 1;
1484 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1485 					SCTPDBG(SCTP_DEBUG_XXX,
1486 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1487 					    chk->rec.data.fsn,
1488 					    control->top_fsn);
1489 					sctp_abort_in_reasm(stcb, control, chk,
1490 					    abort_flag,
1491 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1492 					return;
1493 				}
1494 			}
1495 			if (asoc->idata_supported || control->first_frag_seen) {
1496 				/*
1497 				 * For IDATA we always check since we know
1498 				 * that the first fragment is 0. For old
1499 				 * DATA we have to receive the first before
1500 				 * we know the first FSN (which is the TSN).
1501 				 */
1502 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1503 					/*
1504 					 * We have already delivered up to
1505 					 * this so its a dup
1506 					 */
1507 					sctp_abort_in_reasm(stcb, control, chk,
1508 					    abort_flag,
1509 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1510 					return;
1511 				}
1512 			}
1513 		} else {
1514 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1515 				/* Second last? huh? */
1516 				SCTPDBG(SCTP_DEBUG_XXX,
1517 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1518 				    chk->rec.data.fsn, control->top_fsn);
1519 				sctp_abort_in_reasm(stcb, control,
1520 				    chk, abort_flag,
1521 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1522 				return;
1523 			}
1524 			if (asoc->idata_supported || control->first_frag_seen) {
1525 				/*
1526 				 * For IDATA we always check since we know
1527 				 * that the first fragment is 0. For old
1528 				 * DATA we have to receive the first before
1529 				 * we know the first FSN (which is the TSN).
1530 				 */
1531 
1532 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1533 					/*
1534 					 * We have already delivered up to
1535 					 * this so its a dup
1536 					 */
1537 					SCTPDBG(SCTP_DEBUG_XXX,
1538 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1539 					    chk->rec.data.fsn, control->fsn_included);
1540 					sctp_abort_in_reasm(stcb, control, chk,
1541 					    abort_flag,
1542 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1543 					return;
1544 				}
1545 			}
1546 			/*
1547 			 * validate not beyond top FSN if we have seen last
1548 			 * one
1549 			 */
1550 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1551 				SCTPDBG(SCTP_DEBUG_XXX,
1552 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1553 				    chk->rec.data.fsn,
1554 				    control->top_fsn);
1555 				sctp_abort_in_reasm(stcb, control, chk,
1556 				    abort_flag,
1557 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1558 				return;
1559 			}
1560 		}
1561 		/*
1562 		 * If we reach here, we need to place the new chunk in the
1563 		 * reassembly for this control.
1564 		 */
1565 		SCTPDBG(SCTP_DEBUG_XXX,
1566 		    "chunk is a not first fsn: %u needs to be inserted\n",
1567 		    chk->rec.data.fsn);
1568 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1569 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1570 				/*
1571 				 * This one in queue is bigger than the new
1572 				 * one, insert the new one before at.
1573 				 */
1574 				SCTPDBG(SCTP_DEBUG_XXX,
1575 				    "Insert it before fsn: %u\n",
1576 				    at->rec.data.fsn);
1577 				asoc->size_on_reasm_queue += chk->send_size;
1578 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1579 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1580 				inserted = 1;
1581 				break;
1582 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1583 				/*
1584 				 * Gak, He sent me a duplicate str seq
1585 				 * number
1586 				 */
1587 				/*
1588 				 * foo bar, I guess I will just free this
1589 				 * new guy, should we abort too? FIX ME
1590 				 * MAYBE? Or it COULD be that the SSN's have
1591 				 * wrapped. Maybe I should compare to TSN
1592 				 * somehow... sigh for now just blow away
1593 				 * the chunk!
1594 				 */
1595 				SCTPDBG(SCTP_DEBUG_XXX,
1596 				    "Duplicate to fsn: %u -- abort\n",
1597 				    at->rec.data.fsn);
1598 				sctp_abort_in_reasm(stcb, control,
1599 				    chk, abort_flag,
1600 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1601 				return;
1602 			}
1603 		}
1604 		if (inserted == 0) {
1605 			/* Goes on the end */
1606 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1607 			    chk->rec.data.fsn);
1608 			asoc->size_on_reasm_queue += chk->send_size;
1609 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1610 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1611 		}
1612 	}
1613 	/*
1614 	 * Ok, let's see if we can absorb into the control structure any
1615 	 * queued fragments that are now in sequence.
1616 	 */
1617 	do_wakeup = 0;
1618 	/*
1619 	 * If the first fragment has not been seen there is no sense in
1620 	 * looking.
1621 	 */
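	/*
	 * Example (illustrative numbers, a sketch only): if fsn_included is
	 * 4 and the reasm queue holds fragments 5, 6 and 9, the loop below
	 * absorbs 5 and 6 (fsn_included becomes 6) and then stops at 9,
	 * since 9 is not the next expected FSN.
	 */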
1622 	if (control->first_frag_seen) {
1623 		next_fsn = control->fsn_included + 1;
1624 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1625 			if (at->rec.data.fsn == next_fsn) {
1626 				/* We can add this one now to the control */
1627 				SCTPDBG(SCTP_DEBUG_XXX,
1628 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1629 				    control, at,
1630 				    at->rec.data.fsn,
1631 				    next_fsn, control->fsn_included);
1632 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1633 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1634 				if (control->on_read_q) {
1635 					do_wakeup = 1;
1636 				} else {
1637 					/*
1638 					 * We only add to the
1639 					 * size-on-all-streams if it's not on
1640 					 * the read q. The read q flag will
1641 					 * cause a sballoc so it's accounted
1642 					 * for there.
1643 					 */
1644 					asoc->size_on_all_streams += lenadded;
1645 				}
1646 				next_fsn++;
1647 				if (control->end_added && control->pdapi_started) {
1648 					if (strm->pd_api_started) {
1649 						strm->pd_api_started = 0;
1650 						control->pdapi_started = 0;
1651 					}
1652 					if (control->on_read_q == 0) {
1653 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1654 						    control,
1655 						    &stcb->sctp_socket->so_rcv, control->end_added,
1656 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1657 					}
1658 					break;
1659 				}
1660 			} else {
1661 				break;
1662 			}
1663 		}
1664 	}
1665 	if (do_wakeup) {
1666 		/* Need to wakeup the reader */
1667 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1668 	}
1669 }
1670 
1671 static struct sctp_queued_to_read *
1672 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1673 {
1674 	struct sctp_queued_to_read *control;
1675 
1676 	if (ordered) {
1677 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1678 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1679 				break;
1680 			}
1681 		}
1682 	} else {
1683 		if (idata_supported) {
1684 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1685 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1686 					break;
1687 				}
1688 			}
1689 		} else {
1690 			control = TAILQ_FIRST(&strm->uno_inqueue);
1691 		}
1692 	}
1693 	return (control);
1694 }
1695 
1696 static int
1697 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1698     struct mbuf **m, int offset, int chk_length,
1699     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1700     int *break_flag, int last_chunk, uint8_t chk_type)
1701 {
1702 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1703 	uint32_t tsn, fsn, gap, mid;
1704 	struct mbuf *dmbuf;
1705 	int the_len;
1706 	int need_reasm_check = 0;
1707 	uint16_t sid;
1708 	struct mbuf *op_err;
1709 	char msg[SCTP_DIAG_INFO_LEN];
1710 	struct sctp_queued_to_read *control, *ncontrol;
1711 	uint32_t ppid;
1712 	uint8_t chk_flags;
1713 	struct sctp_stream_reset_list *liste;
1714 	int ordered;
1715 	size_t clen;
1716 	int created_control = 0;
1717 
1718 	if (chk_type == SCTP_IDATA) {
1719 		struct sctp_idata_chunk *chunk, chunk_buf;
1720 
1721 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1722 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1723 		chk_flags = chunk->ch.chunk_flags;
1724 		clen = sizeof(struct sctp_idata_chunk);
1725 		tsn = ntohl(chunk->dp.tsn);
1726 		sid = ntohs(chunk->dp.sid);
1727 		mid = ntohl(chunk->dp.mid);
1728 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1729 			fsn = 0;
1730 			ppid = chunk->dp.ppid_fsn.ppid;
1731 		} else {
1732 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1733 			ppid = 0xffffffff;	/* Use as an invalid value. */
1734 		}
1735 	} else {
1736 		struct sctp_data_chunk *chunk, chunk_buf;
1737 
1738 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1739 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1740 		chk_flags = chunk->ch.chunk_flags;
1741 		clen = sizeof(struct sctp_data_chunk);
1742 		tsn = ntohl(chunk->dp.tsn);
1743 		sid = ntohs(chunk->dp.sid);
1744 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1745 		fsn = tsn;
1746 		ppid = chunk->dp.ppid;
1747 	}
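	/*
	 * Recap of the two layouts parsed above (a sketch): a DATA chunk
	 * carries tsn/sid/ssn/ppid and its FSN is implicitly the TSN, while
	 * an I-DATA chunk carries tsn/sid/mid plus a shared ppid/fsn field
	 * that holds the PPID in the first fragment and the FSN in every
	 * later fragment, which is why fsn is forced to 0 for a first
	 * fragment.
	 */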
1748 	if ((size_t)chk_length == clen) {
1749 		/*
1750 		 * Need to send an abort since we had an empty data chunk.
1751 		 */
1752 		op_err = sctp_generate_no_user_data_cause(tsn);
1753 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1754 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1755 		*abort_flag = 1;
1756 		return (0);
1757 	}
1758 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1759 		asoc->send_sack = 1;
1760 	}
1761 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1762 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1763 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1764 	}
1765 	if (stcb == NULL) {
1766 		return (0);
1767 	}
1768 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1769 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1770 		/* It is a duplicate */
1771 		SCTP_STAT_INCR(sctps_recvdupdata);
1772 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1773 			/* Record a dup for the next outbound sack */
1774 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1775 			asoc->numduptsns++;
1776 		}
1777 		asoc->send_sack = 1;
1778 		return (0);
1779 	}
1780 	/* Calculate the number of TSNs between the base and this TSN */
1781 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
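	/*
	 * Worked example (a sketch; the gap is computed in serial number
	 * arithmetic, wrapping mod 2^32): with mapping_array_base_tsn =
	 * 0xfffffffe and tsn = 0x00000001 the gap is 3, so this TSN would
	 * occupy bit 3 of byte 0 of the mapping array.
	 */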
1782 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1783 		/* Gap exceeds the maximum mapping array size, toss it */
1784 		return (0);
1785 	}
1786 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1787 		SCTP_TCB_LOCK_ASSERT(stcb);
1788 		if (sctp_expand_mapping_array(asoc, gap)) {
1789 			/* Can't expand, drop it */
1790 			return (0);
1791 		}
1792 	}
1793 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1794 		*high_tsn = tsn;
1795 	}
1796 	/* See if we have received this one already */
1797 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1798 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1799 		SCTP_STAT_INCR(sctps_recvdupdata);
1800 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1801 			/* Record a dup for the next outbound sack */
1802 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1803 			asoc->numduptsns++;
1804 		}
1805 		asoc->send_sack = 1;
1806 		return (0);
1807 	}
1808 	/*
1809 	 * Check the GONE flag. Duplicates would already have caused a SACK
1810 	 * to be sent up above.
1811 	 */
1812 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1813 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1814 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1815 		/*
1816 		 * The socket is gone, so there is no longer a receiver.
1817 		 * Send the peer an ABORT!
1818 		 */
1819 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1820 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1821 		*abort_flag = 1;
1822 		return (0);
1823 	}
1824 	/*
1825 	 * Now before going further we see if there is room. If NOT then we
1826 	 * MAY let one through only IF this TSN is the one we are waiting
1827 	 * for on a partial delivery API.
1828 	 */
1829 
1830 	/* Is the stream valid? */
1831 	if (sid >= asoc->streamincnt) {
1832 		struct sctp_error_invalid_stream *cause;
1833 
1834 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1835 		    0, M_NOWAIT, 1, MT_DATA);
1836 		if (op_err != NULL) {
1837 			/* add some space up front so prepend will work well */
1838 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1839 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1840 			/*
1841 			 * Error causes are just parameters; this one has two
1842 			 * back-to-back parameter headers: one with the error
1843 			 * type and size, the other with the stream id and a reserved field.
1844 			 */
1845 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1846 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1847 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1848 			cause->stream_id = htons(sid);
1849 			cause->reserved = htons(0);
1850 			sctp_queue_op_err(stcb, op_err);
1851 		}
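		/*
		 * Wire image of the cause built above (a sketch; cause code
		 * 1 is Invalid Stream Identifier):
		 *
		 *	+------------------+------------------+
		 *	| Cause Code = 1   | Cause Length = 8 |
		 *	+------------------+------------------+
		 *	| Stream Id (sid)  | Reserved = 0     |
		 *	+------------------+------------------+
		 */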
1852 		SCTP_STAT_INCR(sctps_badsid);
1853 		SCTP_TCB_LOCK_ASSERT(stcb);
1854 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1855 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1856 			asoc->highest_tsn_inside_nr_map = tsn;
1857 		}
1858 		if (tsn == (asoc->cumulative_tsn + 1)) {
1859 			/* Update cum-ack */
1860 			asoc->cumulative_tsn = tsn;
1861 		}
1862 		return (0);
1863 	}
1864 	/*
1865 	 * If it's a fragmented message, let's see if we can find the
1866 	 * control on the reassembly queues.
1867 	 */
1868 	if ((chk_type == SCTP_IDATA) &&
1869 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1870 	    (fsn == 0)) {
1871 		/*
1872 		 * The first *must* be fsn 0, and other (middle/end) pieces
1873 		 * can *not* be fsn 0. XXX: This can happen in case of a
1874 		 * wrap around. Ignore this for now.
1875 		 */
1876 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1877 		goto err_out;
1878 	}
1879 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1880 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1881 	    chk_flags, control);
1882 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1883 		/* See if we can find the re-assembly entity */
1884 		if (control != NULL) {
1885 			/* We found something, does it belong? */
1886 			if (ordered && (mid != control->mid)) {
1887 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1888 		err_out:
1889 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1890 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1891 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1892 				*abort_flag = 1;
1893 				return (0);
1894 			}
1895 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1896 				/*
1897 				 * We can't have a switched order with an
1898 				 * unordered chunk
1899 				 */
1900 				SCTP_SNPRINTF(msg, sizeof(msg),
1901 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1902 				    tsn);
1903 				goto err_out;
1904 			}
1905 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1906 				/*
1907 				 * We can't have a switched unordered with an
1908 				 * ordered chunk
1909 				 */
1910 				SCTP_SNPRINTF(msg, sizeof(msg),
1911 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1912 				    tsn);
1913 				goto err_out;
1914 			}
1915 		}
1916 	} else {
1917 		/*
1918 		 * It's a complete segment. Let's validate we don't have a
1919 		 * re-assembly going on with the same Stream/Seq (for
1920 		 * ordered) or in the same Stream for unordered.
1921 		 */
1922 		if (control != NULL) {
1923 			if (ordered || asoc->idata_supported) {
1924 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1925 				    chk_flags, mid);
1926 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1927 				goto err_out;
1928 			} else {
1929 				if ((tsn == control->fsn_included + 1) &&
1930 				    (control->end_added == 0)) {
1931 					SCTP_SNPRINTF(msg, sizeof(msg),
1932 					    "Illegal message sequence, missing end for MID: %8.8x",
1933 					    control->fsn_included);
1934 					goto err_out;
1935 				} else {
1936 					control = NULL;
1937 				}
1938 			}
1939 		}
1940 	}
1941 	/* now do the tests */
1942 	if (((asoc->cnt_on_all_streams +
1943 	    asoc->cnt_on_reasm_queue +
1944 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1945 	    (((int)asoc->my_rwnd) <= 0)) {
1946 		/*
1947 		 * When we have NO room in the rwnd we check to make sure
1948 		 * the reader is doing its job...
1949 		 */
1950 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1951 			/* some to read, wake-up */
1952 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1953 		}
1954 		/* now is it in the mapping array of what we have accepted? */
1955 		if (chk_type == SCTP_DATA) {
1956 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1957 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1958 				/* Nope, not in the valid range, dump it */
1959 		dump_packet:
1960 				sctp_set_rwnd(stcb, asoc);
1961 				if ((asoc->cnt_on_all_streams +
1962 				    asoc->cnt_on_reasm_queue +
1963 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1964 					SCTP_STAT_INCR(sctps_datadropchklmt);
1965 				} else {
1966 					SCTP_STAT_INCR(sctps_datadroprwnd);
1967 				}
1968 				*break_flag = 1;
1969 				return (0);
1970 			}
1971 		} else {
1972 			if (control == NULL) {
1973 				goto dump_packet;
1974 			}
1975 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1976 				goto dump_packet;
1977 			}
1978 		}
1979 	}
1980 #ifdef SCTP_ASOCLOG_OF_TSNS
1981 	SCTP_TCB_LOCK_ASSERT(stcb);
1982 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1983 		asoc->tsn_in_at = 0;
1984 		asoc->tsn_in_wrapped = 1;
1985 	}
1986 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1987 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1988 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1989 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1990 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1991 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1992 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1993 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1994 	asoc->tsn_in_at++;
1995 #endif
1996 	/*
1997 	 * Before we continue, let's validate that we are not being fooled
1998 	 * by an evil attacker. We can only have N * 8 chunks outstanding,
1999 	 * where N is the mapping array size in bytes, so there is no way
2000 	 * our stream sequence numbers could legitimately have wrapped. We
2001 	 * of course only validate the FIRST fragment, so the bit must be set.
2002 	 */
2003 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2004 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2005 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2006 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2007 		/* The incoming sseq is behind where we last delivered? */
2008 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2009 		    mid, asoc->strmin[sid].last_mid_delivered);
2010 
2011 		if (asoc->idata_supported) {
2012 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2013 			    asoc->strmin[sid].last_mid_delivered,
2014 			    tsn,
2015 			    sid,
2016 			    mid);
2017 		} else {
2018 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2019 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2020 			    tsn,
2021 			    sid,
2022 			    (uint16_t)mid);
2023 		}
2024 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2025 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2026 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2027 		*abort_flag = 1;
2028 		return (0);
2029 	}
2030 	if (chk_type == SCTP_IDATA) {
2031 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2032 	} else {
2033 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2034 	}
2035 	if (last_chunk == 0) {
2036 		if (chk_type == SCTP_IDATA) {
2037 			dmbuf = SCTP_M_COPYM(*m,
2038 			    (offset + sizeof(struct sctp_idata_chunk)),
2039 			    the_len, M_NOWAIT);
2040 		} else {
2041 			dmbuf = SCTP_M_COPYM(*m,
2042 			    (offset + sizeof(struct sctp_data_chunk)),
2043 			    the_len, M_NOWAIT);
2044 		}
2045 #ifdef SCTP_MBUF_LOGGING
2046 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2047 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2048 		}
2049 #endif
2050 	} else {
2051 		/* We can steal the last chunk */
2052 		int l_len;
2053 
2054 		dmbuf = *m;
2055 		/* lop off the top part */
2056 		if (chk_type == SCTP_IDATA) {
2057 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2058 		} else {
2059 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2060 		}
2061 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2062 			l_len = SCTP_BUF_LEN(dmbuf);
2063 		} else {
2064 			/*
2065 			 * need to count up the size; hopefully we do not
2066 			 * hit this too often
2067 			 */
2068 			struct mbuf *lat;
2069 
2070 			l_len = 0;
2071 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2072 				l_len += SCTP_BUF_LEN(lat);
2073 			}
2074 		}
2075 		if (l_len > the_len) {
2076 			/* Trim the rounding bytes off the end too */
2077 			m_adj(dmbuf, -(l_len - the_len));
2078 		}
2079 	}
2080 	if (dmbuf == NULL) {
2081 		SCTP_STAT_INCR(sctps_nomem);
2082 		return (0);
2083 	}
2084 	/*
2085 	 * Now no matter what, we need a control, get one if we don't have
2086 	 * one (we may have gotten it above when we found the message was
2087 	 * fragmented).
2088 	 */
2089 	if (control == NULL) {
2090 		sctp_alloc_a_readq(stcb, control);
2091 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2092 		    ppid,
2093 		    sid,
2094 		    chk_flags,
2095 		    NULL, fsn, mid);
2096 		if (control == NULL) {
2097 			SCTP_STAT_INCR(sctps_nomem);
2098 			return (0);
2099 		}
2100 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2101 			struct mbuf *mm;
2102 
2103 			control->data = dmbuf;
2104 			control->tail_mbuf = NULL;
2105 			for (mm = control->data; mm; mm = mm->m_next) {
2106 				control->length += SCTP_BUF_LEN(mm);
2107 				if (SCTP_BUF_NEXT(mm) == NULL) {
2108 					control->tail_mbuf = mm;
2109 				}
2110 			}
2111 			control->end_added = 1;
2112 			control->last_frag_seen = 1;
2113 			control->first_frag_seen = 1;
2114 			control->fsn_included = fsn;
2115 			control->top_fsn = fsn;
2116 		}
2117 		created_control = 1;
2118 	}
2119 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2120 	    chk_flags, ordered, mid, control);
2121 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2122 	    TAILQ_EMPTY(&asoc->resetHead) &&
2123 	    ((ordered == 0) ||
2124 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2125 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2126 		/* Candidate for express delivery */
2127 		/*
2128 		 * It's not fragmented, no PD-API is up, nothing in the
2129 		 * delivery queue, it's un-ordered OR ordered and the next
2130 		 * to deliver AND nothing else is stuck on the stream
2131 		 * queue, and there is room for it in the socket buffer.
2132 		 * Let's just
2133 		 */
2134 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2135 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2136 			asoc->highest_tsn_inside_nr_map = tsn;
2137 		}
2138 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2139 		    control, mid);
2140 
2141 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2142 		    control, &stcb->sctp_socket->so_rcv,
2143 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2144 
2145 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2146 			/* for ordered, bump what we delivered */
2147 			asoc->strmin[sid].last_mid_delivered++;
2148 		}
2149 		SCTP_STAT_INCR(sctps_recvexpress);
2150 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2151 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2152 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2153 		}
2154 		control = NULL;
2155 		goto finish_express_del;
2156 	}
2157 
2158 	/* Now will we need a chunk too? */
2159 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2160 		sctp_alloc_a_chunk(stcb, chk);
2161 		if (chk == NULL) {
2162 			/* No memory so we drop the chunk */
2163 			SCTP_STAT_INCR(sctps_nomem);
2164 			if (last_chunk == 0) {
2165 				/* we copied it, free the copy */
2166 				sctp_m_freem(dmbuf);
2167 			}
2168 			return (0);
2169 		}
2170 		chk->rec.data.tsn = tsn;
2171 		chk->no_fr_allowed = 0;
2172 		chk->rec.data.fsn = fsn;
2173 		chk->rec.data.mid = mid;
2174 		chk->rec.data.sid = sid;
2175 		chk->rec.data.ppid = ppid;
2176 		chk->rec.data.context = stcb->asoc.context;
2177 		chk->rec.data.doing_fast_retransmit = 0;
2178 		chk->rec.data.rcv_flags = chk_flags;
2179 		chk->asoc = asoc;
2180 		chk->send_size = the_len;
2181 		chk->whoTo = net;
2182 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2183 		    chk,
2184 		    control, mid);
2185 		atomic_add_int(&net->ref_count, 1);
2186 		chk->data = dmbuf;
2187 	}
2188 	/* Set the appropriate TSN mark */
2189 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2190 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2191 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2192 			asoc->highest_tsn_inside_nr_map = tsn;
2193 		}
2194 	} else {
2195 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2196 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2197 			asoc->highest_tsn_inside_map = tsn;
2198 		}
2199 	}
2200 	/* Now is it complete (i.e. not fragmented)? */
2201 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2202 		/*
2203 		 * Special check for when streams are resetting. We could
2204 		 * be smarter about this and check the actual stream to see
2205 		 * if it is not being reset... that way we would not create
2206 		 * head-of-line blocking between streams being reset and
2207 		 * those not being reset.
2208 		 *
2209 		 */
2210 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2211 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2212 			/*
2213 			 * yep, it's past where we need to reset... go ahead
2214 			 * and queue it.
2215 			 */
2216 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2217 				/* first one on */
2218 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2219 			} else {
2220 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2221 				unsigned char inserted = 0;
2222 
2223 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2224 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2226 						continue;
2227 					} else {
2228 						/* found it */
2229 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2230 						inserted = 1;
2231 						break;
2232 					}
2233 				}
2234 				if (inserted == 0) {
2235 					/*
2236 					 * not inserted before any existing
2237 					 * entry, so it must be put at the
2238 					 * end
2239 					 */
2240 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2241 				}
2242 			}
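			/*
			 * Insertion example (illustrative): if the pending
			 * queue holds TSNs 10, 12 and 15 and the new
			 * control carries TSN 13, the loop above skips 10
			 * and 12 and inserts before 15, leaving 10, 12,
			 * 13, 15.
			 */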
2243 			goto finish_express_del;
2244 		}
2245 		if (chk_flags & SCTP_DATA_UNORDERED) {
2246 			/* queue directly into socket buffer */
2247 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2248 			    control, mid);
2249 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2250 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2251 			    control,
2252 			    &stcb->sctp_socket->so_rcv, 1,
2253 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2254 
2255 		} else {
2256 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2257 			    mid);
2258 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2259 			if (*abort_flag) {
2260 				if (last_chunk) {
2261 					*m = NULL;
2262 				}
2263 				return (0);
2264 			}
2265 		}
2266 		goto finish_express_del;
2267 	}
2268 	/* If we reach here it's a reassembly */
2269 	need_reasm_check = 1;
2270 	SCTPDBG(SCTP_DEBUG_XXX,
2271 	    "Queue data to stream for reasm control: %p MID: %u\n",
2272 	    control, mid);
2273 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2274 	if (*abort_flag) {
2275 		/*
2276 		 * the assoc is now gone and chk was put onto the reasm
2277 		 * queue, which has all been freed.
2278 		 */
2279 		if (last_chunk) {
2280 			*m = NULL;
2281 		}
2282 		return (0);
2283 	}
2284 finish_express_del:
2285 	/* Here we tidy up things */
2286 	if (tsn == (asoc->cumulative_tsn + 1)) {
2287 		/* Update cum-ack */
2288 		asoc->cumulative_tsn = tsn;
2289 	}
2290 	if (last_chunk) {
2291 		*m = NULL;
2292 	}
2293 	if (ordered) {
2294 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2295 	} else {
2296 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2297 	}
2298 	SCTP_STAT_INCR(sctps_recvdata);
2299 	/* Set it present please */
2300 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2301 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2302 	}
2303 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2304 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2305 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2306 	}
2307 	if (need_reasm_check) {
2308 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2309 		need_reasm_check = 0;
2310 	}
2311 	/* check the special flag for stream resets */
2312 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2313 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2314 		/*
2315 		 * we have finished working through the backlogged TSNs; now
2316 		 * time to reset streams. 1: call reset function. 2: free
2317 		 * pending_reply space. 3: distribute any chunks in
2318 		 * pending_reply_queue.
2319 		 */
2320 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2321 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2322 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2323 		SCTP_FREE(liste, SCTP_M_STRESET);
2324 		/* sa_ignore FREED_MEMORY */
2325 		liste = TAILQ_FIRST(&asoc->resetHead);
2326 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2327 			/* All can be removed */
2328 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2329 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2330 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2331 				if (*abort_flag) {
2332 					return (0);
2333 				}
2334 				if (need_reasm_check) {
2335 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2336 					need_reasm_check = 0;
2337 				}
2338 			}
2339 		} else {
2340 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2341 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2342 					break;
2343 				}
2344 				/*
2345 				 * if control->sinfo_tsn <= liste->tsn we can
2346 				 * process it, which is the negation of the
2347 				 * break condition above
2348 				 */
2349 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2350 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2351 				if (*abort_flag) {
2352 					return (0);
2353 				}
2354 				if (need_reasm_check) {
2355 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2356 					need_reasm_check = 0;
2357 				}
2358 			}
2359 		}
2360 	}
2361 	return (1);
2362 }
2363 
2364 static const int8_t sctp_map_lookup_tab[256] = {
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 5,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 6,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 4,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 5,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 7,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 4,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 5,
2385 	0, 1, 0, 2, 0, 1, 0, 3,
2386 	0, 1, 0, 2, 0, 1, 0, 4,
2387 	0, 1, 0, 2, 0, 1, 0, 3,
2388 	0, 1, 0, 2, 0, 1, 0, 6,
2389 	0, 1, 0, 2, 0, 1, 0, 3,
2390 	0, 1, 0, 2, 0, 1, 0, 4,
2391 	0, 1, 0, 2, 0, 1, 0, 3,
2392 	0, 1, 0, 2, 0, 1, 0, 5,
2393 	0, 1, 0, 2, 0, 1, 0, 3,
2394 	0, 1, 0, 2, 0, 1, 0, 4,
2395 	0, 1, 0, 2, 0, 1, 0, 3,
2396 	0, 1, 0, 2, 0, 1, 0, 8
2397 };
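/*
 * A reading aid (a sketch of the table's meaning): sctp_map_lookup_tab[val]
 * is the number of consecutive 1 bits in val counting up from bit 0, e.g.
 * 0x07 -> 3, 0x0f -> 4, 0xff -> 8 and any even value -> 0. The slide loop
 * below adds 8 for every all-ones byte and stops at the first byte with a
 * 0 bit, so "at" ends up as the length of the solid run of received TSNs
 * starting at mapping_array_base_tsn.
 */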
2398 
2400 void
2401 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2402 {
2403 	/*
2404 	 * Now we also need to check the mapping array in a couple of ways.
2405 	 * 1) Did we move the cum-ack point?
2406 	 *
2407 	 * When you first glance at this you might think that all entries
2408 	 * that make up the position of the cum-ack would be in the
2409 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2410 	 * deliverable. That's true with one exception: when it's a fragmented
2411 	 * message we may not deliver the data until some threshold (or all
2412 	 * of it) is in place. So we must OR the nr_mapping_array and
2413 	 * mapping_array to get a true picture of the cum-ack.
2414 	 */
2415 	struct sctp_association *asoc;
2416 	int at;
2417 	uint8_t val;
2418 	int slide_from, slide_end, lgap, distance;
2419 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2420 
2421 	asoc = &stcb->asoc;
2422 
2423 	old_cumack = asoc->cumulative_tsn;
2424 	old_base = asoc->mapping_array_base_tsn;
2425 	old_highest = asoc->highest_tsn_inside_map;
2426 	/*
2427 	 * We could probably improve this a small bit by calculating the
2428 	 * offset of the current cum-ack as the starting point.
2429 	 */
2430 	at = 0;
2431 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2432 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2433 		if (val == 0xff) {
2434 			at += 8;
2435 		} else {
2436 			/* there is a 0 bit */
2437 			at += sctp_map_lookup_tab[val];
2438 			break;
2439 		}
2440 	}
2441 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
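	/*
	 * Worked example (illustrative): with mapping_array_base_tsn = 100
	 * and OR'd bytes ff ff 3f ..., at = 8 + 8 + 6 = 22, so the new
	 * cumulative_tsn is 100 + 21 = 121, the last TSN of the solid run.
	 */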
2442 
2443 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2444 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2445 #ifdef INVARIANTS
2446 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2447 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2448 #else
2449 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2450 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2451 		sctp_print_mapping_array(asoc);
2452 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2453 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2454 		}
2455 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2456 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2457 #endif
2458 	}
2459 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2460 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2461 	} else {
2462 		highest_tsn = asoc->highest_tsn_inside_map;
2463 	}
2464 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2465 		/* The whole array was covered by a single solid run */
2466 		/* highest becomes the cum-ack */
2467 		int clr;
2468 #ifdef INVARIANTS
2469 		unsigned int i;
2470 #endif
2471 
2472 		/* clear the array */
2473 		clr = ((at + 7) >> 3);
2474 		if (clr > asoc->mapping_array_size) {
2475 			clr = asoc->mapping_array_size;
2476 		}
2477 		memset(asoc->mapping_array, 0, clr);
2478 		memset(asoc->nr_mapping_array, 0, clr);
2479 #ifdef INVARIANTS
2480 		for (i = 0; i < asoc->mapping_array_size; i++) {
2481 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2482 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2483 				sctp_print_mapping_array(asoc);
2484 			}
2485 		}
2486 #endif
2487 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2488 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2489 	} else if (at >= 8) {
2490 		/* we can slide the mapping array down */
2491 		/* slide_from holds where we hit the first NON 0xff byte */
2492 
2493 		/*
2494 		 * now calculate the ceiling of the move using our highest
2495 		 * TSN value
2496 		 */
2497 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2498 		slide_end = (lgap >> 3);
2499 		if (slide_end < slide_from) {
2500 			sctp_print_mapping_array(asoc);
2501 #ifdef INVARIANTS
2502 			panic("impossible slide");
2503 #else
2504 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2505 			    lgap, slide_end, slide_from, at);
2506 			return;
2507 #endif
2508 		}
2509 		if (slide_end > asoc->mapping_array_size) {
2510 #ifdef INVARIANTS
2511 			panic("would overrun buffer");
2512 #else
2513 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2514 			    asoc->mapping_array_size, slide_end);
2515 			slide_end = asoc->mapping_array_size;
2516 #endif
2517 		}
2518 		distance = (slide_end - slide_from) + 1;
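		/*
		 * Example (illustrative): with mapping_array_size = 16,
		 * slide_from = 4 (the first byte with a 0 bit) and lgap =
		 * 90, slide_end = 11 and distance = 8; bytes 4..11 are
		 * copied down to 0..7 below and mapping_array_base_tsn
		 * advances by slide_from * 8 = 32 TSNs.
		 */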
2519 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 			sctp_log_map(old_base, old_cumack, old_highest,
2521 			    SCTP_MAP_PREPARE_SLIDE);
2522 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2523 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2524 		}
2525 		if (distance + slide_from > asoc->mapping_array_size ||
2526 		    distance < 0) {
2527 			/*
2528 			 * Here we do NOT slide forward the array so that
2529 			 * hopefully when more data comes in to fill it up
2530 			 * we will be able to slide it forward. Really I
2531 			 * don't think this should happen :-0
2532 			 */
2533 
2534 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2535 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2536 				    (uint32_t)asoc->mapping_array_size,
2537 				    SCTP_MAP_SLIDE_NONE);
2538 			}
2539 		} else {
2540 			int ii;
2541 
2542 			for (ii = 0; ii < distance; ii++) {
2543 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2544 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2545 
2546 			}
2547 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2548 				asoc->mapping_array[ii] = 0;
2549 				asoc->nr_mapping_array[ii] = 0;
2550 			}
2551 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2552 				asoc->highest_tsn_inside_map += (slide_from << 3);
2553 			}
2554 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2555 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2556 			}
2557 			asoc->mapping_array_base_tsn += (slide_from << 3);
2558 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2559 				sctp_log_map(asoc->mapping_array_base_tsn,
2560 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2561 				    SCTP_MAP_SLIDE_RESULT);
2562 			}
2563 		}
2564 	}
2565 }
2566 
2567 void
2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2569 {
2570 	struct sctp_association *asoc;
2571 	uint32_t highest_tsn;
2572 	int is_a_gap;
2573 
2574 	sctp_slide_mapping_arrays(stcb);
2575 	asoc = &stcb->asoc;
2576 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2577 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2578 	} else {
2579 		highest_tsn = asoc->highest_tsn_inside_map;
2580 	}
2581 	/* Is there a gap now? */
2582 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2583 
2584 	/*
2585 	 * Now we need to see if we need to queue a sack or just start the
2586 	 * timer (if allowed).
2587 	 */
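	/*
	 * Decision sketch (a paraphrase of the code below, not normative):
	 * send a SACK right away if send_sack was forced, a gap just
	 * closed, duplicate TSNs are pending, a gap still exists, delayed
	 * acks are disabled, or sack_freq packets have been seen;
	 * otherwise (re)start the delayed-ack timer. With CMT DAC, acks
	 * owed only to gap reports may still be delayed.
	 */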
2588 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2589 		/*
2590 		 * Ok, special case for the SHUTDOWN-SENT state: here we make
2591 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2592 		 * a SACK
2593 		 */
2594 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2595 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2596 			    stcb->sctp_ep, stcb, NULL,
2597 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2598 		}
2599 		sctp_send_shutdown(stcb,
2600 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2601 		if (is_a_gap) {
2602 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2603 		}
2604 	} else {
2605 		/*
2606 		 * CMT DAC algorithm: increase number of packets received
2607 		 * since last ack
2608 		 */
2609 		stcb->asoc.cmt_dac_pkts_rcvd++;
2610 
2611 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2612 							 * SACK */
2613 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2614 							 * longer is one */
2615 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2616 		    (is_a_gap) ||	/* is still a gap */
2617 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2618 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2619 		    ) {
2620 
2621 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2622 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2623 			    (stcb->asoc.send_sack == 0) &&
2624 			    (stcb->asoc.numduptsns == 0) &&
2625 			    (stcb->asoc.delayed_ack) &&
2626 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2627 
2628 				/*
2629 				 * CMT DAC algorithm: with CMT, delay acks
2630 				 * even in the face of reordering.
2631 				 * Therefore, acks that do not have to be
2632 				 * sent because of the above reasons will be
2633 				 * delayed. That is, acks that would have
2634 				 * been sent due to gap reports will be
2635 				 * delayed with DAC. Start the delayed ack
2636 				 * timer.
2638 				 */
2639 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2640 				    stcb->sctp_ep, stcb, NULL);
2641 			} else {
2642 				/*
2643 				 * Ok we must build a SACK since the timer
2644 				 * is pending, we got our first packet OR
2645 				 * there are gaps or duplicates.
2646 				 */
2647 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2648 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2649 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2650 			}
2651 		} else {
2652 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2653 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2654 				    stcb->sctp_ep, stcb, NULL);
2655 			}
2656 		}
2657 	}
2658 }
2659 
2660 int
2661 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2662     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2663     struct sctp_nets *net, uint32_t *high_tsn)
2664 {
2665 	struct sctp_chunkhdr *ch, chunk_buf;
2666 	struct sctp_association *asoc;
2667 	int num_chunks = 0;	/* number of control chunks processed */
2668 	int stop_proc = 0;
2669 	int break_flag, last_chunk;
2670 	int abort_flag = 0, was_a_gap;
2671 	struct mbuf *m;
2672 	uint32_t highest_tsn;
2673 	uint16_t chk_length;
2674 
2675 	/* set the rwnd */
2676 	sctp_set_rwnd(stcb, &stcb->asoc);
2677 
2678 	m = *mm;
2679 	SCTP_TCB_LOCK_ASSERT(stcb);
2680 	asoc = &stcb->asoc;
2681 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2682 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2683 	} else {
2684 		highest_tsn = asoc->highest_tsn_inside_map;
2685 	}
2686 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2687 	/*
2688 	 * setup where we got the last DATA packet from for any SACK that
2689 	 * may need to go out. Don't bump the net. This is done ONLY when a
2690 	 * chunk is assigned.
2691 	 */
2692 	asoc->last_data_chunk_from = net;
2693 
2694 	/*-
2695 	 * Now before we proceed we must figure out if this is a wasted
2696 	 * cluster... i.e. it is a small packet sent in and yet the driver
2697 	 * underneath allocated a full cluster for it. If so we must copy it
2698 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2699 	 * with cluster starvation.
2700 	 */
2701 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2702 		/* we only handle mbufs that are singletons, not chains */
2703 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2704 		if (m) {
2705 			/* ok, let's see if we can copy the data up */
2706 			caddr_t *from, *to;
2707 
2708 			/* get the pointers and copy */
2709 			to = mtod(m, caddr_t *);
2710 			from = mtod((*mm), caddr_t *);
2711 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2712 			/* copy the length and free up the old */
2713 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2714 			sctp_m_freem(*mm);
2715 			/* success, back copy */
2716 			*mm = m;
2717 		} else {
2718 			/* We are in trouble in the mbuf world .. yikes */
2719 			m = *mm;
2720 		}
2721 	}
2722 	/* get pointer to the first chunk header */
2723 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2724 	    sizeof(struct sctp_chunkhdr),
2725 	    (uint8_t *)&chunk_buf);
2726 	if (ch == NULL) {
2727 		return (1);
2728 	}
2729 	/*
2730 	 * process all DATA chunks...
2731 	 */
2732 	*high_tsn = asoc->cumulative_tsn;
2733 	break_flag = 0;
2734 	asoc->data_pkts_seen++;
2735 	while (stop_proc == 0) {
2736 		/* validate chunk length */
2737 		chk_length = ntohs(ch->chunk_length);
2738 		if (length - *offset < chk_length) {
2739 			/* all done, mutilated chunk */
2740 			stop_proc = 1;
2741 			continue;
2742 		}
2743 		if ((asoc->idata_supported == 1) &&
2744 		    (ch->chunk_type == SCTP_DATA)) {
2745 			struct mbuf *op_err;
2746 			char msg[SCTP_DIAG_INFO_LEN];
2747 
2748 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2749 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2751 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2752 			return (2);
2753 		}
2754 		if ((asoc->idata_supported == 0) &&
2755 		    (ch->chunk_type == SCTP_IDATA)) {
2756 			struct mbuf *op_err;
2757 			char msg[SCTP_DIAG_INFO_LEN];
2758 
2759 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2760 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2761 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2762 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2763 			return (2);
2764 		}
2765 		if ((ch->chunk_type == SCTP_DATA) ||
2766 		    (ch->chunk_type == SCTP_IDATA)) {
2767 			uint16_t clen;
2768 
2769 			if (ch->chunk_type == SCTP_DATA) {
2770 				clen = sizeof(struct sctp_data_chunk);
2771 			} else {
2772 				clen = sizeof(struct sctp_idata_chunk);
2773 			}
2774 			if (chk_length < clen) {
2775 				/*
2776 				 * Need to send an abort since we had an
2777 				 * invalid data chunk.
2778 				 */
2779 				struct mbuf *op_err;
2780 				char msg[SCTP_DIAG_INFO_LEN];
2781 
2782 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2783 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2784 				    chk_length);
2785 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2786 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2787 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2788 				return (2);
2789 			}
2790 #ifdef SCTP_AUDITING_ENABLED
2791 			sctp_audit_log(0xB1, 0);
2792 #endif
2793 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2794 				last_chunk = 1;
2795 			} else {
2796 				last_chunk = 0;
2797 			}
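			/*
			 * Example (illustrative): a chunk with chk_length
			 * = 27 occupies SCTP_SIZE32(27) = 28 bytes on the
			 * wire, so if exactly 28 bytes remain it is the
			 * last chunk and its mbuf chain may be stolen
			 * instead of copied.
			 */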
2798 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2799 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2800 			    last_chunk, ch->chunk_type)) {
2801 				num_chunks++;
2802 			}
2803 			if (abort_flag)
2804 				return (2);
2805 
2806 			if (break_flag) {
2807 				/*
2808 				 * Set because we ran out of rwnd space and
2809 				 * have no drop report space left.
2810 				 */
2811 				stop_proc = 1;
2812 				continue;
2813 			}
2814 		} else {
2815 			/* not a data chunk in the data region */
2816 			switch (ch->chunk_type) {
2817 			case SCTP_INITIATION:
2818 			case SCTP_INITIATION_ACK:
2819 			case SCTP_SELECTIVE_ACK:
2820 			case SCTP_NR_SELECTIVE_ACK:
2821 			case SCTP_HEARTBEAT_REQUEST:
2822 			case SCTP_HEARTBEAT_ACK:
2823 			case SCTP_ABORT_ASSOCIATION:
2824 			case SCTP_SHUTDOWN:
2825 			case SCTP_SHUTDOWN_ACK:
2826 			case SCTP_OPERATION_ERROR:
2827 			case SCTP_COOKIE_ECHO:
2828 			case SCTP_COOKIE_ACK:
2829 			case SCTP_ECN_ECHO:
2830 			case SCTP_ECN_CWR:
2831 			case SCTP_SHUTDOWN_COMPLETE:
2832 			case SCTP_AUTHENTICATION:
2833 			case SCTP_ASCONF_ACK:
2834 			case SCTP_PACKET_DROPPED:
2835 			case SCTP_STREAM_RESET:
2836 			case SCTP_FORWARD_CUM_TSN:
2837 			case SCTP_ASCONF:
2838 				{
2839 					/*
2840 					 * Now, what do we do with KNOWN
2841 					 * chunks that are NOT in the right
2842 					 * place?
2843 					 *
2844 					 * For now, I do nothing but ignore
2845 					 * them. We may later want to add
2846 					 * sysctl stuff to switch out and do
2847 					 * either an ABORT() or possibly
2848 					 * process them.
2849 					 */
2850 					struct mbuf *op_err;
2851 					char msg[SCTP_DIAG_INFO_LEN];
2852 
2853 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2854 					    ch->chunk_type);
2855 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2856 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2857 					return (2);
2858 				}
2859 			default:
2860 				/*
2861 				 * Unknown chunk type: use bit rules after
2862 				 * checking length
2863 				 */
2864 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2865 					/*
2866 					 * Need to send an abort since we
2867 					 * had an invalid chunk.
2868 					 */
2869 					struct mbuf *op_err;
2870 					char msg[SCTP_DIAG_INFO_LEN];
2871 
2872 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2873 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2874 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2875 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2876 					return (2);
2877 				}
2878 				if (ch->chunk_type & 0x40) {
2879 					/* Add an error report to the queue */
2880 					struct mbuf *op_err;
2881 					struct sctp_gen_error_cause *cause;
2882 
2883 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2884 					    0, M_NOWAIT, 1, MT_DATA);
2885 					if (op_err != NULL) {
2886 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2887 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2888 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2889 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2890 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2891 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2892 							sctp_queue_op_err(stcb, op_err);
2893 						} else {
2894 							sctp_m_freem(op_err);
2895 						}
2896 					}
2897 				}
2898 				if ((ch->chunk_type & 0x80) == 0) {
2899 					/* discard the rest of this packet */
2900 					stop_proc = 1;
2901 				}	/* else skip this bad chunk and
2902 					 * continue... */
2903 				break;
2904 			}	/* switch of chunk type */
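			/*
			 * Recap of the upper-bit rules applied above (per
			 * RFC 4960, section 3.2; a sketch): 00 = stop and
			 * discard, 01 = stop and report, 10 = skip
			 * silently, 11 = skip and report. Bit 0x80
			 * controls skipping, bit 0x40 the error report.
			 */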
2905 		}
2906 		*offset += SCTP_SIZE32(chk_length);
2907 		if ((*offset >= length) || stop_proc) {
2908 			/* no more data left in the mbuf chain */
2909 			stop_proc = 1;
2910 			continue;
2911 		}
2912 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2913 		    sizeof(struct sctp_chunkhdr),
2914 		    (uint8_t *)&chunk_buf);
2915 		if (ch == NULL) {
2916 			*offset = length;
2917 			stop_proc = 1;
2918 			continue;
2919 		}
2920 	}
2921 	if (break_flag) {
2922 		/*
2923 		 * we need to report rwnd overrun drops.
2924 		 */
2925 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2926 	}
2927 	if (num_chunks) {
2928 		/*
2929 		 * Did we get data? If so, update the time for auto-close
2930 		 * and give the peer credit for being alive.
2931 		 */
2932 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2933 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2934 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2935 			    stcb->asoc.overall_error_count,
2936 			    0,
2937 			    SCTP_FROM_SCTP_INDATA,
2938 			    __LINE__);
2939 		}
2940 		stcb->asoc.overall_error_count = 0;
2941 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2942 	}
2943 	/* now service all of the reassm queue if needed */
2944 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2945 		/* Assure that we ack right away */
2946 		stcb->asoc.send_sack = 1;
2947 	}
2948 	/* Start a sack timer or QUEUE a SACK for sending */
2949 	sctp_sack_check(stcb, was_a_gap);
2950 	return (0);
2951 }
2952 
2953 static int
2954 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2955     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2956     int *num_frs,
2957     uint32_t *biggest_newly_acked_tsn,
2958     uint32_t *this_sack_lowest_newack,
2959     int *rto_ok)
2960 {
2961 	struct sctp_tmit_chunk *tp1;
2962 	unsigned int theTSN;
2963 	int j, wake_him = 0, circled = 0;
2964 
2965 	/* Recover the tp1 we last saw */
2966 	tp1 = *p_tp1;
2967 	if (tp1 == NULL) {
2968 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2969 	}
2970 	for (j = frag_strt; j <= frag_end; j++) {
2971 		theTSN = j + last_tsn;
2972 		while (tp1) {
2973 			if (tp1->rec.data.doing_fast_retransmit)
2974 				(*num_frs) += 1;
2975 
2976 			/*-
2977 			 * CMT: CUCv2 algorithm. For each TSN being
2978 			 * processed from the sent queue, track the
2979 			 * next expected pseudo-cumack, or
2980 			 * rtx_pseudo_cumack, if required. Separate
2981 			 * cumack trackers for first transmissions,
2982 			 * and retransmissions.
2983 			 */
2984 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2985 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2986 			    (tp1->snd_count == 1)) {
2987 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2988 				tp1->whoTo->find_pseudo_cumack = 0;
2989 			}
2990 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2991 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2992 			    (tp1->snd_count > 1)) {
2993 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2994 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2995 			}
2996 			if (tp1->rec.data.tsn == theTSN) {
2997 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2998 					/*-
2999 					 * must be held until
3000 					 * cum-ack passes
3001 					 */
3002 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3003 						/*-
3004 						 * If it is less than RESEND, it is
3005 						 * now no longer in flight.
3006 						 * Higher values may already be set
3007 						 * via previous Gap Ack Blocks...
3008 						 * i.e. ACKED or RESEND.
3009 						 */
3010 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3011 						    *biggest_newly_acked_tsn)) {
3012 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3013 						}
3014 						/*-
3015 						 * CMT: SFR algo (and HTNA) - set
3016 						 * saw_newack to 1 for dest being
3017 						 * newly acked. update
3018 						 * this_sack_highest_newack if
3019 						 * appropriate.
3020 						 */
3021 						if (tp1->rec.data.chunk_was_revoked == 0)
3022 							tp1->whoTo->saw_newack = 1;
3023 
3024 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3025 						    tp1->whoTo->this_sack_highest_newack)) {
3026 							tp1->whoTo->this_sack_highest_newack =
3027 							    tp1->rec.data.tsn;
3028 						}
3029 						/*-
3030 						 * CMT DAC algo: also update
3031 						 * this_sack_lowest_newack
3032 						 */
3033 						if (*this_sack_lowest_newack == 0) {
3034 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3035 								sctp_log_sack(*this_sack_lowest_newack,
3036 								    last_tsn,
3037 								    tp1->rec.data.tsn,
3038 								    0,
3039 								    0,
3040 								    SCTP_LOG_TSN_ACKED);
3041 							}
3042 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3043 						}
3044 						/*-
3045 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3046 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3047 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3048 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3049 						 * Separate pseudo_cumack trackers for first transmissions and
3050 						 * retransmissions.
3051 						 */
3052 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3053 							if (tp1->rec.data.chunk_was_revoked == 0) {
3054 								tp1->whoTo->new_pseudo_cumack = 1;
3055 							}
3056 							tp1->whoTo->find_pseudo_cumack = 1;
3057 						}
3058 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3059 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3060 						}
3061 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3062 							if (tp1->rec.data.chunk_was_revoked == 0) {
3063 								tp1->whoTo->new_pseudo_cumack = 1;
3064 							}
3065 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3066 						}
3067 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3068 							sctp_log_sack(*biggest_newly_acked_tsn,
3069 							    last_tsn,
3070 							    tp1->rec.data.tsn,
3071 							    frag_strt,
3072 							    frag_end,
3073 							    SCTP_LOG_TSN_ACKED);
3074 						}
3075 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3076 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3077 							    tp1->whoTo->flight_size,
3078 							    tp1->book_size,
3079 							    (uint32_t)(uintptr_t)tp1->whoTo,
3080 							    tp1->rec.data.tsn);
3081 						}
3082 						sctp_flight_size_decrease(tp1);
3083 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3084 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3085 							    tp1);
3086 						}
3087 						sctp_total_flight_decrease(stcb, tp1);
3088 
3089 						tp1->whoTo->net_ack += tp1->send_size;
3090 						if (tp1->snd_count < 2) {
3091 							/*-
3092 							 * True non-retransmitted chunk
3093 							 */
3094 							tp1->whoTo->net_ack2 += tp1->send_size;
3095 
3096 							/*-
3097 							 * update RTO too ?
3098 							 */
3099 							if (tp1->do_rtt) {
3100 								if (*rto_ok &&
3101 								    sctp_calculate_rto(stcb,
3102 								    &stcb->asoc,
3103 								    tp1->whoTo,
3104 								    &tp1->sent_rcv_time,
3105 								    SCTP_RTT_FROM_DATA)) {
3106 									*rto_ok = 0;
3107 								}
3108 								if (tp1->whoTo->rto_needed == 0) {
3109 									tp1->whoTo->rto_needed = 1;
3110 								}
3111 								tp1->do_rtt = 0;
3112 							}
3113 						}
3114 
3115 					}
3116 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3117 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3118 						    stcb->asoc.this_sack_highest_gap)) {
3119 							stcb->asoc.this_sack_highest_gap =
3120 							    tp1->rec.data.tsn;
3121 						}
3122 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3123 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3124 #ifdef SCTP_AUDITING_ENABLED
3125 							sctp_audit_log(0xB2,
3126 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3127 #endif
3128 						}
3129 					}
3130 					/*-
3131 					 * All chunks NOT UNSENT fall through here and are marked
3132 					 * (leave PR-SCTP ones that are to skip alone though)
3133 					 */
3134 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3135 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3136 						tp1->sent = SCTP_DATAGRAM_MARKED;
3137 					}
3138 					if (tp1->rec.data.chunk_was_revoked) {
3139 						/* deflate the cwnd */
3140 						tp1->whoTo->cwnd -= tp1->book_size;
3141 						tp1->rec.data.chunk_was_revoked = 0;
3142 					}
3143 					/* NR Sack code here */
3144 					if (nr_sacking &&
3145 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3146 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3147 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3148 #ifdef INVARIANTS
3149 						} else {
3150 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3151 #endif
3152 						}
3153 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3154 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3155 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3156 							stcb->asoc.trigger_reset = 1;
3157 						}
3158 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3159 						if (tp1->data) {
3160 							/*
3161 							/* sa_ignore NO_NULL_CHK */
3165 							sctp_m_freem(tp1->data);
3166 							tp1->data = NULL;
3167 						}
3168 						wake_him++;
3169 					}
3170 				}
3171 				break;
3172 			}	/* if (tp1->tsn == theTSN) */
3173 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3174 				break;
3175 			}
3176 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3177 			if ((tp1 == NULL) && (circled == 0)) {
3178 				circled++;
3179 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3180 			}
3181 		}		/* end while (tp1) */
3182 		if (tp1 == NULL) {
3183 			circled = 0;
3184 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3185 		}
3186 		/* In case the fragments were not in order we must reset */
3187 	}			/* end for (j = fragStart */
3188 	*p_tp1 = tp1;
3189 	return (wake_him);	/* Return value only used for nr-sack */
3190 }
3191 
3192 
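/*
 * Sketch of the gap-ack arithmetic handled below (illustrative only): a
 * gap-ack block carries 16-bit start/end offsets relative to the cumulative
 * TSN ack.  For example, with last_tsn = 1000 a block of {start = 2, end = 3}
 * acks TSNs 1002 and 1003:
 *
 *	first_acked = last_tsn + frag_strt;	// 1002
 *	last_acked  = last_tsn + frag_end;	// 1003
 *
 * A block with start > end is malformed and skipped; a block that does not
 * start beyond the previous block's end makes us rescan the sent queue from
 * its head.
 */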
3193 static int
3194 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3195     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3196     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3197     int num_seg, int num_nr_seg, int *rto_ok)
3198 {
3199 	struct sctp_gap_ack_block *frag, block;
3200 	struct sctp_tmit_chunk *tp1;
3201 	int i;
3202 	int num_frs = 0;
3203 	int chunk_freed;
3204 	int non_revocable;
3205 	uint16_t frag_strt, frag_end, prev_frag_end;
3206 
3207 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3208 	prev_frag_end = 0;
3209 	chunk_freed = 0;
3210 
3211 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3212 		if (i == num_seg) {
3213 			prev_frag_end = 0;
3214 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3215 		}
3216 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3217 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3218 		*offset += sizeof(block);
3219 		if (frag == NULL) {
3220 			return (chunk_freed);
3221 		}
3222 		frag_strt = ntohs(frag->start);
3223 		frag_end = ntohs(frag->end);
3224 
3225 		if (frag_strt > frag_end) {
3226 			/* This gap report is malformed, skip it. */
3227 			continue;
3228 		}
3229 		if (frag_strt <= prev_frag_end) {
3230 			/* This gap report is not in order, so restart. */
3231 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3232 		}
3233 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3234 			*biggest_tsn_acked = last_tsn + frag_end;
3235 		}
3236 		if (i < num_seg) {
3237 			non_revocable = 0;
3238 		} else {
3239 			non_revocable = 1;
3240 		}
3241 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3242 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3243 		    this_sack_lowest_newack, rto_ok)) {
3244 			chunk_freed = 1;
3245 		}
3246 		prev_frag_end = frag_end;
3247 	}
3248 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3249 		if (num_frs)
3250 			sctp_log_fr(*biggest_tsn_acked,
3251 			    *biggest_newly_acked_tsn,
3252 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3253 	}
3254 	return (chunk_freed);
3255 }
3256 
3257 static void
3258 sctp_check_for_revoked(struct sctp_tcb *stcb,
3259     struct sctp_association *asoc, uint32_t cumack,
3260     uint32_t biggest_tsn_acked)
3261 {
3262 	struct sctp_tmit_chunk *tp1;
3263 
3264 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3265 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3266 			/*
3267 			 * ok this guy is either ACKED or MARKED. If it is
3268 			 * ACKED it has been previously acked but not this
3269 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3270 			 * again.
3271 			 */
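			/*
			 * Worked example (illustrative): say cumack is 100
			 * and TSN 103 was gap-acked by an earlier SACK, so
			 * it sits here as ACKED.  If the current SACK's gap
			 * reports no longer cover 103, it was not re-marked
			 * above and the code below flips it back to SENT:
			 * the peer revoked the ack, so the chunk re-enters
			 * the flight accounting.
			 */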
3272 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3273 				break;
3274 			}
3275 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3276 				/* it has been revoked */
3277 				tp1->sent = SCTP_DATAGRAM_SENT;
3278 				tp1->rec.data.chunk_was_revoked = 1;
3279 				/*
3280 				 * We must add this stuff back in to assure
3281 				 * timers and such get started.
3282 				 */
3283 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3284 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3285 					    tp1->whoTo->flight_size,
3286 					    tp1->book_size,
3287 					    (uint32_t)(uintptr_t)tp1->whoTo,
3288 					    tp1->rec.data.tsn);
3289 				}
3290 				sctp_flight_size_increase(tp1);
3291 				sctp_total_flight_increase(stcb, tp1);
3292 				/*
3293 				 * We inflate the cwnd to compensate for our
3294 				 * artificial inflation of the flight_size.
3295 				 */
3296 				tp1->whoTo->cwnd += tp1->book_size;
3297 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3298 					sctp_log_sack(asoc->last_acked_seq,
3299 					    cumack,
3300 					    tp1->rec.data.tsn,
3301 					    0,
3302 					    0,
3303 					    SCTP_LOG_TSN_REVOKED);
3304 				}
3305 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3306 				/* it has been re-acked in this SACK */
3307 				tp1->sent = SCTP_DATAGRAM_ACKED;
3308 			}
3309 		}
3310 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3311 			break;
3312 	}
3313 }
3314 
3315 
3316 static void
3317 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3318     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3319 {
3320 	struct sctp_tmit_chunk *tp1;
3321 	int strike_flag = 0;
3322 	struct timeval now;
3323 	int tot_retrans = 0;
3324 	uint32_t sending_seq;
3325 	struct sctp_nets *net;
3326 	int num_dests_sacked = 0;
3327 
3328 	/*
3329 	 * Select the sending_seq: this is either the next thing ready to be
3330 	 * sent but not yet transmitted, or the next seq we will assign.
3331 	 */
3332 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3333 	if (tp1 == NULL) {
3334 		sending_seq = asoc->sending_seq;
3335 	} else {
3336 		sending_seq = tp1->rec.data.tsn;
3337 	}
3338 
3339 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3340 	if ((asoc->sctp_cmt_on_off > 0) &&
3341 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3342 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3343 			if (net->saw_newack)
3344 				num_dests_sacked++;
3345 		}
3346 	}
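	/*
	 * Illustrative note: num_dests_sacked counts the destinations for
	 * which this SACK newly acked data.  A "mixed" SACK (more than one
	 * such destination) disables the CMT DAC extra strike below, which
	 * only fires when num_dests_sacked == 1.
	 */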
3347 	if (stcb->asoc.prsctp_supported) {
3348 		(void)SCTP_GETTIME_TIMEVAL(&now);
3349 	}
3350 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3351 		strike_flag = 0;
3352 		if (tp1->no_fr_allowed) {
3353 			/* this one had a timeout or something */
3354 			continue;
3355 		}
3356 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3358 				sctp_log_fr(biggest_tsn_newly_acked,
3359 				    tp1->rec.data.tsn,
3360 				    tp1->sent,
3361 				    SCTP_FR_LOG_CHECK_STRIKE);
3362 		}
3363 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3364 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3365 			/* done */
3366 			break;
3367 		}
3368 		if (stcb->asoc.prsctp_supported) {
3369 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3370 				/* Is it expired? */
3371 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3372 					/* Yes so drop it */
3373 					if (tp1->data != NULL) {
3374 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3375 						    SCTP_SO_NOT_LOCKED);
3376 					}
3377 					continue;
3378 				}
3379 			}
3380 
3381 		}
3382 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3383 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3384 			/* we are beyond the tsn in the sack  */
3385 			break;
3386 		}
3387 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3388 			/* either a RESEND, ACKED, or MARKED */
3389 			/* skip */
3390 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3391 				/* Continue striking FWD-TSN chunks */
3392 				tp1->rec.data.fwd_tsn_cnt++;
3393 			}
3394 			continue;
3395 		}
3396 		/*
3397 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3398 		 */
3399 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3400 			/*
3401 			 * No new acks were received for data sent to this
3402 			 * dest. Therefore, according to the SFR algo for
3403 			 * CMT, no data sent to this dest can be marked for
3404 			 * FR using this SACK.
3405 			 */
3406 			continue;
3407 		} else if (tp1->whoTo &&
3408 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3409 			    tp1->whoTo->this_sack_highest_newack) &&
3410 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3411 			/*
3412 			 * CMT: New acks were received for data sent to
3413 			 * this dest. But no new acks were seen for data
3414 			 * sent after tp1. Therefore, according to the SFR
3415 			 * algo for CMT, tp1 cannot be marked for FR using
3416 			 * this SACK. This step covers part of the DAC algo
3417 			 * and the HTNA algo as well.
3418 			 */
3419 			continue;
3420 		}
3421 		/*
3422 		 * Here we check to see if we have already done a FR
3423 		 * and if so we see if the biggest TSN we saw in the sack is
3424 		 * smaller than the recovery point. If so we don't strike
3425 		 * the tsn... otherwise we CAN strike the TSN.
3426 		 */
3427 		/*
3428 		 * @@@ JRI: Check for CMT if (accum_moved &&
3429 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3430 		 * 0)) {
3431 		 */
3432 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3433 			/*
3434 			 * Strike the TSN if in fast-recovery and cum-ack
3435 			 * moved.
3436 			 */
3437 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3438 				sctp_log_fr(biggest_tsn_newly_acked,
3439 				    tp1->rec.data.tsn,
3440 				    tp1->sent,
3441 				    SCTP_FR_LOG_STRIKE_CHUNK);
3442 			}
3443 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3444 				tp1->sent++;
3445 			}
3446 			if ((asoc->sctp_cmt_on_off > 0) &&
3447 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3448 				/*
3449 				 * CMT DAC algorithm: If SACK flag is set to
3450 				 * 0, then lowest_newack test will not pass
3451 				 * because it would have been set to the
3452 				 * cumack earlier. If the chunk is not already
3453 				 * marked for rtx, the SACK is not a mixed sack,
3454 				 * and tp1 is not between two sacked TSNs, then
3455 				 * mark it one more time. NOTE that we are marking by one
3456 				 * additional time since the SACK DAC flag
3457 				 * indicates that two packets have been
3458 				 * received after this missing TSN.
3459 				 */
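				/*
				 * Example (illustrative): suppose TSN 100 is
				 * missing and this SACK newly acks 101 and
				 * 102 on the only destination that saw new
				 * acks.  Then this_sack_lowest_newack (101)
				 * is greater than 100, so TSN 100 takes a
				 * second strike here on top of the one above.
				 */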
3460 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3461 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3462 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3463 						sctp_log_fr(16 + num_dests_sacked,
3464 						    tp1->rec.data.tsn,
3465 						    tp1->sent,
3466 						    SCTP_FR_LOG_STRIKE_CHUNK);
3467 					}
3468 					tp1->sent++;
3469 				}
3470 			}
3471 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3472 		    (asoc->sctp_cmt_on_off == 0)) {
3473 			/*
3474 			 * For those that have done a FR we must take
3475 			 * special consideration if we strike. I.e the
3476 			 * biggest_newly_acked must be higher than the
3477 			 * sending_seq at the time we did the FR.
3478 			 */
3479 			if (
3480 #ifdef SCTP_FR_TO_ALTERNATE
3481 			/*
3482 			 * If FR's go to new networks, then we must only do
3483 			 * this for singly homed asoc's. However if the FR's
3484 			 * go to the same network (Armando's work) then it's
3485 			 * ok to FR multiple times.
3486 			 */
3487 			    (asoc->numnets < 2)
3488 #else
3489 			    (1)
3490 #endif
3491 			    ) {
3492 
3493 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3494 				    tp1->rec.data.fast_retran_tsn)) {
3495 					/*
3496 					 * Strike the TSN, since this ack is
3497 					 * beyond where things were when we
3498 					 * did a FR.
3499 					 */
3500 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3501 						sctp_log_fr(biggest_tsn_newly_acked,
3502 						    tp1->rec.data.tsn,
3503 						    tp1->sent,
3504 						    SCTP_FR_LOG_STRIKE_CHUNK);
3505 					}
3506 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3507 						tp1->sent++;
3508 					}
3509 					strike_flag = 1;
3510 					if ((asoc->sctp_cmt_on_off > 0) &&
3511 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3512 						/*
3513 						 * CMT DAC algorithm: If
3514 						 * SACK flag is set to 0,
3515 						 * then lowest_newack test
3516 						 * will not pass because it
3517 						 * would have been set to
3518 						 * the cumack earlier. If the
3519 						 * chunk is not already marked
3520 						 * for rtx, the SACK is not a
3521 						 * mixed sack, and tp1 is not
3522 						 * between two sacked TSNs,
3523 						 * then mark once more. NOTE that we
3524 						 * are marking by one
3525 						 * additional time since the
3526 						 * SACK DAC flag indicates
3527 						 * that two packets have
3528 						 * been received after this
3529 						 * missing TSN.
3530 						 */
3531 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3532 						    (num_dests_sacked == 1) &&
3533 						    SCTP_TSN_GT(this_sack_lowest_newack,
3534 						    tp1->rec.data.tsn)) {
3535 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3536 								sctp_log_fr(32 + num_dests_sacked,
3537 								    tp1->rec.data.tsn,
3538 								    tp1->sent,
3539 								    SCTP_FR_LOG_STRIKE_CHUNK);
3540 							}
3541 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3542 								tp1->sent++;
3543 							}
3544 						}
3545 					}
3546 				}
3547 			}
3548 			/*
3549 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3550 			 * algo covers HTNA.
3551 			 */
3552 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3553 		    biggest_tsn_newly_acked)) {
3554 			/*
3555 			 * We don't strike these: this is the HTNA
3556 			 * algorithm, i.e. we don't strike if our TSN is
3557 			 * larger than the Highest TSN Newly Acked.
3558 			 */
3559 			;
3560 		} else {
3561 			/* Strike the TSN */
3562 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3563 				sctp_log_fr(biggest_tsn_newly_acked,
3564 				    tp1->rec.data.tsn,
3565 				    tp1->sent,
3566 				    SCTP_FR_LOG_STRIKE_CHUNK);
3567 			}
3568 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3569 				tp1->sent++;
3570 			}
3571 			if ((asoc->sctp_cmt_on_off > 0) &&
3572 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3573 				/*
3574 				 * CMT DAC algorithm: If SACK flag is set to
3575 				 * 0, then lowest_newack test will not pass
3576 				 * because it would have been set to the
3577 				 * cumack earlier. If the chunk is not already
3578 				 * marked for rtx, the SACK is not a mixed sack,
3579 				 * and tp1 is not between two sacked TSNs, then
3580 				 * mark it one more time. NOTE that we are marking by one
3581 				 * additional time since the SACK DAC flag
3582 				 * indicates that two packets have been
3583 				 * received after this missing TSN.
3584 				 */
3585 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3586 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3587 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3588 						sctp_log_fr(48 + num_dests_sacked,
3589 						    tp1->rec.data.tsn,
3590 						    tp1->sent,
3591 						    SCTP_FR_LOG_STRIKE_CHUNK);
3592 					}
3593 					tp1->sent++;
3594 				}
3595 			}
3596 		}
3597 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3598 			struct sctp_nets *alt;
3599 
3600 			/* fix counts and things */
3601 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3602 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3603 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3604 				    tp1->book_size,
3605 				    (uint32_t)(uintptr_t)tp1->whoTo,
3606 				    tp1->rec.data.tsn);
3607 			}
3608 			if (tp1->whoTo) {
3609 				tp1->whoTo->net_ack++;
3610 				sctp_flight_size_decrease(tp1);
3611 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3612 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3613 					    tp1);
3614 				}
3615 			}
3616 
3617 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3618 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3619 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3620 			}
3621 			/* add back to the rwnd */
3622 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3623 
3624 			/* remove from the total flight */
3625 			sctp_total_flight_decrease(stcb, tp1);
3626 
3627 			if ((stcb->asoc.prsctp_supported) &&
3628 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3629 				/*
3630 				 * Has it been retransmitted tv_sec times? -
3631 				 * we store the retran count there.
3632 				 */
3633 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3634 					/* Yes, so drop it */
3635 					if (tp1->data != NULL) {
3636 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3637 						    SCTP_SO_NOT_LOCKED);
3638 					}
3639 					/* Make sure to flag we had a FR */
3640 					if (tp1->whoTo != NULL) {
3641 						tp1->whoTo->net_ack++;
3642 					}
3643 					continue;
3644 				}
3645 			}
3646 			/*
3647 			 * SCTP_PRINTF("OK, we are now ready to FR this
3648 			 * guy\n");
3649 			 */
3650 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3651 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3652 				    0, SCTP_FR_MARKED);
3653 			}
3654 			if (strike_flag) {
3655 				/* This is a subsequent FR */
3656 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3657 			}
3658 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3659 			if (asoc->sctp_cmt_on_off > 0) {
3660 				/*
3661 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3662 				 * If CMT is being used, then pick dest with
3663 				 * largest ssthresh for any retransmission.
3664 				 */
3665 				tp1->no_fr_allowed = 1;
3666 				alt = tp1->whoTo;
3667 				/* sa_ignore NO_NULL_CHK */
3668 				if (asoc->sctp_cmt_pf > 0) {
3669 					/*
3670 					 * JRS 5/18/07 - If CMT PF is on,
3671 					 * use the PF version of
3672 					 * find_alt_net()
3673 					 */
3674 					alt = sctp_find_alternate_net(stcb, alt, 2);
3675 				} else {
3676 					/*
3677 					 * JRS 5/18/07 - If only CMT is on,
3678 					 * use the CMT version of
3679 					 * find_alt_net()
3680 					 */
3681 					/* sa_ignore NO_NULL_CHK */
3682 					alt = sctp_find_alternate_net(stcb, alt, 1);
3683 				}
3684 				if (alt == NULL) {
3685 					alt = tp1->whoTo;
3686 				}
3687 				/*
3688 				 * CUCv2: If a different dest is picked for
3689 				 * the retransmission, then new
3690 				 * (rtx-)pseudo_cumack needs to be tracked
3691 				 * for orig dest. Let CUCv2 track new (rtx-)
3692 				 * pseudo-cumack always.
3693 				 */
3694 				if (tp1->whoTo) {
3695 					tp1->whoTo->find_pseudo_cumack = 1;
3696 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3697 				}
3698 
3699 			} else {	/* CMT is OFF */
3700 
3701 #ifdef SCTP_FR_TO_ALTERNATE
3702 				/* Can we find an alternate? */
3703 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3704 #else
3705 				/*
3706 				 * default behavior is to NOT retransmit
3707 				 * FR's to an alternate. Armando Caro's
3708 				 * paper details why.
3709 				 */
3710 				alt = tp1->whoTo;
3711 #endif
3712 			}
3713 
3714 			tp1->rec.data.doing_fast_retransmit = 1;
3715 			tot_retrans++;
3716 			/* mark the sending seq for possible subsequent FR's */
3717 			/*
3718 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3719 			 * (uint32_t)tp1->rec.data.tsn);
3720 			 */
3721 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3722 				/*
3723 				 * If the send queue is empty then it's
3724 				 * the next sequence number that will be
3725 				 * assigned, so we subtract one from this to
3726 				 * get the one we last sent.
3727 				 */
3728 				tp1->rec.data.fast_retran_tsn = sending_seq;
3729 			} else {
3730 				/*
3731 				 * If there are chunks on the send queue
3732 				 * (unsent data that has made it from the
3733 				 * stream queues but not out the door), we
3734 				 * take the first one (which will have the
3735 				 * lowest TSN) and subtract one to get the
3736 				 * one we last sent.
3737 				 */
3738 				struct sctp_tmit_chunk *ttt;
3739 
3740 				ttt = TAILQ_FIRST(&asoc->send_queue);
3741 				tp1->rec.data.fast_retran_tsn =
3742 				    ttt->rec.data.tsn;
3743 			}
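			/*
			 * Illustrative example: if sending_seq was 5000 when
			 * this FR was marked, a later SACK may strike this
			 * chunk again only once biggest_tsn_newly_acked has
			 * reached 5000; that is the SCTP_TSN_GE() check
			 * against rec.data.fast_retran_tsn earlier in this
			 * function.
			 */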
3744 
3745 			if (tp1->do_rtt) {
3746 				/*
3747 				 * this guy had a RTO calculation pending on
3748 				 * it, cancel it
3749 				 */
3750 				if ((tp1->whoTo != NULL) &&
3751 				    (tp1->whoTo->rto_needed == 0)) {
3752 					tp1->whoTo->rto_needed = 1;
3753 				}
3754 				tp1->do_rtt = 0;
3755 			}
3756 			if (alt != tp1->whoTo) {
3757 				/* yes, there is an alternate. */
3758 				sctp_free_remote_addr(tp1->whoTo);
3759 				/* sa_ignore FREED_MEMORY */
3760 				tp1->whoTo = alt;
3761 				atomic_add_int(&alt->ref_count, 1);
3762 			}
3763 		}
3764 	}
3765 }
3766 
3767 struct sctp_tmit_chunk *
3768 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3769     struct sctp_association *asoc)
3770 {
3771 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3772 	struct timeval now;
3773 	int now_filled = 0;
3774 
3775 	if (asoc->prsctp_supported == 0) {
3776 		return (NULL);
3777 	}
3778 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3779 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3780 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3781 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3782 			/* no chance to advance, out of here */
3783 			break;
3784 		}
3785 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3786 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3787 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3788 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3789 				    asoc->advanced_peer_ack_point,
3790 				    tp1->rec.data.tsn, 0, 0);
3791 			}
3792 		}
3793 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3794 			/*
3795 			 * We can't fwd-tsn past any that are reliable aka
3796 			 * retransmitted until the asoc fails.
3797 			 */
3798 			break;
3799 		}
3800 		if (!now_filled) {
3801 			(void)SCTP_GETTIME_TIMEVAL(&now);
3802 			now_filled = 1;
3803 		}
3804 		/*
3805 		 * Now we have a chunk which is marked for another
3806 		 * retransmission to a PR-stream but may have run out of its
3807 		 * chances already, OR has been marked to skip now. Can we skip
3808 		 * it if it's a resend?
3809 		 */
3810 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3811 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3812 			/*
3813 			 * Now is this one marked for resend and its time is
3814 			 * now up?
3815 			 */
3816 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3817 				/* Yes so drop it */
3818 				if (tp1->data) {
3819 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3820 					    1, SCTP_SO_NOT_LOCKED);
3821 				}
3822 			} else {
3823 				/*
3824 				 * No, we are done when we hit one marked for
3825 				 * resend whose time has not expired.
3826 				 */
3827 				break;
3828 			}
3829 		}
3830 		/*
3831 		 * Ok now if this chunk is marked to drop it we can clean up
3832 		 * the chunk, advance our peer ack point and we can check
3833 		 * the next chunk.
3834 		 */
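		/*
		 * Example (illustrative): with a sent queue of TSN 10
		 * (FORWARD_TSN_SKIP), 11 (NR_ACKED) and 12 (RESEND on a
		 * reliable stream), the loop advances
		 * advanced_peer_ack_point to 11, saves that chunk in a_adv,
		 * and stops when it reaches 12.
		 */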
3835 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3836 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3837 			/* the advanced PeerAckPoint goes forward */
3838 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3839 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3840 				a_adv = tp1;
3841 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3842 				/* No update but we do save the chk */
3843 				a_adv = tp1;
3844 			}
3845 		} else {
3846 			/*
3847 			 * If it is still in RESEND we can advance no
3848 			 * further
3849 			 */
3850 			break;
3851 		}
3852 	}
3853 	return (a_adv);
3854 }
3855 
3856 static int
3857 sctp_fs_audit(struct sctp_association *asoc)
3858 {
3859 	struct sctp_tmit_chunk *chk;
3860 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3861 	int ret;
3862 #ifndef INVARIANTS
3863 	int entry_flight, entry_cnt;
3864 #endif
3865 
3866 	ret = 0;
3867 #ifndef INVARIANTS
3868 	entry_flight = asoc->total_flight;
3869 	entry_cnt = asoc->total_flight_count;
3870 #endif
3871 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3872 		return (0);
3873 
3874 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3875 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3876 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3877 			    chk->rec.data.tsn,
3878 			    chk->send_size,
3879 			    chk->snd_count);
3880 			inflight++;
3881 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3882 			resend++;
3883 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3884 			inbetween++;
3885 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3886 			above++;
3887 		} else {
3888 			acked++;
3889 		}
3890 	}
3891 
3892 	if ((inflight > 0) || (inbetween > 0)) {
3893 #ifdef INVARIANTS
3894 		panic("Flight size-express incorrect? \n");
3895 #else
3896 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3897 		    entry_flight, entry_cnt);
3898 
3899 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3900 		    inflight, inbetween, resend, above, acked);
3901 		ret = 1;
3902 #endif
3903 	}
3904 	return (ret);
3905 }
3906 
3907 
3908 static void
3909 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3910     struct sctp_association *asoc,
3911     struct sctp_tmit_chunk *tp1)
3912 {
3913 	tp1->window_probe = 0;
3914 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3915 		/* TSNs skipped; we do NOT move back. */
3916 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3917 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3918 		    tp1->book_size,
3919 		    (uint32_t)(uintptr_t)tp1->whoTo,
3920 		    tp1->rec.data.tsn);
3921 		return;
3922 	}
3923 	/* First setup this by shrinking flight */
3924 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3925 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3926 		    tp1);
3927 	}
3928 	sctp_flight_size_decrease(tp1);
3929 	sctp_total_flight_decrease(stcb, tp1);
3930 	/* Now mark for resend */
3931 	tp1->sent = SCTP_DATAGRAM_RESEND;
3932 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3933 
3934 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3935 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3936 		    tp1->whoTo->flight_size,
3937 		    tp1->book_size,
3938 		    (uint32_t)(uintptr_t)tp1->whoTo,
3939 		    tp1->rec.data.tsn);
3940 	}
3941 }
3942 
3943 void
3944 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3945     uint32_t rwnd, int *abort_now, int ecne_seen)
3946 {
3947 	struct sctp_nets *net;
3948 	struct sctp_association *asoc;
3949 	struct sctp_tmit_chunk *tp1, *tp2;
3950 	uint32_t old_rwnd;
3951 	int win_probe_recovery = 0;
3952 	int win_probe_recovered = 0;
3953 	int j, done_once = 0;
3954 	int rto_ok = 1;
3955 	uint32_t send_s;
3956 
3957 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3958 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3959 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3960 	}
3961 	SCTP_TCB_LOCK_ASSERT(stcb);
3962 #ifdef SCTP_ASOCLOG_OF_TSNS
3963 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3964 	stcb->asoc.cumack_log_at++;
3965 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3966 		stcb->asoc.cumack_log_at = 0;
3967 	}
3968 #endif
3969 	asoc = &stcb->asoc;
3970 	old_rwnd = asoc->peers_rwnd;
3971 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3972 		/* old ack */
3973 		return;
3974 	} else if (asoc->last_acked_seq == cumack) {
3975 		/* Window update sack */
3976 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3977 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3978 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3979 			/* SWS sender side engages */
3980 			asoc->peers_rwnd = 0;
3981 		}
3982 		if (asoc->peers_rwnd > old_rwnd) {
3983 			goto again;
3984 		}
3985 		return;
3986 	}
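	/*
	 * Illustrative arithmetic for the window-update case above, assuming
	 * a per-chunk overhead (sctp_peer_chunk_oh) of 256 bytes: with
	 * rwnd = 64000, total_flight = 1200 and total_flight_count = 2, the
	 * peer's rwnd becomes 64000 - (1200 + 2 * 256) = 62288.  If that
	 * falls below the sender-side SWS threshold it is clamped to 0.
	 */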
3987 
3988 	/* First setup for CC stuff */
3989 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3990 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3991 			/* Drag along the window_tsn for cwr's */
3992 			net->cwr_window_tsn = cumack;
3993 		}
3994 		net->prev_cwnd = net->cwnd;
3995 		net->net_ack = 0;
3996 		net->net_ack2 = 0;
3997 
3998 		/*
3999 		 * CMT: Reset CUC and Fast recovery algo variables before
4000 		 * SACK processing
4001 		 */
4002 		net->new_pseudo_cumack = 0;
4003 		net->will_exit_fast_recovery = 0;
4004 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4005 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4006 		}
4007 	}
4008 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4009 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4010 		    sctpchunk_listhead);
4011 		send_s = tp1->rec.data.tsn + 1;
4012 	} else {
4013 		send_s = asoc->sending_seq;
4014 	}
4015 	if (SCTP_TSN_GE(cumack, send_s)) {
4016 		struct mbuf *op_err;
4017 		char msg[SCTP_DIAG_INFO_LEN];
4018 
4019 		*abort_now = 1;
4020 		/* XXX */
4021 		SCTP_SNPRINTF(msg, sizeof(msg),
4022 		    "Cum ack %8.8x greater than or equal to TSN %8.8x",
4023 		    cumack, send_s);
4024 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4025 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4026 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4027 		return;
4028 	}
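	/*
	 * Example (illustrative): if the highest TSN on the sent queue is
	 * 4999, then send_s is 5000.  A cumulative ack of 5000 or more would
	 * ack data we never sent; such a SACK is treated above as a protocol
	 * violation and aborts the association.
	 */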
4029 	asoc->this_sack_highest_gap = cumack;
4030 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4031 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4032 		    stcb->asoc.overall_error_count,
4033 		    0,
4034 		    SCTP_FROM_SCTP_INDATA,
4035 		    __LINE__);
4036 	}
4037 	stcb->asoc.overall_error_count = 0;
4038 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4039 		/* process the new consecutive TSN first */
4040 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4041 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4042 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4043 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4044 				}
4045 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4046 					/*
4047 					 * If it is less than ACKED, it is
4048 					 * now no longer in flight. Higher
4049 					 * values may occur during marking.
4050 					 */
4051 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4052 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4053 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4054 							    tp1->whoTo->flight_size,
4055 							    tp1->book_size,
4056 							    (uint32_t)(uintptr_t)tp1->whoTo,
4057 							    tp1->rec.data.tsn);
4058 						}
4059 						sctp_flight_size_decrease(tp1);
4060 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4061 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4062 							    tp1);
4063 						}
4064 						/* sa_ignore NO_NULL_CHK */
4065 						sctp_total_flight_decrease(stcb, tp1);
4066 					}
4067 					tp1->whoTo->net_ack += tp1->send_size;
4068 					if (tp1->snd_count < 2) {
4069 						/*
4070 						 * True non-retransmitted
4071 						 * chunk
4072 						 */
4073 						tp1->whoTo->net_ack2 +=
4074 						    tp1->send_size;
4075 
4076 						/* update RTO too? */
4077 						if (tp1->do_rtt) {
4078 							if (rto_ok &&
4079 							    sctp_calculate_rto(stcb,
4080 							    &stcb->asoc,
4081 							    tp1->whoTo,
4082 							    &tp1->sent_rcv_time,
4083 							    SCTP_RTT_FROM_DATA)) {
4084 								rto_ok = 0;
4085 							}
4086 							if (tp1->whoTo->rto_needed == 0) {
4087 								tp1->whoTo->rto_needed = 1;
4088 							}
4089 							tp1->do_rtt = 0;
4090 						}
4091 					}
4092 					/*
4093 					 * CMT: CUCv2 algorithm. From the
4094 					 * cumack'd TSNs, for each TSN being
4095 					 * acked for the first time, set the
4096 					 * following variables for the
4097 					 * corresp destination.
4098 					 * new_pseudo_cumack will trigger a
4099 					 * cwnd update.
4100 					 * find_(rtx_)pseudo_cumack will
4101 					 * trigger search for the next
4102 					 * expected (rtx-)pseudo-cumack.
4103 					 */
4104 					tp1->whoTo->new_pseudo_cumack = 1;
4105 					tp1->whoTo->find_pseudo_cumack = 1;
4106 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4107 
4108 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4109 						/* sa_ignore NO_NULL_CHK */
4110 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4111 					}
4112 				}
4113 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4114 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4115 				}
4116 				if (tp1->rec.data.chunk_was_revoked) {
4117 					/* deflate the cwnd */
4118 					tp1->whoTo->cwnd -= tp1->book_size;
4119 					tp1->rec.data.chunk_was_revoked = 0;
4120 				}
4121 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4122 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4123 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4124 #ifdef INVARIANTS
4125 					} else {
4126 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4127 #endif
4128 					}
4129 				}
4130 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4131 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4132 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4133 					asoc->trigger_reset = 1;
4134 				}
4135 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4136 				if (tp1->data) {
4137 					/* sa_ignore NO_NULL_CHK */
4138 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4139 					sctp_m_freem(tp1->data);
4140 					tp1->data = NULL;
4141 				}
4142 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4143 					sctp_log_sack(asoc->last_acked_seq,
4144 					    cumack,
4145 					    tp1->rec.data.tsn,
4146 					    0,
4147 					    0,
4148 					    SCTP_LOG_FREE_SENT);
4149 				}
4150 				asoc->sent_queue_cnt--;
4151 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4152 			} else {
4153 				break;
4154 			}
4155 		}
4156 
4157 	}
4158 	/* sa_ignore NO_NULL_CHK */
4159 	if (stcb->sctp_socket) {
4160 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4161 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4162 			/* sa_ignore NO_NULL_CHK */
4163 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4164 		}
4165 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4166 	} else {
4167 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4168 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4169 		}
4170 	}
4171 
4172 	/* JRS - Use the congestion control given in the CC module */
4173 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4174 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4175 			if (net->net_ack2 > 0) {
4176 				/*
4177 				 * Karn's rule applies to clearing error
4178 				 * count; this is optional.
4179 				 */
4180 				net->error_count = 0;
4181 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4182 					/* addr came good */
4183 					net->dest_state |= SCTP_ADDR_REACHABLE;
4184 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4185 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4186 				}
4187 				if (net == stcb->asoc.primary_destination) {
4188 					if (stcb->asoc.alternate) {
4189 						/*
4190 						 * release the alternate,
4191 						 * primary is good
4192 						 */
4193 						sctp_free_remote_addr(stcb->asoc.alternate);
4194 						stcb->asoc.alternate = NULL;
4195 					}
4196 				}
4197 				if (net->dest_state & SCTP_ADDR_PF) {
4198 					net->dest_state &= ~SCTP_ADDR_PF;
4199 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4200 					    stcb->sctp_ep, stcb, net,
4201 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4202 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4203 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4204 					/* Done with this net */
4205 					net->net_ack = 0;
4206 				}
4207 				/* restore any doubled timers */
4208 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4209 				if (net->RTO < stcb->asoc.minrto) {
4210 					net->RTO = stcb->asoc.minrto;
4211 				}
4212 				if (net->RTO > stcb->asoc.maxrto) {
4213 					net->RTO = stcb->asoc.maxrto;
4214 				}
4215 			}
4216 		}
4217 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4218 	}
4219 	asoc->last_acked_seq = cumack;
4220 
4221 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4222 		/* nothing left in-flight */
4223 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4224 			net->flight_size = 0;
4225 			net->partial_bytes_acked = 0;
4226 		}
4227 		asoc->total_flight = 0;
4228 		asoc->total_flight_count = 0;
4229 	}
4230 
4231 	/* RWND update */
4232 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4233 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4234 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4235 		/* SWS sender side engages */
4236 		asoc->peers_rwnd = 0;
4237 	}
4238 	if (asoc->peers_rwnd > old_rwnd) {
4239 		win_probe_recovery = 1;
4240 	}
4241 	/* Now assure a timer where data is queued at */
4242 again:
4243 	j = 0;
4244 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4245 		if (win_probe_recovery && (net->window_probe)) {
4246 			win_probe_recovered = 1;
4247 			/*
4248 			 * Find the first chunk that was used with a window
4249 			 * probe and clear its sent state.
4250 			 */
4251 			/* sa_ignore FREED_MEMORY */
4252 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4253 				if (tp1->window_probe) {
4254 					/* move back to data send queue */
4255 					sctp_window_probe_recovery(stcb, asoc, tp1);
4256 					break;
4257 				}
4258 			}
4259 		}
4260 		if (net->flight_size) {
4261 			j++;
4262 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4263 			if (net->window_probe) {
4264 				net->window_probe = 0;
4265 			}
4266 		} else {
4267 			if (net->window_probe) {
4268 				/*
4269 				 * In window probes we must assure a timer
4270 				 * is still running there
4271 				 */
4272 				net->window_probe = 0;
4273 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4274 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4275 				}
4276 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4277 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4278 				    stcb, net,
4279 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4280 			}
4281 		}
4282 	}
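	/*
	 * Illustrative summary of the loop above: every net that still has
	 * data in flight keeps a running T3-send timer; a net with an empty
	 * flight either keeps a timer for an outstanding window probe or has
	 * its T3-send timer stopped.
	 */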
4283 	if ((j == 0) &&
4284 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4285 	    (asoc->sent_queue_retran_cnt == 0) &&
4286 	    (win_probe_recovered == 0) &&
4287 	    (done_once == 0)) {
4288 		/*
4289 		 * huh, this should not happen unless all packets are
4290 		 * PR-SCTP and marked to skip of course.
4291 		 */
4292 		if (sctp_fs_audit(asoc)) {
4293 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4294 				net->flight_size = 0;
4295 			}
4296 			asoc->total_flight = 0;
4297 			asoc->total_flight_count = 0;
4298 			asoc->sent_queue_retran_cnt = 0;
4299 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4300 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4301 					sctp_flight_size_increase(tp1);
4302 					sctp_total_flight_increase(stcb, tp1);
4303 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4304 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4305 				}
4306 			}
4307 		}
4308 		done_once = 1;
4309 		goto again;
4310 	}
4311 	/**********************************/
4312 	/* Now what about shutdown issues */
4313 	/**********************************/
4314 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4315 		/* nothing left on sendqueue; consider done */
4316 		/* clean up */
4317 		if ((asoc->stream_queue_cnt == 1) &&
4318 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4319 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4320 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4321 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4322 		}
4323 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4324 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4325 		    (asoc->stream_queue_cnt == 1) &&
4326 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4327 			struct mbuf *op_err;
4328 
4329 			*abort_now = 1;
4330 			/* XXX */
4331 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4332 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4333 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4334 			return;
4335 		}
4336 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4337 		    (asoc->stream_queue_cnt == 0)) {
4338 			struct sctp_nets *netp;
4339 
4340 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4341 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4342 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4343 			}
4344 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4345 			sctp_stop_timers_for_shutdown(stcb);
4346 			if (asoc->alternate) {
4347 				netp = asoc->alternate;
4348 			} else {
4349 				netp = asoc->primary_destination;
4350 			}
4351 			sctp_send_shutdown(stcb, netp);
4352 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4353 			    stcb->sctp_ep, stcb, netp);
4354 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4355 			    stcb->sctp_ep, stcb, NULL);
4356 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4357 		    (asoc->stream_queue_cnt == 0)) {
4358 			struct sctp_nets *netp;
4359 
4360 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4361 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4362 			sctp_stop_timers_for_shutdown(stcb);
4363 			if (asoc->alternate) {
4364 				netp = asoc->alternate;
4365 			} else {
4366 				netp = asoc->primary_destination;
4367 			}
4368 			sctp_send_shutdown_ack(stcb, netp);
4369 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4370 			    stcb->sctp_ep, stcb, netp);
4371 		}
4372 	}
4373 	/*********************************************/
4374 	/* Here we perform PR-SCTP procedures        */
4375 	/* (section 4.2)                             */
4376 	/*********************************************/
4377 	/* C1. update advancedPeerAckPoint */
4378 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4379 		asoc->advanced_peer_ack_point = cumack;
4380 	}
4381 	/* PR-Sctp issues need to be addressed too */
4382 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4383 		struct sctp_tmit_chunk *lchk;
4384 		uint32_t old_adv_peer_ack_point;
4385 
4386 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4387 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4388 		/* C3. See if we need to send a Fwd-TSN */
4389 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4390 			/*
4391 			 * ISSUE with ECN, see FWD-TSN processing.
4392 			 */
4393 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4394 				send_forward_tsn(stcb, asoc);
4395 			} else if (lchk) {
4396 				/* try to FR fwd-tsn's that get lost too */
4397 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4398 					send_forward_tsn(stcb, asoc);
4399 				}
4400 			}
4401 		}
4402 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4403 			if (lchk->whoTo != NULL) {
4404 				break;
4405 			}
4406 		}
4407 		if (lchk != NULL) {
4408 			/* Assure a timer is up */
4409 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4410 			    stcb->sctp_ep, stcb, lchk->whoTo);
4411 		}
4412 	}
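	/*
	 * Example (illustrative): if cumack is 100 and TSNs 101-103 were
	 * abandoned under PR-SCTP, advanced_peer_ack_point moves to 103 and
	 * a FORWARD-TSN advertising 103 is sent so the peer can deliver
	 * past the hole.
	 */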
4413 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4414 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4415 		    rwnd,
4416 		    stcb->asoc.peers_rwnd,
4417 		    stcb->asoc.total_flight,
4418 		    stcb->asoc.total_output_queue_size);
4419 	}
4420 }
4421 
4422 void
4423 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4424     struct sctp_tcb *stcb,
4425     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4426     int *abort_now, uint8_t flags,
4427     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4428 {
4429 	struct sctp_association *asoc;
4430 	struct sctp_tmit_chunk *tp1, *tp2;
4431 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4432 	uint16_t wake_him = 0;
4433 	uint32_t send_s = 0;
4434 	long j;
4435 	int accum_moved = 0;
4436 	int will_exit_fast_recovery = 0;
4437 	uint32_t a_rwnd, old_rwnd;
4438 	int win_probe_recovery = 0;
4439 	int win_probe_recovered = 0;
4440 	struct sctp_nets *net = NULL;
4441 	int done_once;
4442 	int rto_ok = 1;
4443 	uint8_t reneged_all = 0;
4444 	uint8_t cmt_dac_flag;
4445 
4446 	/*
4447 	 * we take any chance we can to service our queues since we cannot
4448 	 * get awoken when the socket is read from :<
4449 	 */
4450 	/*
4451 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4452 	 * old sack, if so discard. 2) If there is nothing left in the send
4453 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4454 	 * too, update any rwnd change and verify no timers are running,
4455 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4456 	 * moved process these first and note that it moved. 4) Process any
4457 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4458 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4459 	 * sync up flightsizes and things, stop all timers and also check
4460 	 * for shutdown_pending state. If so then go ahead and send off the
4461 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4462 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4463 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4464 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4465 	 * if in shutdown_recv state.
4466 	 */
4467 	SCTP_TCB_LOCK_ASSERT(stcb);
4468 	/* CMT DAC algo */
4469 	this_sack_lowest_newack = 0;
4470 	SCTP_STAT_INCR(sctps_slowpath_sack);
4471 	last_tsn = cum_ack;
4472 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4473 #ifdef SCTP_ASOCLOG_OF_TSNS
4474 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4475 	stcb->asoc.cumack_log_at++;
4476 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4477 		stcb->asoc.cumack_log_at = 0;
4478 	}
4479 #endif
4480 	a_rwnd = rwnd;
4481 
4482 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4483 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4484 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4485 	}
4486 
4487 	old_rwnd = stcb->asoc.peers_rwnd;
4488 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4489 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4490 		    stcb->asoc.overall_error_count,
4491 		    0,
4492 		    SCTP_FROM_SCTP_INDATA,
4493 		    __LINE__);
4494 	}
4495 	stcb->asoc.overall_error_count = 0;
4496 	asoc = &stcb->asoc;
4497 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4498 		sctp_log_sack(asoc->last_acked_seq,
4499 		    cum_ack,
4500 		    0,
4501 		    num_seg,
4502 		    num_dup,
4503 		    SCTP_LOG_NEW_SACK);
4504 	}
4505 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4506 		uint16_t i;
4507 		uint32_t *dupdata, dblock;
4508 
4509 		for (i = 0; i < num_dup; i++) {
4510 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4511 			    sizeof(uint32_t), (uint8_t *)&dblock);
4512 			if (dupdata == NULL) {
4513 				break;
4514 			}
4515 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4516 		}
4517 	}
4518 	/* reality check */
4519 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4520 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4521 		    sctpchunk_listhead);
4522 		send_s = tp1->rec.data.tsn + 1;
4523 	} else {
4524 		tp1 = NULL;
4525 		send_s = asoc->sending_seq;
4526 	}
4527 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4528 		struct mbuf *op_err;
4529 		char msg[SCTP_DIAG_INFO_LEN];
4530 
4531 		/*
4532 		 * no way, we have not even sent this TSN out yet. Peer is
4533 		 * hopelessly messed up with us.
4534 		 */
4535 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4536 		    cum_ack, send_s);
4537 		if (tp1) {
4538 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4539 			    tp1->rec.data.tsn, (void *)tp1);
4540 		}
4541 hopeless_peer:
4542 		*abort_now = 1;
4543 		/* XXX */
4544 		SCTP_SNPRINTF(msg, sizeof(msg),
4545 		    "Cum ack %8.8x greater than or equal to TSN %8.8x",
4546 		    cum_ack, send_s);
4547 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4548 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4549 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4550 		return;
4551 	}
4552 	/**********************/
4553 	/* 1) check the range */
4554 	/**********************/
4555 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4556 		/* acking something behind */
4557 		return;
4558 	}
4559 
4560 	/* update the Rwnd of the peer */
4561 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4562 	    TAILQ_EMPTY(&asoc->send_queue) &&
4563 	    (asoc->stream_queue_cnt == 0)) {
4564 		/* nothing left on send/sent and strmq */
4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4566 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4567 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4568 		}
4569 		asoc->peers_rwnd = a_rwnd;
4570 		if (asoc->sent_queue_retran_cnt) {
4571 			asoc->sent_queue_retran_cnt = 0;
4572 		}
4573 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4574 			/* SWS sender side engages */
4575 			asoc->peers_rwnd = 0;
4576 		}
4577 		/* stop any timers */
4578 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4580 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4581 			net->partial_bytes_acked = 0;
4582 			net->flight_size = 0;
4583 		}
4584 		asoc->total_flight = 0;
4585 		asoc->total_flight_count = 0;
4586 		return;
4587 	}
4588 	/*
4589 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4590 	 * things. The total byte count acked is tracked in net_ack AND
4591 	 * net_ack2 is used to track the total bytes acked that are
4592 	 * unambiguous and were never retransmitted. We track these on a per
4593 	 * destination address basis.
4594 	 */
4595 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4596 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4597 			/* Drag along the window_tsn for cwr's */
4598 			net->cwr_window_tsn = cum_ack;
4599 		}
4600 		net->prev_cwnd = net->cwnd;
4601 		net->net_ack = 0;
4602 		net->net_ack2 = 0;
4603 
4604 		/*
4605 		 * CMT: Reset CUC and Fast recovery algo variables before
4606 		 * SACK processing
4607 		 */
4608 		net->new_pseudo_cumack = 0;
4609 		net->will_exit_fast_recovery = 0;
4610 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4611 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4612 		}
4613 
4614 		/*
4615 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4616 		 * to be greater than the cumack. Also reset saw_newack to 0
4617 		 * for all dests.
4618 		 */
4619 		net->saw_newack = 0;
4620 		net->this_sack_highest_newack = last_tsn;
4621 	}
4622 	/* process the new consecutive TSN first */
4623 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4624 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4625 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4626 				accum_moved = 1;
4627 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4628 					/*
4629 					 * If it is less than ACKED, it is
4630 					 * now no longer in flight. Higher
4631 					 * values may occur during marking.
4632 					 */
4633 					if ((tp1->whoTo->dest_state &
4634 					    SCTP_ADDR_UNCONFIRMED) &&
4635 					    (tp1->snd_count < 2)) {
4636 						/*
4637 						 * If there was no retran
4638 						 * and the address is
4639 						 * un-confirmed and we sent
4640 						 * there and are now
4641 						 * sacked, it's confirmed,
4642 						 * mark it so.
4643 						 */
4644 						tp1->whoTo->dest_state &=
4645 						    ~SCTP_ADDR_UNCONFIRMED;
4646 					}
4647 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4648 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4649 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4650 							    tp1->whoTo->flight_size,
4651 							    tp1->book_size,
4652 							    (uint32_t)(uintptr_t)tp1->whoTo,
4653 							    tp1->rec.data.tsn);
4654 						}
4655 						sctp_flight_size_decrease(tp1);
4656 						sctp_total_flight_decrease(stcb, tp1);
4657 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4658 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4659 							    tp1);
4660 						}
4661 					}
4662 					tp1->whoTo->net_ack += tp1->send_size;
4663 
4664 					/* CMT SFR and DAC algos */
4665 					this_sack_lowest_newack = tp1->rec.data.tsn;
4666 					tp1->whoTo->saw_newack = 1;
4667 
4668 					if (tp1->snd_count < 2) {
4669 						/*
4670 						 * True non-retransmitted
4671 						 * chunk
4672 						 */
4673 						tp1->whoTo->net_ack2 +=
4674 						    tp1->send_size;
4675 
4676 						/* update RTO too? */
4677 						if (tp1->do_rtt) {
4678 							if (rto_ok &&
4679 							    sctp_calculate_rto(stcb,
4680 							    &stcb->asoc,
4681 							    tp1->whoTo,
4682 							    &tp1->sent_rcv_time,
4683 							    SCTP_RTT_FROM_DATA)) {
4684 								rto_ok = 0;
4685 							}
4686 							if (tp1->whoTo->rto_needed == 0) {
4687 								tp1->whoTo->rto_needed = 1;
4688 							}
4689 							tp1->do_rtt = 0;
4690 						}
4691 					}
4692 					/*
4693 					 * CMT: CUCv2 algorithm. From the
4694 					 * cumack'd TSNs, for each TSN being
4695 					 * acked for the first time, set the
4696 					 * following variables for the
4697 					 * corresp destination.
4698 					 * new_pseudo_cumack will trigger a
4699 					 * cwnd update.
4700 					 * find_(rtx_)pseudo_cumack will
4701 					 * trigger search for the next
4702 					 * expected (rtx-)pseudo-cumack.
4703 					 */
4704 					tp1->whoTo->new_pseudo_cumack = 1;
4705 					tp1->whoTo->find_pseudo_cumack = 1;
4706 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4707 
4708 
4709 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4710 						sctp_log_sack(asoc->last_acked_seq,
4711 						    cum_ack,
4712 						    tp1->rec.data.tsn,
4713 						    0,
4714 						    0,
4715 						    SCTP_LOG_TSN_ACKED);
4716 					}
4717 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4718 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4719 					}
4720 				}
4721 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4722 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4723 #ifdef SCTP_AUDITING_ENABLED
4724 					sctp_audit_log(0xB3,
4725 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4726 #endif
4727 				}
4728 				if (tp1->rec.data.chunk_was_revoked) {
4729 					/* deflate the cwnd */
4730 					tp1->whoTo->cwnd -= tp1->book_size;
4731 					tp1->rec.data.chunk_was_revoked = 0;
4732 				}
4733 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4734 					tp1->sent = SCTP_DATAGRAM_ACKED;
4735 				}
4736 			}
4737 		} else {
4738 			break;
4739 		}
4740 	}
4741 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4742 	/* always set this up to cum-ack */
4743 	asoc->this_sack_highest_gap = last_tsn;
4744 
4745 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4746 
4747 		/*
4748 		 * thisSackHighestGap will increase while handling NEW
4749 		 * segments; this_sack_highest_newack will increase while
4750 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4751 		 * used for CMT DAC algo. saw_newack will also change.
4752 		 */
4753 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4754 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4755 		    num_seg, num_nr_seg, &rto_ok)) {
4756 			wake_him++;
4757 		}
4758 		/*
4759 		 * validate the biggest_tsn_acked in the gap acks if strict
4760 		 * adherence is wanted.
4761 		 */
4762 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4763 			/*
4764 			 * peer is either confused or we are under attack.
4765 			 * We must abort.
4766 			 */
4767 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4768 			    biggest_tsn_acked, send_s);
4769 			goto hopeless_peer;
4770 		}
4771 	}
4772 	/*******************************************/
4773 	/* cancel ALL T3-send timers if accum moved */
4774 	/*******************************************/
4775 	if (asoc->sctp_cmt_on_off > 0) {
4776 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4777 			if (net->new_pseudo_cumack)
4778 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4779 				    stcb, net,
4780 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4781 
4782 		}
4783 	} else {
4784 		if (accum_moved) {
4785 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4786 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4787 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4788 			}
4789 		}
4790 	}
4791 	/********************************************/
4792 	/* drop the acked chunks from the sent queue */
4793 	/********************************************/
4794 	asoc->last_acked_seq = cum_ack;
4795 
4796 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4797 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4798 			break;
4799 		}
4800 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4801 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4802 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4803 #ifdef INVARIANTS
4804 			} else {
4805 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4806 #endif
4807 			}
4808 		}
4809 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4810 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4811 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4812 			asoc->trigger_reset = 1;
4813 		}
4814 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4815 		if (PR_SCTP_ENABLED(tp1->flags)) {
4816 			if (asoc->pr_sctp_cnt != 0)
4817 				asoc->pr_sctp_cnt--;
4818 		}
4819 		asoc->sent_queue_cnt--;
4820 		if (tp1->data) {
4821 			/* sa_ignore NO_NULL_CHK */
4822 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4823 			sctp_m_freem(tp1->data);
4824 			tp1->data = NULL;
4825 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4826 				asoc->sent_queue_cnt_removeable--;
4827 			}
4828 		}
4829 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4830 			sctp_log_sack(asoc->last_acked_seq,
4831 			    cum_ack,
4832 			    tp1->rec.data.tsn,
4833 			    0,
4834 			    0,
4835 			    SCTP_LOG_FREE_SENT);
4836 		}
4837 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4838 		wake_him++;
4839 	}
4840 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4841 #ifdef INVARIANTS
4842 		panic("Warning flight size is positive and should be 0");
4843 #else
4844 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4845 		    asoc->total_flight);
4846 #endif
4847 		asoc->total_flight = 0;
4848 	}
4849 
4850 	/* sa_ignore NO_NULL_CHK */
4851 	if ((wake_him) && (stcb->sctp_socket)) {
4852 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4853 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4854 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4855 		}
4856 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4857 	} else {
4858 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4859 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4860 		}
4861 	}
4862 
4863 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4864 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4865 			/* Setup so we will exit RFC2582 fast recovery */
4866 			will_exit_fast_recovery = 1;
4867 		}
4868 	}
4869 	/*
4870 	 * Check for revoked fragments:
4871 	 *
4872 	 * If the previous SACK had no gap-ack blocks, nothing can have been
4873 	 * revoked. If it did and this one does too (num_seg > 0), call
4874 	 * sctp_check_for_revoked() to tell whether the peer revoked some of
4875 	 * them. Otherwise the peer revoked all previously ACKED fragments,
4876 	 * since we had some before and now we have NONE.
4877 	 */
4878 
4879 	if (num_seg) {
4880 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4881 		asoc->saw_sack_with_frags = 1;
4882 	} else if (asoc->saw_sack_with_frags) {
4883 		int cnt_revoked = 0;
4884 
4885 		/* Peer revoked all datagrams previously marked or acked */
4886 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4887 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4888 				tp1->sent = SCTP_DATAGRAM_SENT;
4889 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4890 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4891 					    tp1->whoTo->flight_size,
4892 					    tp1->book_size,
4893 					    (uint32_t)(uintptr_t)tp1->whoTo,
4894 					    tp1->rec.data.tsn);
4895 				}
4896 				sctp_flight_size_increase(tp1);
4897 				sctp_total_flight_increase(stcb, tp1);
4898 				tp1->rec.data.chunk_was_revoked = 1;
4899 				/*
4900 				 * To ensure that this increase in
4901 				 * flightsize, which is artificial, does not
4902 				 * throttle the sender, we also increase the
4903 				 * cwnd artificially.
4904 				 */
4905 				tp1->whoTo->cwnd += tp1->book_size;
4906 				cnt_revoked++;
4907 			}
4908 		}
4909 		if (cnt_revoked) {
4910 			reneged_all = 1;
4911 		}
4912 		asoc->saw_sack_with_frags = 0;
4913 	}
4914 	if (num_nr_seg > 0)
4915 		asoc->saw_sack_with_nr_frags = 1;
4916 	else
4917 		asoc->saw_sack_with_nr_frags = 0;
4918 
4919 	/* JRS - Use the congestion control given in the CC module */
4920 	if (ecne_seen == 0) {
4921 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4922 			if (net->net_ack2 > 0) {
4923 				/*
4924 				 * Karn's rule applies to clearing the
4925 				 * error count; this is optional.
4926 				 */
4927 				net->error_count = 0;
4928 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4929 					/* address became reachable again */
4930 					net->dest_state |= SCTP_ADDR_REACHABLE;
4931 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4932 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4933 				}
4934 
4935 				if (net == stcb->asoc.primary_destination) {
4936 					if (stcb->asoc.alternate) {
4937 						/*
4938 						 * release the alternate,
4939 						 * primary is good
4940 						 */
4941 						sctp_free_remote_addr(stcb->asoc.alternate);
4942 						stcb->asoc.alternate = NULL;
4943 					}
4944 				}
4945 
4946 				if (net->dest_state & SCTP_ADDR_PF) {
4947 					net->dest_state &= ~SCTP_ADDR_PF;
4948 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4949 					    stcb->sctp_ep, stcb, net,
4950 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4951 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4952 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4953 					/* Done with this net */
4954 					net->net_ack = 0;
4955 				}
4956 				/* restore any doubled timers */
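				/*
				 * lastsa holds the smoothed RTT scaled by
				 * SCTP_RTT_SHIFT and lastsv the variance
				 * term, so this recomputes the base RTO and
				 * then clamps it to [minrto, maxrto].
				 */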
4957 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4958 				if (net->RTO < stcb->asoc.minrto) {
4959 					net->RTO = stcb->asoc.minrto;
4960 				}
4961 				if (net->RTO > stcb->asoc.maxrto) {
4962 					net->RTO = stcb->asoc.maxrto;
4963 				}
4964 			}
4965 		}
4966 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4967 	}
4968 
4969 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4970 		/* nothing left in-flight */
4971 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4972 			/* stop all timers */
4973 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4974 			    stcb, net,
4975 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4976 			net->flight_size = 0;
4977 			net->partial_bytes_acked = 0;
4978 		}
4979 		asoc->total_flight = 0;
4980 		asoc->total_flight_count = 0;
4981 	}
4982 
4983 	/**********************************/
4984 	/* Now what about shutdown issues */
4985 	/**********************************/
4986 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4987 		/* nothing left on the send queue... consider it done */
4988 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4989 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4990 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4991 		}
4992 		asoc->peers_rwnd = a_rwnd;
4993 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4994 			/* sender-side SWS (silly window syndrome) avoidance engages */
4995 			asoc->peers_rwnd = 0;
4996 		}
4997 		/* clean up */
4998 		if ((asoc->stream_queue_cnt == 1) &&
4999 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5000 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5001 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5002 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5003 		}
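		/*
		 * Shutdown was requested but a user message is still only
		 * partially queued; the association must be aborted.
		 */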
5004 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5005 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5006 		    (asoc->stream_queue_cnt == 1) &&
5007 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5008 			struct mbuf *op_err;
5009 
5010 			*abort_now = 1;
5011 			/* XXX */
5012 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5013 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5014 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5015 			return;
5016 		}
5017 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5018 		    (asoc->stream_queue_cnt == 0)) {
5019 			struct sctp_nets *netp;
5020 
5021 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5022 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5023 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5024 			}
5025 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5026 			sctp_stop_timers_for_shutdown(stcb);
5027 			if (asoc->alternate) {
5028 				netp = asoc->alternate;
5029 			} else {
5030 				netp = asoc->primary_destination;
5031 			}
5032 			sctp_send_shutdown(stcb, netp);
5033 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5034 			    stcb->sctp_ep, stcb, netp);
5035 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5036 			    stcb->sctp_ep, stcb, NULL);
5037 			return;
5038 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5039 		    (asoc->stream_queue_cnt == 0)) {
5040 			struct sctp_nets *netp;
5041 
5042 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5043 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5044 			sctp_stop_timers_for_shutdown(stcb);
5045 			if (asoc->alternate) {
5046 				netp = asoc->alternate;
5047 			} else {
5048 				netp = asoc->primary_destination;
5049 			}
5050 			sctp_send_shutdown_ack(stcb, netp);
5051 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5052 			    stcb->sctp_ep, stcb, netp);
5053 			return;
5054 		}
5055 	}
5056 	/*
5057 	 * Now here we are going to recycle net_ack for a different use...
5058 	 * HEADS UP.
5059 	 */
5060 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5061 		net->net_ack = 0;
5062 	}
5063 
5064 	/*
5065 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5066 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5067 	 * automatically ensure that.
5068 	 */
5069 	if ((asoc->sctp_cmt_on_off > 0) &&
5070 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5071 	    (cmt_dac_flag == 0)) {
5072 		this_sack_lowest_newack = cum_ack;
5073 	}
5074 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5075 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5076 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5077 	}
5078 	/* JRS - Use the congestion control given in the CC module */
5079 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5080 
5081 	/* Now are we exiting loss recovery ? */
5082 	if (will_exit_fast_recovery) {
5083 		/* Ok, we must exit fast recovery */
5084 		asoc->fast_retran_loss_recovery = 0;
5085 	}
5086 	if ((asoc->sat_t3_loss_recovery) &&
5087 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5088 		/* end satellite t3 loss recovery */
5089 		asoc->sat_t3_loss_recovery = 0;
5090 	}
5091 	/*
5092 	 * CMT Fast recovery
5093 	 */
5094 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5095 		if (net->will_exit_fast_recovery) {
5096 			/* Ok, we must exit fast recovery */
5097 			net->fast_retran_loss_recovery = 0;
5098 		}
5099 	}
5100 
5101 	/* Adjust and set the new rwnd value */
5102 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5103 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5104 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5105 	}
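	/*
	 * New peer rwnd is the advertised a_rwnd less the data still in
	 * flight, charging sctp_peer_chunk_oh bytes of overhead per
	 * outstanding chunk; sctp_sbspace_sub() floors the result at zero.
	 */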
5106 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5107 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5108 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5109 		/* sender-side SWS (silly window syndrome) avoidance engages */
5110 		asoc->peers_rwnd = 0;
5111 	}
5112 	if (asoc->peers_rwnd > old_rwnd) {
5113 		win_probe_recovery = 1;
5114 	}
5115 
5116 	/*
5117 	 * Now we must set up a timer for every destination with
5118 	 * outstanding data.
5119 	 */
5120 	done_once = 0;
5121 again:
5122 	j = 0;
5123 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5124 		if (win_probe_recovery && (net->window_probe)) {
5125 			win_probe_recovered = 1;
5126 			/*-
5127 			 * Find the first chunk that was used for a
5128 			 * window probe and clear the event. Put it
5129 			 * back into the send queue as if it had
5130 			 * not been sent.
5131 			 */
5132 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5133 				if (tp1->window_probe) {
5134 					sctp_window_probe_recovery(stcb, asoc, tp1);
5135 					break;
5136 				}
5137 			}
5138 		}
5139 		if (net->flight_size) {
5140 			j++;
5141 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5142 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5143 				    stcb->sctp_ep, stcb, net);
5144 			}
5145 			if (net->window_probe) {
5146 				net->window_probe = 0;
5147 			}
5148 		} else {
5149 			if (net->window_probe) {
5150 				/*
5151 				 * For window probes we must ensure that
5152 				 * a timer is still running there.
5153 				 */
5154 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5155 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5156 					    stcb->sctp_ep, stcb, net);
5157 
5158 				}
5159 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5160 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5161 				    stcb, net,
5162 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5163 			}
5164 		}
5165 	}
5166 	if ((j == 0) &&
5167 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5168 	    (asoc->sent_queue_retran_cnt == 0) &&
5169 	    (win_probe_recovered == 0) &&
5170 	    (done_once == 0)) {
5171 		/*
5172 		 * This should not happen unless all packets are
5173 		 * PR-SCTP and marked to be skipped, of course.
5174 		 */
5175 		if (sctp_fs_audit(asoc)) {
5176 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5177 				net->flight_size = 0;
5178 			}
5179 			asoc->total_flight = 0;
5180 			asoc->total_flight_count = 0;
5181 			asoc->sent_queue_retran_cnt = 0;
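			/*
			 * Rebuild the flight-size accounting from scratch by
			 * walking the sent queue.
			 */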
5182 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5183 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5184 					sctp_flight_size_increase(tp1);
5185 					sctp_total_flight_increase(stcb, tp1);
5186 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5187 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5188 				}
5189 			}
5190 		}
5191 		done_once = 1;
5192 		goto again;
5193 	}
5194 	/*********************************************/
5195 	/* Here we perform PR-SCTP procedures        */
5196 	/* (section 4.2)                             */
5197 	/*********************************************/
5198 	/* C1. update advancedPeerAckPoint */
5199 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5200 		asoc->advanced_peer_ack_point = cum_ack;
5201 	}
5202 	/* C2. try to further move advancedPeerAckPoint ahead */
5203 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5204 		struct sctp_tmit_chunk *lchk;
5205 		uint32_t old_adv_peer_ack_point;
5206 
5207 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5208 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5209 		/* C3. See if we need to send a Fwd-TSN */
5210 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5211 			/*
5212 			 * ISSUE with ECN, see FWD-TSN processing.
5213 			 */
5214 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5215 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5216 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5217 				    old_adv_peer_ack_point);
5218 			}
5219 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5220 				send_forward_tsn(stcb, asoc);
5221 			} else if (lchk) {
5222 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5223 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5224 					send_forward_tsn(stcb, asoc);
5225 				}
5226 			}
5227 		}
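		/*
		 * Find the first chunk that still has a destination so a
		 * T3 timer can be started (or kept running) for it below.
		 */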
5228 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5229 			if (lchk->whoTo != NULL) {
5230 				break;
5231 			}
5232 		}
5233 		if (lchk != NULL) {
5234 			/* Assure a timer is up */
5235 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5236 			    stcb->sctp_ep, stcb, lchk->whoTo);
5237 		}
5238 	}
5239 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5240 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5241 		    a_rwnd,
5242 		    stcb->asoc.peers_rwnd,
5243 		    stcb->asoc.total_flight,
5244 		    stcb->asoc.total_output_queue_size);
5245 	}
5246 }
5247 
5248 void
5249 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5250 {
5251 	/* Copy cum-ack */
5252 	uint32_t cum_ack, a_rwnd;
5253 
5254 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5255 	/* Arrange so a_rwnd does NOT change */
5256 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
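	/*
	 * peers_rwnd was last computed as a_rwnd minus the flight size, so
	 * adding total_flight back approximately reconstructs the window
	 * the peer last advertised (ignoring per-chunk overhead).
	 */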
5257 
5258 	/* Now call the express sack handling */
5259 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5260 }
5261 
5262 static void
5263 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5264     struct sctp_stream_in *strmin)
5265 {
5266 	struct sctp_queued_to_read *control, *ncontrol;
5267 	struct sctp_association *asoc;
5268 	uint32_t mid;
5269 	int need_reasm_check = 0;
5270 
5271 	asoc = &stcb->asoc;
5272 	mid = strmin->last_mid_delivered;
5273 	/*
5274 	 * First deliver anything prior to and including the message ID
5275 	 * that came in.
5276 	 */
5277 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5278 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5279 			/* this is deliverable now */
5280 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5281 				if (control->on_strm_q) {
5282 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5283 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5284 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5285 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5286 #ifdef INVARIANTS
5287 					} else {
5288 						panic("strmin: %p ctl: %p unknown %d",
5289 						    strmin, control, control->on_strm_q);
5290 #endif
5291 					}
5292 					control->on_strm_q = 0;
5293 				}
5294 				/* subtract pending on streams */
5295 				if (asoc->size_on_all_streams >= control->length) {
5296 					asoc->size_on_all_streams -= control->length;
5297 				} else {
5298 #ifdef INVARIANTS
5299 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5300 #else
5301 					asoc->size_on_all_streams = 0;
5302 #endif
5303 				}
5304 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5305 				/* deliver it to at least the delivery-q */
5306 				if (stcb->sctp_socket) {
5307 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5308 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5309 					    control,
5310 					    &stcb->sctp_socket->so_rcv,
5311 					    1, SCTP_READ_LOCK_HELD,
5312 					    SCTP_SO_NOT_LOCKED);
5313 				}
5314 			} else {
5315 				/* It's a fragmented message */
5316 				if (control->first_frag_seen) {
5317 					/*
5318 					 * Make this the next to deliver;
5319 					 * we restore it later.
5320 					 */
5321 					strmin->last_mid_delivered = control->mid - 1;
5322 					need_reasm_check = 1;
5323 					break;
5324 				}
5325 			}
5326 		} else {
5327 			/* no more delivery now. */
5328 			break;
5329 		}
5330 	}
5331 	if (need_reasm_check) {
5332 		int ret;
5333 
5334 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5335 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5336 			/* Restore the next to deliver unless we are ahead */
5337 			strmin->last_mid_delivered = mid;
5338 		}
5339 		if (ret == 0) {
5340 			/* Left the partial message at the front of the queue */
5341 			return;
5342 		}
5343 		need_reasm_check = 0;
5344 	}
5345 	/*
5346 	 * Now we must deliver things in the queue the normal way, if any
5347 	 * are now ready.
5348 	 */
5349 	mid = strmin->last_mid_delivered + 1;
5350 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5351 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5352 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5353 				/* this is deliverable now */
5354 				if (control->on_strm_q) {
5355 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5356 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5357 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5358 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5359 #ifdef INVARIANTS
5360 					} else {
5361 						panic("strmin: %p ctl: %p unknown %d",
5362 						    strmin, control, control->on_strm_q);
5363 #endif
5364 					}
5365 					control->on_strm_q = 0;
5366 				}
5367 				/* subtract pending on streams */
5368 				if (asoc->size_on_all_streams >= control->length) {
5369 					asoc->size_on_all_streams -= control->length;
5370 				} else {
5371 #ifdef INVARIANTS
5372 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5373 #else
5374 					asoc->size_on_all_streams = 0;
5375 #endif
5376 				}
5377 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5378 				/* deliver it to at least the delivery-q */
5379 				strmin->last_mid_delivered = control->mid;
5380 				if (stcb->sctp_socket) {
5381 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5382 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5383 					    control,
5384 					    &stcb->sctp_socket->so_rcv, 1,
5385 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5386 
5387 				}
5388 				mid = strmin->last_mid_delivered + 1;
5389 			} else {
5390 				/* It's a fragmented message */
5391 				if (control->first_frag_seen) {
5392 					/*
5393 					 * Make this the next to
5394 					 * deliver.
5395 					 */
5396 					strmin->last_mid_delivered = control->mid - 1;
5397 					need_reasm_check = 1;
5398 					break;
5399 				}
5400 			}
5401 		} else {
5402 			break;
5403 		}
5404 	}
5405 	if (need_reasm_check) {
5406 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5407 	}
5408 }
5409 
5412 static void
5413 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5414     struct sctp_association *asoc,
5415     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5416 {
5417 	struct sctp_queued_to_read *control;
5418 	struct sctp_stream_in *strm;
5419 	struct sctp_tmit_chunk *chk, *nchk;
5420 	int cnt_removed = 0;
5421 
5422 	/*
5423 	 * For now, large messages held on the stream reassembly queue that
5424 	 * are complete will be tossed too. We could in theory do more work,
5425 	 * spinning through and stopping after dumping one message (i.e. on
5426 	 * seeing the start of a new message at the head) and calling the
5427 	 * delivery function to see if it can be delivered. But for now we
5428 	 * just dump everything on the queue.
5429 	 */
5430 	strm = &asoc->strmin[stream];
5431 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5432 	if (control == NULL) {
5433 		/* Not found */
5434 		return;
5435 	}
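	/*
	 * Without I-DATA support, unordered fragments are keyed by TSN; if
	 * the fragment sequence already included (fsn_included) lies beyond
	 * the cumulative TSN, this FWD-TSN does not cover the message, so
	 * leave it alone.
	 */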
5436 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5437 		return;
5438 	}
5439 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5440 		/* Purge hanging chunks */
5441 		if (!asoc->idata_supported && (ordered == 0)) {
5442 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5443 				break;
5444 			}
5445 		}
5446 		cnt_removed++;
5447 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5448 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5449 			asoc->size_on_reasm_queue -= chk->send_size;
5450 		} else {
5451 #ifdef INVARIANTS
5452 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5453 #else
5454 			asoc->size_on_reasm_queue = 0;
5455 #endif
5456 		}
5457 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5458 		if (chk->data) {
5459 			sctp_m_freem(chk->data);
5460 			chk->data = NULL;
5461 		}
5462 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5463 	}
5464 	if (!TAILQ_EMPTY(&control->reasm)) {
5465 		/* This has to be old data, unordered */
5466 		if (control->data) {
5467 			sctp_m_freem(control->data);
5468 			control->data = NULL;
5469 		}
5470 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5471 		chk = TAILQ_FIRST(&control->reasm);
5472 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5473 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5474 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5475 			    chk, SCTP_READ_LOCK_HELD);
5476 		}
5477 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5478 		return;
5479 	}
5480 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5481 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5482 		if (asoc->size_on_all_streams >= control->length) {
5483 			asoc->size_on_all_streams -= control->length;
5484 		} else {
5485 #ifdef INVARIANTS
5486 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5487 #else
5488 			asoc->size_on_all_streams = 0;
5489 #endif
5490 		}
5491 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5492 		control->on_strm_q = 0;
5493 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5494 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5495 		control->on_strm_q = 0;
5496 #ifdef INVARIANTS
5497 	} else if (control->on_strm_q) {
5498 		panic("strm: %p ctl: %p unknown %d",
5499 		    strm, control, control->on_strm_q);
5500 #endif
5501 	}
5502 	control->on_strm_q = 0;
5503 	if (control->on_read_q == 0) {
5504 		sctp_free_remote_addr(control->whoFrom);
5505 		if (control->data) {
5506 			sctp_m_freem(control->data);
5507 			control->data = NULL;
5508 		}
5509 		sctp_free_a_readq(stcb, control);
5510 	}
5511 }
5512 
5513 void
5514 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5515     struct sctp_forward_tsn_chunk *fwd,
5516     int *abort_flag, struct mbuf *m, int offset)
5517 {
5518 	/* The pr-sctp fwd tsn */
5519 	/*
5520 	 * Here we will perform all the data receiver side steps for
5521 	 * processing FwdTSN, as required by the pr-sctp draft.
5522 	 *
5523 	 * Assume we get FwdTSN(x):
5524 	 * 1) update local cumTSN to x
5525 	 * 2) try to further advance cumTSN to x + others we have
5526 	 * 3) examine and update re-ordering queue on pr-in-streams
5527 	 * 4) clean up re-assembly queue
5528 	 * 5) send a SACK to report where we are
5529 	 */
5530 	struct sctp_association *asoc;
5531 	uint32_t new_cum_tsn, gap;
5532 	unsigned int i, fwd_sz, m_size;
5533 	uint32_t str_seq;
5534 	struct sctp_stream_in *strm;
5535 	struct sctp_queued_to_read *control, *sv;
5536 
5537 	asoc = &stcb->asoc;
5538 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5539 		SCTPDBG(SCTP_DEBUG_INDATA1,
5540 		    "Bad size (too small) fwd-tsn\n");
5541 		return;
5542 	}
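	/* Number of TSNs the mapping array can represent (bytes * 8). */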
5543 	m_size = (stcb->asoc.mapping_array_size << 3);
5544 	/*************************************************************/
5545 	/* 1. Here we update local cumTSN and shift the bitmap array */
5546 	/*************************************************************/
5547 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5548 
5549 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5550 		/* Already got there ... */
5551 		return;
5552 	}
5553 	/*
5554 	 * now we know the new TSN is more advanced, let's find the actual
5555 	 * gap
5556 	 */
5557 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5558 	asoc->cumulative_tsn = new_cum_tsn;
5559 	if (gap >= m_size) {
5560 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5561 			struct mbuf *op_err;
5562 			char msg[SCTP_DIAG_INFO_LEN];
5563 
5564 			/*
5565 			 * Out of range (beyond the rwnd of single-byte
5566 			 * chunks we give out). This must be an attacker.
5567 			 */
5568 			*abort_flag = 1;
5569 			SCTP_SNPRINTF(msg, sizeof(msg),
5570 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5571 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5572 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5573 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5574 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5575 			return;
5576 		}
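		/*
		 * The new cumulative TSN lies beyond what the mapping
		 * arrays cover, so reset both arrays to start just past it.
		 */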
5577 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5578 
5579 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5580 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5581 		asoc->highest_tsn_inside_map = new_cum_tsn;
5582 
5583 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5584 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5585 
5586 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5587 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5588 		}
5589 	} else {
5590 		SCTP_TCB_LOCK_ASSERT(stcb);
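		/*
		 * Mark every TSN up to the new cumulative ack as present in
		 * the non-renegable mapping array.
		 */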
5591 		for (i = 0; i <= gap; i++) {
5592 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5593 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5594 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5595 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5596 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5597 				}
5598 			}
5599 		}
5600 	}
5601 	/*************************************************************/
5602 	/* 2. Clear up re-assembly queue                             */
5603 	/*************************************************************/
5604 
5605 	/* This is now done as part of clearing up the stream/seq */
5606 	if (asoc->idata_supported == 0) {
5607 		uint16_t sid;
5608 
5609 		/* Flush all the un-ordered data based on cum-tsn */
5610 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5611 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5612 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5613 		}
5614 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5615 	}
5616 	/*******************************************************/
5617 	/* 3. Update the PR-stream re-ordering queues and fix  */
5618 	/* delivery issues as needed.                          */
5619 	/*******************************************************/
5620 	fwd_sz -= sizeof(*fwd);
5621 	if (m && fwd_sz) {
5622 		/* New method. */
5623 		unsigned int num_str;
5624 		uint32_t mid, cur_mid;
5625 		uint16_t sid;
5626 		uint16_t ordered, flags;
5627 		struct sctp_strseq *stseq, strseqbuf;
5628 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5629 
5630 		offset += sizeof(*fwd);
5631 
5632 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
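		/*
		 * Each skipped-stream entry is a (sid, ssn) pair, or a
		 * (sid, flags, mid) triple when I-DATA is in use.
		 */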
5633 		if (asoc->idata_supported) {
5634 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5635 		} else {
5636 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5637 		}
5638 		for (i = 0; i < num_str; i++) {
5639 			if (asoc->idata_supported) {
5640 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5641 				    sizeof(struct sctp_strseq_mid),
5642 				    (uint8_t *)&strseqbuf_m);
5643 				offset += sizeof(struct sctp_strseq_mid);
5644 				if (stseq_m == NULL) {
5645 					break;
5646 				}
5647 				sid = ntohs(stseq_m->sid);
5648 				mid = ntohl(stseq_m->mid);
5649 				flags = ntohs(stseq_m->flags);
5650 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5651 					ordered = 0;
5652 				} else {
5653 					ordered = 1;
5654 				}
5655 			} else {
5656 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5657 				    sizeof(struct sctp_strseq),
5658 				    (uint8_t *)&strseqbuf);
5659 				offset += sizeof(struct sctp_strseq);
5660 				if (stseq == NULL) {
5661 					break;
5662 				}
5663 				sid = ntohs(stseq->sid);
5664 				mid = (uint32_t)ntohs(stseq->ssn);
5665 				ordered = 1;
5666 			}
5667 			/* now process this stream/sequence entry */
5670 
5671 			/*
5672 			 * Ok we now look for the stream/seq on the read
5673 			 * queue where it's not all delivered. If we find it,
5674 			 * we transmute the read entry into a PDI_ABORTED.
5675 			 */
5676 			if (sid >= asoc->streamincnt) {
5677 				/* screwed up streams, stop!  */
5678 				break;
5679 			}
5680 			if ((asoc->str_of_pdapi == sid) &&
5681 			    (asoc->ssn_of_pdapi == mid)) {
5682 				/*
5683 				 * If this is the one we were partially
5684 				 * delivering now then we no longer are.
5685 				 * Note this will change with the reassembly
5686 				 * re-write.
5687 				 */
5688 				asoc->fragmented_delivery_inprogress = 0;
5689 			}
5690 			strm = &asoc->strmin[sid];
5691 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5692 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5693 			}
5694 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5695 				if ((control->sinfo_stream == sid) &&
5696 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
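					/*
					 * Pack the stream id into the upper
					 * 16 bits and the SSN into the lower
					 * 16 for the PD-API aborted
					 * notification below.
					 */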
5697 					str_seq = (sid << 16) | (0x0000ffff & mid);
5698 					control->pdapi_aborted = 1;
5699 					sv = stcb->asoc.control_pdapi;
5700 					control->end_added = 1;
5701 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5702 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5703 						if (asoc->size_on_all_streams >= control->length) {
5704 							asoc->size_on_all_streams -= control->length;
5705 						} else {
5706 #ifdef INVARIANTS
5707 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5708 #else
5709 							asoc->size_on_all_streams = 0;
5710 #endif
5711 						}
5712 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5713 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5714 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5715 #ifdef INVARIANTS
5716 					} else if (control->on_strm_q) {
5717 						panic("strm: %p ctl: %p unknown %d",
5718 						    strm, control, control->on_strm_q);
5719 #endif
5720 					}
5721 					control->on_strm_q = 0;
5722 					stcb->asoc.control_pdapi = control;
5723 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5724 					    stcb,
5725 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5726 					    (void *)&str_seq,
5727 					    SCTP_SO_NOT_LOCKED);
5728 					stcb->asoc.control_pdapi = sv;
5729 					break;
5730 				} else if ((control->sinfo_stream == sid) &&
5731 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5732 					/* We are past our victim SSN */
5733 					break;
5734 				}
5735 			}
5736 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5737 				/* Update the sequence number */
5738 				strm->last_mid_delivered = mid;
5739 			}
5740 			/* now kick the stream the new way */
5741 			/* sa_ignore NO_NULL_CHK */
5742 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5743 		}
5744 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5745 	}
5746 	/*
5747 	 * Now slide things forward.
5748 	 */
5749 	sctp_slide_mapping_arrays(stcb);
5750 }
5751