xref: /freebsd/sys/netinet/sctp_indata.c (revision 767173cec2b2041e1f847bc8896092f9c1481242)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send, that is, and will be sending it) for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int hold_rlock);
70 
71 
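/*
 * Refresh the cached receive window (asoc->my_rwnd) from the current
 * socket and queue state.
 */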
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
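/*
 * A hypothetical worked example of the computation below: if
 * sctp_sbspace() reports 64000 bytes free, 8000 bytes sit on the
 * reassembly queue in 4 chunks, and 2000 bytes sit on the stream
 * queues in 2 entries (with MSIZE = 256), the window becomes
 * 64000 - (8000 + 4 * 256) - (2000 + 2 * 256) = 52464, less the
 * my_rwnd_control_len overhead.
 */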
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-2-m (one-to-many)
86 	 * socket, since sb_cc is the count that everyone has put up. When
87 	 * we re-write sctp_soreceive we will fix this so that ONLY this
88 	 * association's data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * Take out what has NOT been put on the socket queue and what we
109 	 * still hold for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnds? */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even if it is 0, to keep SWS avoidance engaged.
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 		read_queue_e->do_not_ref_stcb = 1;
169 	}
170 failed_build:
171 	return (read_queue_e);
172 }
173 
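/*
 * Build the ancillary data (cmsg) mbuf for a received message: an
 * SCTP_RCVINFO, an SCTP_NXTINFO, and/or an SCTP_SNDRCV/SCTP_EXTRCV
 * cmsg, depending on which socket options are enabled. Returns NULL
 * if the user wants no ancillary data or no mbuf can be allocated.
 */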
174 struct mbuf *
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176 {
177 	struct sctp_extrcvinfo *seinfo;
178 	struct sctp_sndrcvinfo *outinfo;
179 	struct sctp_rcvinfo *rcvinfo;
180 	struct sctp_nxtinfo *nxtinfo;
181 	struct cmsghdr *cmh;
182 	struct mbuf *ret;
183 	int len;
184 	int use_extended;
185 	int provide_nxt;
186 
187 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 		/* user does not want any ancillary data */
191 		return (NULL);
192 	}
193 
194 	len = 0;
195 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
197 	}
198 	seinfo = (struct sctp_extrcvinfo *)sinfo;
199 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
201 		provide_nxt = 1;
202 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 	} else {
204 		provide_nxt = 0;
205 	}
206 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
208 			use_extended = 1;
209 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
210 		} else {
211 			use_extended = 0;
212 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
213 		}
214 	} else {
215 		use_extended = 0;
216 	}
217 
218 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
219 	if (ret == NULL) {
220 		/* No space */
221 		return (ret);
222 	}
223 	SCTP_BUF_LEN(ret) = 0;
224 
225 	/* We need a CMSG header followed by the struct */
226 	cmh = mtod(ret, struct cmsghdr *);
227 	/*
228 	 * Make sure that there is no un-initialized padding between the
229 	 * cmsg header and cmsg data and after the cmsg data.
230 	 */
231 	memset(cmh, 0, len);
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 		cmh->cmsg_level = IPPROTO_SCTP;
234 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 		cmh->cmsg_type = SCTP_RCVINFO;
236 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 		rcvinfo->rcv_context = sinfo->sinfo_context;
244 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
245 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
247 	}
248 	if (provide_nxt) {
249 		cmh->cmsg_level = IPPROTO_SCTP;
250 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 		cmh->cmsg_type = SCTP_NXTINFO;
252 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 		nxtinfo->nxt_flags = 0;
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
260 		}
261 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
263 		}
264 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
269 	}
270 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 		cmh->cmsg_level = IPPROTO_SCTP;
272 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
273 		if (use_extended) {
274 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 			cmh->cmsg_type = SCTP_EXTRCV;
276 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
278 		} else {
279 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 			cmh->cmsg_type = SCTP_SNDRCV;
281 			*outinfo = *sinfo;
282 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
283 		}
284 	}
285 	return (ret);
286 }
287 
288 
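/*
 * Move a delivered TSN from the renegable mapping array into the
 * non-renegable one, so that a later SACK cannot revoke it, and pull
 * the highest-TSN markers along. A no-op unless sctp_do_drain is set,
 * since without draining we never renege.
 */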
289 static void
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
291 {
292 	uint32_t gap, i, cumackp1;
293 	int fnd = 0;
294 	int in_r = 0, in_nr = 0;
295 
296 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
297 		return;
298 	}
299 	cumackp1 = asoc->cumulative_tsn + 1;
300 	if (SCTP_TSN_GT(cumackp1, tsn)) {
301 		/*
302 		 * this tsn is behind the cum ack and thus we don't need to
303 		 * worry about it being moved from one mapping array to the other.
304 		 */
305 		return;
306 	}
307 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 	if ((in_r == 0) && (in_nr == 0)) {
311 #ifdef INVARIANTS
312 		panic("Things are really messed up now");
313 #else
314 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 		sctp_print_mapping_array(asoc);
316 #endif
317 	}
318 	if (in_nr == 0)
319 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
320 	if (in_r)
321 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 		asoc->highest_tsn_inside_nr_map = tsn;
324 	}
325 	if (tsn == asoc->highest_tsn_inside_map) {
326 		/* We must back down to see what the new highest is */
327 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 				asoc->highest_tsn_inside_map = i;
331 				fnd = 1;
332 				break;
333 			}
334 		}
335 		if (!fnd) {
336 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
337 		}
338 	}
339 }
340 
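/*
 * Insert a control into the stream's ordered or unordered queue,
 * keeping the queue sorted by MID. Returns 0 on success and -1 on a
 * duplicate MID (or a second entry on the old-style unordered queue),
 * in which case the caller aborts the association.
 */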
341 static int
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343     struct sctp_association *asoc,
344     struct sctp_queued_to_read *control)
345 {
346 	struct sctp_queued_to_read *at;
347 	struct sctp_readhead *q;
348 	uint8_t flags, unordered;
349 
350 	flags = (control->sinfo_flags >> 8);
351 	unordered = flags & SCTP_DATA_UNORDERED;
352 	if (unordered) {
353 		q = &strm->uno_inqueue;
354 		if (asoc->idata_supported == 0) {
355 			if (!TAILQ_EMPTY(q)) {
356 				/*
357 				 * Only one stream can be here in old style
358 				 * -- abort
359 				 */
360 				return (-1);
361 			}
362 			TAILQ_INSERT_TAIL(q, control, next_instrm);
363 			control->on_strm_q = SCTP_ON_UNORDERED;
364 			return (0);
365 		}
366 	} else {
367 		q = &strm->inqueue;
368 	}
369 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 		control->end_added = 1;
371 		control->first_frag_seen = 1;
372 		control->last_frag_seen = 1;
373 	}
374 	if (TAILQ_EMPTY(q)) {
375 		/* Empty queue */
376 		TAILQ_INSERT_HEAD(q, control, next_instrm);
377 		if (unordered) {
378 			control->on_strm_q = SCTP_ON_UNORDERED;
379 		} else {
380 			control->on_strm_q = SCTP_ON_ORDERED;
381 		}
382 		return (0);
383 	} else {
384 		TAILQ_FOREACH(at, q, next_instrm) {
385 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
386 				/*
387 				 * one in queue is bigger than the new one,
388 				 * insert before this one
389 				 */
390 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
391 				if (unordered) {
392 					control->on_strm_q = SCTP_ON_UNORDERED;
393 				} else {
394 					control->on_strm_q = SCTP_ON_ORDERED;
395 				}
396 				break;
397 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
398 				/*
399 				 * Gak, he sent me a duplicate msg id
400 				 * number? Return -1 to abort.
401 				 */
402 				return (-1);
403 			} else {
404 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
405 					/*
406 					 * We are at the end, insert it
407 					 * after this one
408 					 */
409 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 						sctp_log_strm_del(control, at,
411 						    SCTP_STR_LOG_FROM_INSERT_TL);
412 					}
413 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
414 					if (unordered) {
415 						control->on_strm_q = SCTP_ON_UNORDERED;
416 					} else {
417 						control->on_strm_q = SCTP_ON_ORDERED;
418 					}
419 					break;
420 				}
421 			}
422 		}
423 	}
424 	return (0);
425 }
426 
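/*
 * Reassembly failed: build a protocol-violation error cause carrying
 * the chunk's TSN/SID/FSN/MID diagnostics, free the chunk, and abort
 * the association.
 */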
427 static void
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429     struct sctp_queued_to_read *control,
430     struct sctp_tmit_chunk *chk,
431     int *abort_flag, int opspot)
432 {
433 	char msg[SCTP_DIAG_INFO_LEN];
434 	struct mbuf *oper;
435 
436 	if (stcb->asoc.idata_supported) {
437 		SCTP_SNPRINTF(msg, sizeof(msg),
438 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
439 		    opspot,
440 		    control->fsn_included,
441 		    chk->rec.data.tsn,
442 		    chk->rec.data.sid,
443 		    chk->rec.data.fsn, chk->rec.data.mid);
444 	} else {
445 		SCTP_SNPRINTF(msg, sizeof(msg),
446 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
447 		    opspot,
448 		    control->fsn_included,
449 		    chk->rec.data.tsn,
450 		    chk->rec.data.sid,
451 		    chk->rec.data.fsn,
452 		    (uint16_t)chk->rec.data.mid);
453 	}
454 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
455 	sctp_m_freem(chk->data);
456 	chk->data = NULL;
457 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
458 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
459 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
460 	*abort_flag = 1;
461 }
462 
463 static void
464 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
465 {
466 	/*
467 	 * The control could not be placed and must be cleaned.
468 	 */
469 	struct sctp_tmit_chunk *chk, *nchk;
470 
471 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
472 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
473 		if (chk->data)
474 			sctp_m_freem(chk->data);
475 		chk->data = NULL;
476 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
477 	}
478 	sctp_free_remote_addr(control->whoFrom);
479 	if (control->data) {
480 		sctp_m_freem(control->data);
481 		control->data = NULL;
482 	}
483 	sctp_free_a_readq(stcb, control);
484 }
485 
486 /*
487  * Queue the chunk either right into the socket buffer if it is the next one
488  * to go OR put it in the correct place in the delivery queue.  If we do
489  * append to the so_buf, keep doing so until we are out of order as
490  * long as the control's entered are non-fragmented.
491  * long as the controls entered are non-fragmented.
492 static void
493 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
494     struct sctp_association *asoc,
495     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
496 {
497 	/*
498 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
499 	 * all the data in one stream this could happen quite rapidly. One
500 	 * could use the TSN to keep track of things, but this scheme breaks
501 	 * down in the other type of stream usage that could occur. Send a
502 	 * single msg to stream 0, send 4 billion messages to stream 1, now
503 	 * send a message to stream 0. You have a situation where the TSN
504 	 * has wrapped but not in the stream. Is this worth worrying about,
505 	 * or should we just change our queue sort at the bottom to be by
506 	 * TSN?
507 	 *
508 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
509 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
510 	 * assignment this could happen... and I don't see how this would be
511 	 * a violation. So for now I am undecided and will leave the sort by
512 	 * SSN alone. Maybe a hybrid approach is the answer.
513 	 *
514 	 */
515 	struct sctp_queued_to_read *at;
516 	int queue_needed;
517 	uint32_t nxt_todel;
518 	struct mbuf *op_err;
519 	struct sctp_stream_in *strm;
520 	char msg[SCTP_DIAG_INFO_LEN];
521 
522 	strm = &asoc->strmin[control->sinfo_stream];
523 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
524 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
525 	}
526 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
527 		/* The incoming sseq is behind where we last delivered? */
528 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
529 		    strm->last_mid_delivered, control->mid);
530 		/*
531 		 * throw it in the stream so it gets cleaned up in
532 		 * association destruction
533 		 */
534 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
535 		if (asoc->idata_supported) {
536 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
537 			    strm->last_mid_delivered, control->sinfo_tsn,
538 			    control->sinfo_stream, control->mid);
539 		} else {
540 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
541 			    (uint16_t)strm->last_mid_delivered,
542 			    control->sinfo_tsn,
543 			    control->sinfo_stream,
544 			    (uint16_t)control->mid);
545 		}
546 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
547 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
548 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
549 		*abort_flag = 1;
550 		return;
551 
552 	}
553 	queue_needed = 1;
554 	asoc->size_on_all_streams += control->length;
555 	sctp_ucount_incr(asoc->cnt_on_all_streams);
556 	nxt_todel = strm->last_mid_delivered + 1;
557 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
558 		/* can be delivered right away? */
559 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
560 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
561 		}
562 		/* EY it won't be queued if it could be delivered directly */
563 		queue_needed = 0;
564 		if (asoc->size_on_all_streams >= control->length) {
565 			asoc->size_on_all_streams -= control->length;
566 		} else {
567 #ifdef INVARIANTS
568 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
569 #else
570 			asoc->size_on_all_streams = 0;
571 #endif
572 		}
573 		sctp_ucount_decr(asoc->cnt_on_all_streams);
574 		strm->last_mid_delivered++;
575 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
576 		sctp_add_to_readq(stcb->sctp_ep, stcb,
577 		    control,
578 		    &stcb->sctp_socket->so_rcv, 1,
579 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
580 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
581 			/* all delivered */
582 			nxt_todel = strm->last_mid_delivered + 1;
583 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
584 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
585 				if (control->on_strm_q == SCTP_ON_ORDERED) {
586 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
587 					if (asoc->size_on_all_streams >= control->length) {
588 						asoc->size_on_all_streams -= control->length;
589 					} else {
590 #ifdef INVARIANTS
591 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
592 #else
593 						asoc->size_on_all_streams = 0;
594 #endif
595 					}
596 					sctp_ucount_decr(asoc->cnt_on_all_streams);
597 #ifdef INVARIANTS
598 				} else {
599 					panic("Huh control: %p is on_strm_q: %d",
600 					    control, control->on_strm_q);
601 #endif
602 				}
603 				control->on_strm_q = 0;
604 				strm->last_mid_delivered++;
605 				/*
606 				 * We ignore the return of deliver_data here
607 				 * since we can always hold the chunk on the
608 				 * d-queue, and we have a finite number that
609 				 * can be delivered from the strq.
610 				 */
611 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
612 					sctp_log_strm_del(control, NULL,
613 					    SCTP_STR_LOG_FROM_IMMED_DEL);
614 				}
615 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
616 				sctp_add_to_readq(stcb->sctp_ep, stcb,
617 				    control,
618 				    &stcb->sctp_socket->so_rcv, 1,
619 				    SCTP_READ_LOCK_NOT_HELD,
620 				    SCTP_SO_LOCKED);
621 				continue;
622 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
623 				*need_reasm = 1;
624 			}
625 			break;
626 		}
627 	}
628 	if (queue_needed) {
629 		/*
630 		 * Ok, we did not deliver this guy, find the correct place
631 		 * to put it on the queue.
632 		 */
633 		if (sctp_place_control_in_stream(strm, asoc, control)) {
634 			SCTP_SNPRINTF(msg, sizeof(msg),
635 			    "Queue to str MID: %u duplicate", control->mid);
636 			sctp_clean_up_control(stcb, control);
637 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
638 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
639 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
640 			*abort_flag = 1;
641 		}
642 	}
643 }
644 
645 
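/*
 * Walk control->data, pruning zero-length mbufs, and recompute the
 * control's length and tail_mbuf. If the control is already on the
 * read queue, also charge each mbuf to the socket receive buffer.
 */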
646 static void
647 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
648 {
649 	struct mbuf *m, *prev = NULL;
650 	struct sctp_tcb *stcb;
651 
652 	stcb = control->stcb;
653 	control->held_length = 0;
654 	control->length = 0;
655 	m = control->data;
656 	while (m) {
657 		if (SCTP_BUF_LEN(m) == 0) {
658 			/* Skip mbufs with NO length */
659 			if (prev == NULL) {
660 				/* First one */
661 				control->data = sctp_m_free(m);
662 				m = control->data;
663 			} else {
664 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
665 				m = SCTP_BUF_NEXT(prev);
666 			}
667 			if (m == NULL) {
668 				control->tail_mbuf = prev;
669 			}
670 			continue;
671 		}
672 		prev = m;
673 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
674 		if (control->on_read_q) {
675 			/*
676 			 * On the read queue, so we must increment the SB
677 			 * stuff; we assume the caller holds any SB locks.
678 			 */
679 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
680 		}
681 		m = SCTP_BUF_NEXT(m);
682 	}
683 	if (prev) {
684 		control->tail_mbuf = prev;
685 	}
686 }
687 
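/*
 * Append the mbuf chain m to the control, again pruning zero-length
 * mbufs and updating the tail pointer, and report the number of bytes
 * added via *added.
 */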
688 static void
689 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
690 {
691 	struct mbuf *prev = NULL;
692 	struct sctp_tcb *stcb;
693 
694 	stcb = control->stcb;
695 	if (stcb == NULL) {
696 #ifdef INVARIANTS
697 		panic("Control broken");
698 #else
699 		return;
700 #endif
701 	}
702 	if (control->tail_mbuf == NULL) {
703 		/* TSNH */
704 		sctp_m_freem(control->data);
705 		control->data = m;
706 		sctp_setup_tail_pointer(control);
707 		return;
708 	}
709 	control->tail_mbuf->m_next = m;
710 	while (m) {
711 		if (SCTP_BUF_LEN(m) == 0) {
712 			/* Skip mbufs with NO length */
713 			if (prev == NULL) {
714 				/* First one */
715 				control->tail_mbuf->m_next = sctp_m_free(m);
716 				m = control->tail_mbuf->m_next;
717 			} else {
718 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
719 				m = SCTP_BUF_NEXT(prev);
720 			}
721 			if (m == NULL) {
722 				control->tail_mbuf = prev;
723 			}
724 			continue;
725 		}
726 		prev = m;
727 		if (control->on_read_q) {
728 			/*
729 			 * On the read queue, so we must increment the SB
730 			 * stuff; we assume the caller holds any SB locks.
731 			 */
732 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
733 		}
734 		*added += SCTP_BUF_LEN(m);
735 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
736 		m = SCTP_BUF_NEXT(m);
737 	}
738 	if (prev) {
739 		control->tail_mbuf = prev;
740 	}
741 }
742 
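/*
 * Initialize the fresh readq entry nc by cloning the delivery-relevant
 * fields of control, taking an extra reference on the source net.
 */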
743 static void
744 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
745 {
746 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
747 	nc->sinfo_stream = control->sinfo_stream;
748 	nc->mid = control->mid;
749 	TAILQ_INIT(&nc->reasm);
750 	nc->top_fsn = control->top_fsn;
751 	nc->mid = control->mid;
752 	nc->sinfo_flags = control->sinfo_flags;
753 	nc->sinfo_ppid = control->sinfo_ppid;
754 	nc->sinfo_context = control->sinfo_context;
755 	nc->fsn_included = 0xffffffff;
756 	nc->sinfo_tsn = control->sinfo_tsn;
757 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
758 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
759 	nc->whoFrom = control->whoFrom;
760 	atomic_add_int(&nc->whoFrom->ref_count, 1);
761 	nc->stcb = control->stcb;
762 	nc->port_from = control->port_from;
763 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
764 }
765 
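/*
 * Restart a control at the given TSN, pulling it back off the
 * endpoint's read queue if it had already been placed there.
 */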
766 static void
767 sctp_reset_a_control(struct sctp_queued_to_read *control,
768     struct sctp_inpcb *inp, uint32_t tsn)
769 {
770 	control->fsn_included = tsn;
771 	if (control->on_read_q) {
772 		/*
773 		 * We have to purge it from there, hopefully this will work
774 		 * :-)
775 		 */
776 		TAILQ_REMOVE(&inp->read_queue, control, next);
777 		control->on_read_q = 0;
778 	}
779 }
780 
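/*
 * Returns 0 when a partial delivery was just started on this control;
 * returns 1 when nothing more can be done with the old-style unordered
 * queue (either we are done, or we are still waiting on fragments).
 */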
781 static int
782 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
783     struct sctp_association *asoc,
784     struct sctp_stream_in *strm,
785     struct sctp_queued_to_read *control,
786     uint32_t pd_point,
787     int inp_read_lock_held)
788 {
789 	/*
790 	 * Special handling for the old un-ordered data chunk. All the
791 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
792 	 * to see if we have it all. If we return 1, no other control
793 	 * entries on the un-ordered queue will be looked at. In theory
794 	 * there should be no other entries in reality, unless the guy is
795 	 * sending both unordered NDATA and unordered DATA...
796 	 */
797 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
798 	uint32_t fsn;
799 	struct sctp_queued_to_read *nc;
800 	int cnt_added;
801 
802 	if (control->first_frag_seen == 0) {
803 		/* Nothing we can do, we have not seen the first piece yet */
804 		return (1);
805 	}
806 	/* Collapse any we can */
807 	cnt_added = 0;
808 restart:
809 	fsn = control->fsn_included + 1;
810 	/* Now what can we add? */
811 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
812 		if (chk->rec.data.fsn == fsn) {
813 			/* Ok lets add it */
814 			sctp_alloc_a_readq(stcb, nc);
815 			if (nc == NULL) {
816 				break;
817 			}
818 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
819 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
820 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
821 			fsn++;
822 			cnt_added++;
823 			chk = NULL;
824 			if (control->end_added) {
825 				/* We are done */
826 				if (!TAILQ_EMPTY(&control->reasm)) {
827 					/*
828 					 * Ok we have to move anything left
829 					 * on the control queue to a new
830 					 * control.
831 					 */
832 					sctp_build_readq_entry_from_ctl(nc, control);
833 					tchk = TAILQ_FIRST(&control->reasm);
834 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
835 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
837 							asoc->size_on_reasm_queue -= tchk->send_size;
838 						} else {
839 #ifdef INVARIANTS
840 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
841 #else
842 							asoc->size_on_reasm_queue = 0;
843 #endif
844 						}
845 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
846 						nc->first_frag_seen = 1;
847 						nc->fsn_included = tchk->rec.data.fsn;
848 						nc->data = tchk->data;
849 						nc->sinfo_ppid = tchk->rec.data.ppid;
850 						nc->sinfo_tsn = tchk->rec.data.tsn;
851 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
852 						tchk->data = NULL;
853 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
854 						sctp_setup_tail_pointer(nc);
855 						tchk = TAILQ_FIRST(&control->reasm);
856 					}
857 					/* Spin the rest onto the queue */
858 					while (tchk) {
859 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
860 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
861 						tchk = TAILQ_FIRST(&control->reasm);
862 					}
863 					/*
864 					 * Now let's add it to the queue
865 					 * after removing control
866 					 */
867 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868 					nc->on_strm_q = SCTP_ON_UNORDERED;
869 					if (control->on_strm_q) {
870 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871 						control->on_strm_q = 0;
872 					}
873 				}
874 				if (control->pdapi_started) {
875 					strm->pd_api_started = 0;
876 					control->pdapi_started = 0;
877 				}
878 				if (control->on_strm_q) {
879 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880 					control->on_strm_q = 0;
881 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
882 				}
883 				if (control->on_read_q == 0) {
884 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 					    &stcb->sctp_socket->so_rcv, control->end_added,
886 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887 				}
888 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
889 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
890 					/*
891 					 * Switch to the new guy and
892 					 * continue
893 					 */
894 					control = nc;
895 					goto restart;
896 				} else {
897 					if (nc->on_strm_q == 0) {
898 						sctp_free_a_readq(stcb, nc);
899 					}
900 				}
901 				return (1);
902 			} else {
903 				sctp_free_a_readq(stcb, nc);
904 			}
905 		} else {
906 			/* Can't add more */
907 			break;
908 		}
909 	}
910 	if (cnt_added && strm->pd_api_started) {
911 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
912 	}
913 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 		strm->pd_api_started = 1;
915 		control->pdapi_started = 1;
916 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 		    &stcb->sctp_socket->so_rcv, control->end_added,
918 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
920 		return (0);
921 	} else {
922 		return (1);
923 	}
924 }
925 
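/*
 * Old-style unordered fragments all share MID 0, so a FIRST fragment
 * may belong to a later message than the one already collected on the
 * control; in that case the control's contents and the chunk are
 * swapped below so the control always tracks the lowest-FSN message.
 */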
926 static void
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928     struct sctp_association *asoc,
929     struct sctp_queued_to_read *control,
930     struct sctp_tmit_chunk *chk,
931     int *abort_flag)
932 {
933 	struct sctp_tmit_chunk *at;
934 	int inserted;
935 
936 	/*
937 	 * Here we need to place the chunk into the control structure sorted
938 	 * in the correct order.
939 	 */
940 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 		/* It's the very first one. */
942 		SCTPDBG(SCTP_DEBUG_XXX,
943 		    "chunk is a first fsn: %u becomes fsn_included\n",
944 		    chk->rec.data.fsn);
945 		at = TAILQ_FIRST(&control->reasm);
946 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
947 			/*
948 			 * The first chunk in the reassembly has a smaller
949 			 * TSN than this one; even though this one has a
950 			 * FIRST flag, it must be from a subsequent msg.
951 			 */
952 			goto place_chunk;
953 		}
954 		if (control->first_frag_seen) {
955 			/*
956 			 * In old un-ordered we can reassemble multiple
957 			 * messages on one control, as long as the next
958 			 * FIRST is greater than the old first (TSN-wise,
959 			 * i.e. FSN-wise).
960 			 */
961 			struct mbuf *tdata;
962 			uint32_t tmp;
963 
964 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
965 				/*
966 				 * Easy way the start of a new guy beyond
967 				 * the lowest
968 				 */
969 				goto place_chunk;
970 			}
971 			if ((chk->rec.data.fsn == control->fsn_included) ||
972 			    (control->pdapi_started)) {
973 				/*
974 				 * Ok, this should not happen; if it does,
975 				 * we started the pd-api on the higher TSN
976 				 * (since the equals case is a TSN failure,
977 				 * it must be that).
978 				 *
979 				 * We are completely hosed in that case,
980 				 * since I have no way to recover. This
981 				 * really will only happen if we can get
982 				 * more TSN's higher before the pd-api-point.
983 				 */
984 				sctp_abort_in_reasm(stcb, control, chk,
985 				    abort_flag,
986 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
987 
988 				return;
989 			}
990 			/*
991 			 * Ok we have two firsts and the one we just got is
992 			 * smaller than the one we previously placed... yuck!
993 			 * We must swap them out.
994 			 */
995 			/* swap the mbufs */
996 			tdata = control->data;
997 			control->data = chk->data;
998 			chk->data = tdata;
999 			/* Save the lengths */
1000 			chk->send_size = control->length;
1001 			/* Recompute length of control and tail pointer */
1002 			sctp_setup_tail_pointer(control);
1003 			/* Fix the FSN included */
1004 			tmp = control->fsn_included;
1005 			control->fsn_included = chk->rec.data.fsn;
1006 			chk->rec.data.fsn = tmp;
1007 			/* Fix the TSN included */
1008 			tmp = control->sinfo_tsn;
1009 			control->sinfo_tsn = chk->rec.data.tsn;
1010 			chk->rec.data.tsn = tmp;
1011 			/* Fix the PPID included */
1012 			tmp = control->sinfo_ppid;
1013 			control->sinfo_ppid = chk->rec.data.ppid;
1014 			chk->rec.data.ppid = tmp;
1015 			/* Fix tail pointer */
1016 			goto place_chunk;
1017 		}
1018 		control->first_frag_seen = 1;
1019 		control->fsn_included = chk->rec.data.fsn;
1020 		control->top_fsn = chk->rec.data.fsn;
1021 		control->sinfo_tsn = chk->rec.data.tsn;
1022 		control->sinfo_ppid = chk->rec.data.ppid;
1023 		control->data = chk->data;
1024 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1025 		chk->data = NULL;
1026 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 		sctp_setup_tail_pointer(control);
1028 		return;
1029 	}
1030 place_chunk:
1031 	inserted = 0;
1032 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1033 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1034 			/*
1035 			 * This one in queue is bigger than the new one,
1036 			 * insert the new one before at.
1037 			 */
1038 			asoc->size_on_reasm_queue += chk->send_size;
1039 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1040 			inserted = 1;
1041 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1042 			break;
1043 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1044 			/*
1045 			 * They sent a duplicate fsn number. This really
1046 			 * should not happen since the FSN is a TSN and it
1047 			 * should have been dropped earlier.
1048 			 */
1049 			sctp_abort_in_reasm(stcb, control, chk,
1050 			    abort_flag,
1051 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1052 			return;
1053 		}
1054 
1055 	}
1056 	if (inserted == 0) {
1057 		/* It's at the end */
1058 		asoc->size_on_reasm_queue += chk->send_size;
1059 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1060 		control->top_fsn = chk->rec.data.fsn;
1061 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1062 	}
1063 }
1064 
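/*
 * pd_point below is the partial-delivery threshold: once an incomplete
 * message reaches this many bytes it is handed to the socket early via
 * the PD-API rather than held for full reassembly.
 */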
1065 static int
1066 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1067     struct sctp_stream_in *strm, int inp_read_lock_held)
1068 {
1069 	/*
1070 	 * Given a stream, strm, see if any of the SSN's on it that are
1071 	 * fragmented are ready to deliver. If so, go ahead and place them
1072 	 * on the read queue. In so placing, if we have hit the end, we
1073 	 * need to remove them from the stream's queue.
1074 	 */
1075 	struct sctp_queued_to_read *control, *nctl = NULL;
1076 	uint32_t next_to_del;
1077 	uint32_t pd_point;
1078 	int ret = 0;
1079 
1080 	if (stcb->sctp_socket) {
1081 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082 		    stcb->sctp_ep->partial_delivery_point);
1083 	} else {
1084 		pd_point = stcb->sctp_ep->partial_delivery_point;
1085 	}
1086 	control = TAILQ_FIRST(&strm->uno_inqueue);
1087 
1088 	if ((control != NULL) &&
1089 	    (asoc->idata_supported == 0)) {
1090 		/* Special handling needed for "old" data format */
1091 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1092 			goto done_un;
1093 		}
1094 	}
1095 	if (strm->pd_api_started) {
1096 		/* Can't add more */
1097 		return (0);
1098 	}
1099 	while (control) {
1100 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102 		nctl = TAILQ_NEXT(control, next_instrm);
1103 		if (control->end_added) {
1104 			/* We just put the last bit on */
1105 			if (control->on_strm_q) {
1106 #ifdef INVARIANTS
1107 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108 					panic("Huh control: %p on_q: %d -- not unordered?",
1109 					    control, control->on_strm_q);
1110 				}
1111 #endif
1112 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114 				control->on_strm_q = 0;
1115 			}
1116 			if (control->on_read_q == 0) {
1117 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1118 				    control,
1119 				    &stcb->sctp_socket->so_rcv, control->end_added,
1120 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1121 			}
1122 		} else {
1123 			/* Can we do a PD-API for this un-ordered guy? */
1124 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1125 				strm->pd_api_started = 1;
1126 				control->pdapi_started = 1;
1127 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1128 				    control,
1129 				    &stcb->sctp_socket->so_rcv, control->end_added,
1130 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1131 
1132 				break;
1133 			}
1134 		}
1135 		control = nctl;
1136 	}
1137 done_un:
1138 	control = TAILQ_FIRST(&strm->inqueue);
1139 	if (strm->pd_api_started) {
1140 		/* Can't add more */
1141 		return (0);
1142 	}
1143 	if (control == NULL) {
1144 		return (ret);
1145 	}
1146 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1147 		/*
1148 		 * Ok, the guy at the top was being partially delivered and
1149 		 * has completed, so we remove it. Note the pd_api flag was
1150 		 * taken off when the chunk was merged in by
1151 		 * sctp_queue_data_for_reasm below.
1152 		 */
1153 		nctl = TAILQ_NEXT(control, next_instrm);
1154 		SCTPDBG(SCTP_DEBUG_XXX,
1155 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1156 		    control, control->end_added, control->mid,
1157 		    control->top_fsn, control->fsn_included,
1158 		    strm->last_mid_delivered);
1159 		if (control->end_added) {
1160 			if (control->on_strm_q) {
1161 #ifdef INVARIANTS
1162 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1163 					panic("Huh control: %p on_q: %d -- not ordered?",
1164 					    control, control->on_strm_q);
1165 				}
1166 #endif
1167 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1168 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1169 				if (asoc->size_on_all_streams >= control->length) {
1170 					asoc->size_on_all_streams -= control->length;
1171 				} else {
1172 #ifdef INVARIANTS
1173 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1174 #else
1175 					asoc->size_on_all_streams = 0;
1176 #endif
1177 				}
1178 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1179 				control->on_strm_q = 0;
1180 			}
1181 			if (strm->pd_api_started && control->pdapi_started) {
1182 				control->pdapi_started = 0;
1183 				strm->pd_api_started = 0;
1184 			}
1185 			if (control->on_read_q == 0) {
1186 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1187 				    control,
1188 				    &stcb->sctp_socket->so_rcv, control->end_added,
1189 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1190 			}
1191 			control = nctl;
1192 		}
1193 	}
1194 	if (strm->pd_api_started) {
1195 		/*
1196 		 * Can't add more; we must have gotten an un-ordered chunk
1197 		 * above that is being partially delivered.
1198 		 */
1199 		return (0);
1200 	}
1201 deliver_more:
1202 	next_to_del = strm->last_mid_delivered + 1;
1203 	if (control) {
1204 		SCTPDBG(SCTP_DEBUG_XXX,
1205 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1206 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1207 		    next_to_del);
1208 		nctl = TAILQ_NEXT(control, next_instrm);
1209 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1210 		    (control->first_frag_seen)) {
1211 			int done;
1212 
1213 			/* Ok we can deliver it onto the stream. */
1214 			if (control->end_added) {
1215 				/* We are done with it afterwards */
1216 				if (control->on_strm_q) {
1217 #ifdef INVARIANTS
1218 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1219 						panic("Huh control: %p on_q: %d -- not ordered?",
1220 						    control, control->on_strm_q);
1221 					}
1222 #endif
1223 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1224 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1225 					if (asoc->size_on_all_streams >= control->length) {
1226 						asoc->size_on_all_streams -= control->length;
1227 					} else {
1228 #ifdef INVARIANTS
1229 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1230 #else
1231 						asoc->size_on_all_streams = 0;
1232 #endif
1233 					}
1234 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1235 					control->on_strm_q = 0;
1236 				}
1237 				ret++;
1238 			}
1239 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1240 				/*
1241 				 * A singleton now slipping through - mark
1242 				 * it non-revokable too
1243 				 */
1244 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1245 			} else if (control->end_added == 0) {
1246 				/*
1247 				 * Check if we can defer adding until it's
1248 				 * all there.
1249 				 */
1250 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1251 					/*
1252 					 * Don't need it or cannot add more
1253 					 * (one being delivered that way)
1254 					 */
1255 					goto out;
1256 				}
1257 			}
1258 			done = (control->end_added) && (control->last_frag_seen);
1259 			if (control->on_read_q == 0) {
1260 				if (!done) {
1261 					if (asoc->size_on_all_streams >= control->length) {
1262 						asoc->size_on_all_streams -= control->length;
1263 					} else {
1264 #ifdef INVARIANTS
1265 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1266 #else
1267 						asoc->size_on_all_streams = 0;
1268 #endif
1269 					}
1270 					strm->pd_api_started = 1;
1271 					control->pdapi_started = 1;
1272 				}
1273 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1274 				    control,
1275 				    &stcb->sctp_socket->so_rcv, control->end_added,
1276 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1277 			}
1278 			strm->last_mid_delivered = next_to_del;
1279 			if (done) {
1280 				control = nctl;
1281 				goto deliver_more;
1282 			}
1283 		}
1284 	}
1285 out:
1286 	return (ret);
1287 }
1288 
1289 
1290 uint32_t
1291 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1292     struct sctp_stream_in *strm,
1293     struct sctp_tcb *stcb, struct sctp_association *asoc,
1294     struct sctp_tmit_chunk *chk, int hold_rlock)
1295 {
1296 	/*
1297 	 * Given a control and a chunk, merge the data from the chk onto the
1298 	 * control and free up the chunk resources.
1299 	 */
1300 	uint32_t added = 0;
1301 	int i_locked = 0;
1302 
1303 	if (control->on_read_q && (hold_rlock == 0)) {
1304 		/*
1305 		 * It's being pd-api'd, so we must take some locks.
1306 		 */
1307 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1308 		i_locked = 1;
1309 	}
1310 	if (control->data == NULL) {
1311 		control->data = chk->data;
1312 		sctp_setup_tail_pointer(control);
1313 	} else {
1314 		sctp_add_to_tail_pointer(control, chk->data, &added);
1315 	}
1316 	control->fsn_included = chk->rec.data.fsn;
1317 	asoc->size_on_reasm_queue -= chk->send_size;
1318 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1319 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1320 	chk->data = NULL;
1321 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1322 		control->first_frag_seen = 1;
1323 		control->sinfo_tsn = chk->rec.data.tsn;
1324 		control->sinfo_ppid = chk->rec.data.ppid;
1325 	}
1326 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1327 		/* It's complete */
1328 		if ((control->on_strm_q) && (control->on_read_q)) {
1329 			if (control->pdapi_started) {
1330 				control->pdapi_started = 0;
1331 				strm->pd_api_started = 0;
1332 			}
1333 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1334 				/* Unordered */
1335 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1336 				control->on_strm_q = 0;
1337 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1338 				/* Ordered */
1339 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1340 				/*
1341 				 * Don't need to decrement
1342 				 * size_on_all_streams, since control is on
1343 				 * the read queue.
1344 				 */
1345 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1346 				control->on_strm_q = 0;
1347 #ifdef INVARIANTS
1348 			} else if (control->on_strm_q) {
1349 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1350 				    control->on_strm_q);
1351 #endif
1352 			}
1353 		}
1354 		control->end_added = 1;
1355 		control->last_frag_seen = 1;
1356 	}
1357 	if (i_locked) {
1358 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1359 	}
1360 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 	return (added);
1362 }
1363 
1364 /*
1365  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1366  * queue, see if anything can be delivered. If so pull it off (or as much as
1367  * we can). If we run out of space then we must dump what we can and set the
1368  * appropriate flag to say we queued what we could.
1369  */
1370 static void
1371 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1372     struct sctp_queued_to_read *control,
1373     struct sctp_tmit_chunk *chk,
1374     int created_control,
1375     int *abort_flag, uint32_t tsn)
1376 {
1377 	uint32_t next_fsn;
1378 	struct sctp_tmit_chunk *at, *nat;
1379 	struct sctp_stream_in *strm;
1380 	int do_wakeup, unordered;
1381 	uint32_t lenadded;
1382 
1383 	strm = &asoc->strmin[control->sinfo_stream];
1384 	/*
1385 	 * For old un-ordered data chunks.
1386 	 */
1387 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1388 		unordered = 1;
1389 	} else {
1390 		unordered = 0;
1391 	}
1392 	/* Must be added to the stream-in queue */
1393 	if (created_control) {
1394 		if (unordered == 0) {
1395 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1396 		}
1397 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1398 			/* Duplicate SSN? */
1399 			sctp_abort_in_reasm(stcb, control, chk,
1400 			    abort_flag,
1401 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1402 			sctp_clean_up_control(stcb, control);
1403 			return;
1404 		}
1405 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1406 			/*
1407 			 * Ok, we created this control and now let's validate
1408 			 * that it's legal, i.e. there is a B bit set; if not
1409 			 * and we have up to the cum-ack then it's invalid.
1410 			 */
1411 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1412 				sctp_abort_in_reasm(stcb, control, chk,
1413 				    abort_flag,
1414 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1415 				return;
1416 			}
1417 		}
1418 	}
1419 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1420 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1421 		return;
1422 	}
1423 	/*
1424 	 * Ok we must queue the chunk into the reassembly portion: o if it's
1425 	 * the first it goes to the control mbuf. o if it's not first but
1426 	 * the next in sequence it goes to the control, and each succeeding
1427 	 * one in order also goes. o if it's not in order we place it on the
1428 	 * list in its place.
1429 	 */
1430 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1431 		/* It's the very first one. */
1432 		SCTPDBG(SCTP_DEBUG_XXX,
1433 		    "chunk is a first fsn: %u becomes fsn_included\n",
1434 		    chk->rec.data.fsn);
1435 		if (control->first_frag_seen) {
1436 			/*
1437 			 * Error on the sender's part: they either sent us two
1438 			 * data chunks with FIRST, or they sent two
1439 			 * un-ordered chunks that were fragmented at the
1440 			 * same time in the same stream.
1441 			 */
1442 			sctp_abort_in_reasm(stcb, control, chk,
1443 			    abort_flag,
1444 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1445 			return;
1446 		}
1447 		control->first_frag_seen = 1;
1448 		control->sinfo_ppid = chk->rec.data.ppid;
1449 		control->sinfo_tsn = chk->rec.data.tsn;
1450 		control->fsn_included = chk->rec.data.fsn;
1451 		control->data = chk->data;
1452 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1453 		chk->data = NULL;
1454 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1455 		sctp_setup_tail_pointer(control);
1456 		asoc->size_on_all_streams += control->length;
1457 	} else {
1458 		/* Place the chunk in our list */
1459 		int inserted = 0;
1460 
1461 		if (control->last_frag_seen == 0) {
1462 			/* Still willing to raise highest FSN seen */
1463 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1464 				SCTPDBG(SCTP_DEBUG_XXX,
1465 				    "We have a new top_fsn: %u\n",
1466 				    chk->rec.data.fsn);
1467 				control->top_fsn = chk->rec.data.fsn;
1468 			}
1469 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1470 				SCTPDBG(SCTP_DEBUG_XXX,
1471 				    "The last fsn is now in place fsn: %u\n",
1472 				    chk->rec.data.fsn);
1473 				control->last_frag_seen = 1;
1474 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1475 					SCTPDBG(SCTP_DEBUG_XXX,
1476 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1477 					    chk->rec.data.fsn,
1478 					    control->top_fsn);
1479 					sctp_abort_in_reasm(stcb, control, chk,
1480 					    abort_flag,
1481 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1482 					return;
1483 				}
1484 			}
1485 			if (asoc->idata_supported || control->first_frag_seen) {
1486 				/*
1487 				 * For IDATA we always check since we know
1488 				 * that the first fragment is 0. For old
1489 				 * DATA we have to receive the first before
1490 				 * we know the first FSN (which is the TSN).
1491 				 */
1492 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1493 					/*
1494 					 * We have already delivered up to
1495 					 * this, so it's a dup.
1496 					 */
1497 					sctp_abort_in_reasm(stcb, control, chk,
1498 					    abort_flag,
1499 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1500 					return;
1501 				}
1502 			}
1503 		} else {
1504 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1505 				/* Second last? huh? */
1506 				SCTPDBG(SCTP_DEBUG_XXX,
1507 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1508 				    chk->rec.data.fsn, control->top_fsn);
1509 				sctp_abort_in_reasm(stcb, control,
1510 				    chk, abort_flag,
1511 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1512 				return;
1513 			}
1514 			if (asoc->idata_supported || control->first_frag_seen) {
1515 				/*
1516 				 * For IDATA we always check since we know
1517 				 * that the first fragment is 0. For old
1518 				 * DATA we have to receive the first before
1519 				 * we know the first FSN (which is the TSN).
1520 				 */
1521 
1522 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1523 					/*
1524 					 * We have already delivered up to
1525 					 * this, so it's a dup.
1526 					 */
1527 					SCTPDBG(SCTP_DEBUG_XXX,
1528 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1529 					    chk->rec.data.fsn, control->fsn_included);
1530 					sctp_abort_in_reasm(stcb, control, chk,
1531 					    abort_flag,
1532 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1533 					return;
1534 				}
1535 			}
1536 			/*
1537 			 * Validate not beyond the top FSN if we have seen
1538 			 * the last one.
1539 			 */
1540 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1541 				SCTPDBG(SCTP_DEBUG_XXX,
1542 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1543 				    chk->rec.data.fsn,
1544 				    control->top_fsn);
1545 				sctp_abort_in_reasm(stcb, control, chk,
1546 				    abort_flag,
1547 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1548 				return;
1549 			}
1550 		}
1551 		/*
1552 		 * If we reach here, we need to place the new chunk in the
1553 		 * reassembly for this control.
1554 		 */
1555 		SCTPDBG(SCTP_DEBUG_XXX,
1556 		    "chunk is a not first fsn: %u needs to be inserted\n",
1557 		    chk->rec.data.fsn);
1558 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1559 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1560 				/*
1561 				 * This one in queue is bigger than the new
1562 				 * one, insert the new one before at.
1563 				 */
1564 				SCTPDBG(SCTP_DEBUG_XXX,
1565 				    "Insert it before fsn: %u\n",
1566 				    at->rec.data.fsn);
1567 				asoc->size_on_reasm_queue += chk->send_size;
1568 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1569 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1570 				inserted = 1;
1571 				break;
1572 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1573 				/*
1574 				 * Gak, he sent me a duplicate str seq
1575 				 * number
1576 				 */
1577 				/*
1578 				 * foo bar, I guess I will just free this
1579 				 * new guy, should we abort too? FIX ME
1580 				 * MAYBE? Or it COULD be that the SSN's have
1581 				 * wrapped. Maybe I should compare to TSN
1582 				 * somehow... sigh, for now just blow away
1583 				 * the chunk!
1584 				 */
1585 				SCTPDBG(SCTP_DEBUG_XXX,
1586 				    "Duplicate to fsn: %u -- abort\n",
1587 				    at->rec.data.fsn);
1588 				sctp_abort_in_reasm(stcb, control,
1589 				    chk, abort_flag,
1590 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1591 				return;
1592 			}
1593 		}
1594 		if (inserted == 0) {
1595 			/* Goes on the end */
1596 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1597 			    chk->rec.data.fsn);
1598 			asoc->size_on_reasm_queue += chk->send_size;
1599 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1600 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1601 		}
1602 	}
1603 	/*
1604 	 * Now see if we can pull any in-sequence fragments from the reasm
1605 	 * queue into the control structure, if it makes sense.
1606 	 */
1607 	do_wakeup = 0;
1608 	/*
1609 	 * If the first fragment has not been seen there is no sense in
1610 	 * looking.
1611 	 */
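	/*
	 * A sketch of the merge below (illustrative values, not from a
	 * trace): suppose fsn_included is 4 and the reasm queue holds
	 * fragments with FSNs {5, 6, 9}. Fragments 5 and 6 are pulled into
	 * the control (next_fsn advancing 5 -> 6 -> 7), then the loop stops
	 * at 9 since 9 != 7; the gap 7..8 has to arrive first.
	 */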
1612 	if (control->first_frag_seen) {
1613 		next_fsn = control->fsn_included + 1;
1614 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1615 			if (at->rec.data.fsn == next_fsn) {
1616 				/* We can add this one now to the control */
1617 				SCTPDBG(SCTP_DEBUG_XXX,
1618 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1619 				    control, at,
1620 				    at->rec.data.fsn,
1621 				    next_fsn, control->fsn_included);
1622 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1623 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1624 				if (control->on_read_q) {
1625 					do_wakeup = 1;
1626 				} else {
1627 					/*
1628 					 * We only add to the
1629 					 * size-on-all-streams if it is not
1630 					 * on the read queue. The read queue
1631 					 * flag will cause an sballoc, so it
1632 					 * is accounted for there.
1633 					 */
1634 					asoc->size_on_all_streams += lenadded;
1635 				}
1636 				next_fsn++;
1637 				if (control->end_added && control->pdapi_started) {
1638 					if (strm->pd_api_started) {
1639 						strm->pd_api_started = 0;
1640 						control->pdapi_started = 0;
1641 					}
1642 					if (control->on_read_q == 0) {
1643 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1644 						    control,
1645 						    &stcb->sctp_socket->so_rcv, control->end_added,
1646 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1647 					}
1648 					break;
1649 				}
1650 			} else {
1651 				break;
1652 			}
1653 		}
1654 	}
1655 	if (do_wakeup) {
1656 		/* Need to wakeup the reader */
1657 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1658 	}
1659 }
1660 
1661 static struct sctp_queued_to_read *
1662 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1663 {
1664 	struct sctp_queued_to_read *control;
1665 
1666 	if (ordered) {
1667 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1668 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1669 				break;
1670 			}
1671 		}
1672 	} else {
1673 		if (idata_supported) {
1674 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1675 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1676 					break;
1677 				}
1678 			}
1679 		} else {
1680 			control = TAILQ_FIRST(&strm->uno_inqueue);
1681 		}
1682 	}
1683 	return (control);
1684 }
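/*
 * Note on sctp_find_reasm_entry(): for ordered data the stream's inqueue
 * is searched by MID, and for unordered I-DATA the uno_inqueue is searched
 * by MID as well. For unordered old DATA there is no MID to match on, so
 * the first entry on the uno_inqueue (if any) is simply used.
 */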
1685 
1686 static int
1687 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1688     struct mbuf **m, int offset, int chk_length,
1689     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1690     int *break_flag, int last_chunk, uint8_t chk_type)
1691 {
1692 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1693 	uint32_t tsn, fsn, gap, mid;
1694 	struct mbuf *dmbuf;
1695 	int the_len;
1696 	int need_reasm_check = 0;
1697 	uint16_t sid;
1698 	struct mbuf *op_err;
1699 	char msg[SCTP_DIAG_INFO_LEN];
1700 	struct sctp_queued_to_read *control, *ncontrol;
1701 	uint32_t ppid;
1702 	uint8_t chk_flags;
1703 	struct sctp_stream_reset_list *liste;
1704 	int ordered;
1705 	size_t clen;
1706 	int created_control = 0;
1707 
1708 	if (chk_type == SCTP_IDATA) {
1709 		struct sctp_idata_chunk *chunk, chunk_buf;
1710 
1711 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1712 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1713 		chk_flags = chunk->ch.chunk_flags;
1714 		clen = sizeof(struct sctp_idata_chunk);
1715 		tsn = ntohl(chunk->dp.tsn);
1716 		sid = ntohs(chunk->dp.sid);
1717 		mid = ntohl(chunk->dp.mid);
1718 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1719 			fsn = 0;
1720 			ppid = chunk->dp.ppid_fsn.ppid;
1721 		} else {
1722 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1723 			ppid = 0xffffffff;	/* Use as an invalid value. */
1724 		}
1725 	} else {
1726 		struct sctp_data_chunk *chunk, chunk_buf;
1727 
1728 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1729 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1730 		chk_flags = chunk->ch.chunk_flags;
1731 		clen = sizeof(struct sctp_data_chunk);
1732 		tsn = ntohl(chunk->dp.tsn);
1733 		sid = ntohs(chunk->dp.sid);
1734 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1735 		fsn = tsn;
1736 		ppid = chunk->dp.ppid;
1737 	}
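	/*
	 * For reference (RFC 4960 and RFC 8260): the two headers parsed
	 * above differ in that DATA carries a 16-bit SSN and the PPID in
	 * every fragment, while I-DATA carries a 32-bit MID and a PPID/FSN
	 * union, so only the first fragment (fsn 0) carries the PPID and
	 * all later fragments carry the FSN instead.
	 */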
1738 	if ((size_t)chk_length == clen) {
1739 		/*
1740 		 * Need to send an abort since we had an empty data chunk.
1741 		 */
1742 		op_err = sctp_generate_no_user_data_cause(tsn);
1743 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1744 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1745 		*abort_flag = 1;
1746 		return (0);
1747 	}
1748 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1749 		asoc->send_sack = 1;
1750 	}
1751 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1752 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1753 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1754 	}
1755 	if (stcb == NULL) {
1756 		return (0);
1757 	}
1758 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1759 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1760 		/* It is a duplicate */
1761 		SCTP_STAT_INCR(sctps_recvdupdata);
1762 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1763 			/* Record a dup for the next outbound sack */
1764 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1765 			asoc->numduptsns++;
1766 		}
1767 		asoc->send_sack = 1;
1768 		return (0);
1769 	}
1770 	/* Calculate the number of TSNs between the base and this TSN */
1771 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
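	/*
	 * SCTP_CALC_TSN_TO_GAP effectively computes the serial distance
	 * (tsn - mapping_array_base_tsn) mod 2^32; e.g. (illustrative) a
	 * base of 0xfffffff0 and a tsn of 0x00000005 give a gap of 0x15,
	 * so TSN wrap-around is handled naturally.
	 */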
1772 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1773 		/* Can't hold the bit in the mapping array at its max size, toss it */
1774 		return (0);
1775 	}
1776 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1777 		SCTP_TCB_LOCK_ASSERT(stcb);
1778 		if (sctp_expand_mapping_array(asoc, gap)) {
1779 			/* Can't expand, drop it */
1780 			return (0);
1781 		}
1782 	}
1783 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1784 		*high_tsn = tsn;
1785 	}
1786 	/* See if we have received this one already */
1787 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1788 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1789 		SCTP_STAT_INCR(sctps_recvdupdata);
1790 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1791 			/* Record a dup for the next outbound sack */
1792 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1793 			asoc->numduptsns++;
1794 		}
1795 		asoc->send_sack = 1;
1796 		return (0);
1797 	}
1798 	/*
1799 	 * Check the GONE flag; duplicates would have caused a SACK to be
1800 	 * sent up above.
1801 	 */
1802 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1803 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1804 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1805 		/*
1806 		 * The socket is gone, so there is no longer a receiver.
1807 		 * Send the peer an ABORT.
1808 		 */
1809 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1810 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1811 		*abort_flag = 1;
1812 		return (0);
1813 	}
1814 	/*
1815 	 * Now, before going further, we see if there is room. If NOT, then
1816 	 * we MAY let one through only IF this TSN is the one we are
1817 	 * waiting for on a partial delivery API.
1818 	 */
1819 
1820 	/* Is the stream valid? */
1821 	if (sid >= asoc->streamincnt) {
1822 		struct sctp_error_invalid_stream *cause;
1823 
1824 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1825 		    0, M_NOWAIT, 1, MT_DATA);
1826 		if (op_err != NULL) {
1827 			/* add some space up front so prepend will work well */
1828 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1829 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1830 			/*
1831 			 * Error causes are just parameters; this one has two
1832 			 * back-to-back parameter headers: one with the error
1833 			 * type and size, the other with the stream id and a
1834 			 * reserved field.
1835 			 */
1835 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1836 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1837 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1838 			cause->stream_id = htons(sid);
1839 			cause->reserved = htons(0);
1840 			sctp_queue_op_err(stcb, op_err);
1841 		}
1842 		SCTP_STAT_INCR(sctps_badsid);
1843 		SCTP_TCB_LOCK_ASSERT(stcb);
1844 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1845 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1846 			asoc->highest_tsn_inside_nr_map = tsn;
1847 		}
1848 		if (tsn == (asoc->cumulative_tsn + 1)) {
1849 			/* Update cum-ack */
1850 			asoc->cumulative_tsn = tsn;
1851 		}
1852 		return (0);
1853 	}
1854 	/*
1855 	 * If it is a fragmented message, let's see if we can find the
1856 	 * control on the reassembly queues.
1857 	 */
1858 	if ((chk_type == SCTP_IDATA) &&
1859 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1860 	    (fsn == 0)) {
1861 		/*
1862 		 * The first fragment *must* have fsn 0, and the other
1863 		 * (middle/end) pieces can *not* have fsn 0. XXX: This can
1864 		 * happen in case of a wrap around. Ignore this for now.
1865 		 */
1866 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1867 		goto err_out;
1868 	}
1869 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1870 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1871 	    chk_flags, control);
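	/*
	 * SCTP_DATA_NOT_FRAG is the B and E bits together (first and last
	 * fragment), so a chunk matching it carries a complete user
	 * message; anything else is part of a reassembly.
	 */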
1872 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1873 		/* See if we can find the re-assembly entity */
1874 		if (control != NULL) {
1875 			/* We found something, does it belong? */
1876 			if (ordered && (mid != control->mid)) {
1877 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1878 		err_out:
1879 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1880 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1881 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1882 				*abort_flag = 1;
1883 				return (0);
1884 			}
1885 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1886 				/*
1887 				 * We can't have a switched order with an
1888 				 * unordered chunk
1889 				 */
1890 				SCTP_SNPRINTF(msg, sizeof(msg),
1891 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1892 				    tsn);
1893 				goto err_out;
1894 			}
1895 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1896 				/*
1897 				 * We can't have a switched unordered with an
1898 				 * ordered chunk
1899 				 */
1900 				SCTP_SNPRINTF(msg, sizeof(msg),
1901 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1902 				    tsn);
1903 				goto err_out;
1904 			}
1905 		}
1906 	} else {
1907 		/*
1908 		 * It is a complete segment. Let's validate that we don't
1909 		 * have a re-assembly going on with the same Stream/Seq (for
1910 		 * ordered) or in the same Stream for unordered.
1911 		 */
1912 		if (control != NULL) {
1913 			if (ordered || asoc->idata_supported) {
1914 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1915 				    chk_flags, mid);
1916 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1917 				goto err_out;
1918 			} else {
1919 				if ((tsn == control->fsn_included + 1) &&
1920 				    (control->end_added == 0)) {
1921 					SCTP_SNPRINTF(msg, sizeof(msg),
1922 					    "Illegal message sequence, missing end for MID: %8.8x",
1923 					    control->fsn_included);
1924 					goto err_out;
1925 				} else {
1926 					control = NULL;
1927 				}
1928 			}
1929 		}
1930 	}
1931 	/* now do the tests */
1932 	if (((asoc->cnt_on_all_streams +
1933 	    asoc->cnt_on_reasm_queue +
1934 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1935 	    (((int)asoc->my_rwnd) <= 0)) {
1936 		/*
1937 		 * When we have NO room in the rwnd we check to make sure
1938 		 * the reader is doing its job...
1939 		 */
1940 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1941 			/* some to read, wake-up */
1942 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1943 		}
1944 		/* now is it in the mapping array of what we have accepted? */
1945 		if (chk_type == SCTP_DATA) {
1946 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1947 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1948 				/* Not in the valid range, dump it */
1949 		dump_packet:
1950 				sctp_set_rwnd(stcb, asoc);
1951 				if ((asoc->cnt_on_all_streams +
1952 				    asoc->cnt_on_reasm_queue +
1953 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1954 					SCTP_STAT_INCR(sctps_datadropchklmt);
1955 				} else {
1956 					SCTP_STAT_INCR(sctps_datadroprwnd);
1957 				}
1958 				*break_flag = 1;
1959 				return (0);
1960 			}
1961 		} else {
1962 			if (control == NULL) {
1963 				goto dump_packet;
1964 			}
1965 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1966 				goto dump_packet;
1967 			}
1968 		}
1969 	}
1970 #ifdef SCTP_ASOCLOG_OF_TSNS
1971 	SCTP_TCB_LOCK_ASSERT(stcb);
1972 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1973 		asoc->tsn_in_at = 0;
1974 		asoc->tsn_in_wrapped = 1;
1975 	}
1976 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1977 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1978 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1979 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1980 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1981 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1982 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1983 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1984 	asoc->tsn_in_at++;
1985 #endif
1986 	/*
1987 	 * Before we continue, let's validate that we are not being fooled
1988 	 * by an evil attacker. We can only have N * 8 chunks outstanding,
1989 	 * where N is the size of the mapping array in bytes, so there is
1990 	 * no way our stream sequence numbers could have wrapped. We of
1991 	 * course only validate the FIRST fragment, so the bit must be set.
1992 	 */
1993 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1994 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1995 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1996 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1997 		/* The incoming sseq is behind where we last delivered? */
1998 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1999 		    mid, asoc->strmin[sid].last_mid_delivered);
2000 
2001 		if (asoc->idata_supported) {
2002 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2003 			    asoc->strmin[sid].last_mid_delivered,
2004 			    tsn,
2005 			    sid,
2006 			    mid);
2007 		} else {
2008 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2009 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2010 			    tsn,
2011 			    sid,
2012 			    (uint16_t)mid);
2013 		}
2014 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2015 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2016 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2017 		*abort_flag = 1;
2018 		return (0);
2019 	}
2020 	if (chk_type == SCTP_IDATA) {
2021 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2022 	} else {
2023 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2024 	}
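	/*
	 * the_len is the user payload length; e.g. (illustrative) a DATA
	 * chunk with chk_length 116 minus its 16-byte header leaves
	 * the_len = 100, while an I-DATA header is 4 bytes longer (20).
	 */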
2025 	if (last_chunk == 0) {
2026 		if (chk_type == SCTP_IDATA) {
2027 			dmbuf = SCTP_M_COPYM(*m,
2028 			    (offset + sizeof(struct sctp_idata_chunk)),
2029 			    the_len, M_NOWAIT);
2030 		} else {
2031 			dmbuf = SCTP_M_COPYM(*m,
2032 			    (offset + sizeof(struct sctp_data_chunk)),
2033 			    the_len, M_NOWAIT);
2034 		}
2035 #ifdef SCTP_MBUF_LOGGING
2036 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2037 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2038 		}
2039 #endif
2040 	} else {
2041 		/* We can steal the last chunk */
2042 		int l_len;
2043 
2044 		dmbuf = *m;
2045 		/* lop off the top part */
2046 		if (chk_type == SCTP_IDATA) {
2047 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2048 		} else {
2049 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2050 		}
2051 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2052 			l_len = SCTP_BUF_LEN(dmbuf);
2053 		} else {
2054 			/*
2055 			 * Need to count up the size; hopefully we do not
2056 			 * hit this too often.
2057 			 */
2058 			struct mbuf *lat;
2059 
2060 			l_len = 0;
2061 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2062 				l_len += SCTP_BUF_LEN(lat);
2063 			}
2064 		}
2065 		if (l_len > the_len) {
2066 			/* Trim the rounding (pad) bytes off the end too */
2067 			m_adj(dmbuf, -(l_len - the_len));
2068 		}
2069 	}
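	/*
	 * Reminder on m_adj(9): a positive length trims bytes from the head
	 * of the mbuf chain (used above to strip the chunk header), while a
	 * negative length trims from the tail (used above to drop padding
	 * beyond the_len).
	 */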
2070 	if (dmbuf == NULL) {
2071 		SCTP_STAT_INCR(sctps_nomem);
2072 		return (0);
2073 	}
2074 	/*
2075 	 * Now, no matter what, we need a control; get one if we don't have
2076 	 * one (we may have gotten it above when we found the message was
2077 	 * fragmented).
2078 	 */
2079 	if (control == NULL) {
2080 		sctp_alloc_a_readq(stcb, control);
2081 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2082 		    ppid,
2083 		    sid,
2084 		    chk_flags,
2085 		    NULL, fsn, mid);
2086 		if (control == NULL) {
2087 			SCTP_STAT_INCR(sctps_nomem);
2088 			return (0);
2089 		}
2090 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2091 			struct mbuf *mm;
2092 
2093 			control->data = dmbuf;
2094 			control->tail_mbuf = NULL;
2095 			for (mm = control->data; mm; mm = mm->m_next) {
2096 				control->length += SCTP_BUF_LEN(mm);
2097 				if (SCTP_BUF_NEXT(mm) == NULL) {
2098 					control->tail_mbuf = mm;
2099 				}
2100 			}
2101 			control->end_added = 1;
2102 			control->last_frag_seen = 1;
2103 			control->first_frag_seen = 1;
2104 			control->fsn_included = fsn;
2105 			control->top_fsn = fsn;
2106 		}
2107 		created_control = 1;
2108 	}
2109 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2110 	    chk_flags, ordered, mid, control);
2111 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2112 	    TAILQ_EMPTY(&asoc->resetHead) &&
2113 	    ((ordered == 0) ||
2114 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2115 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2116 		/* Candidate for express delivery */
2117 		/*
2118 		 * It is not fragmented, no PD-API is up, nothing is in the
2119 		 * delivery queue, it is un-ordered OR ordered and the next
2120 		 * to deliver AND nothing else is stuck on the stream queue,
2121 		 * and there is room for it in the socket buffer. Let's just
2122 		 * stuff it up the buffer.
2123 		 */
2124 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2125 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2126 			asoc->highest_tsn_inside_nr_map = tsn;
2127 		}
2128 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2129 		    control, mid);
2130 
2131 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2132 		    control, &stcb->sctp_socket->so_rcv,
2133 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2134 
2135 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2136 			/* for ordered, bump what we delivered */
2137 			asoc->strmin[sid].last_mid_delivered++;
2138 		}
2139 		SCTP_STAT_INCR(sctps_recvexpress);
2140 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2141 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2142 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2143 		}
2144 		control = NULL;
2145 		goto finish_express_del;
2146 	}
2147 
2148 	/* Now will we need a chunk too? */
2149 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2150 		sctp_alloc_a_chunk(stcb, chk);
2151 		if (chk == NULL) {
2152 			/* No memory so we drop the chunk */
2153 			SCTP_STAT_INCR(sctps_nomem);
2154 			if (last_chunk == 0) {
2155 				/* we copied it, free the copy */
2156 				sctp_m_freem(dmbuf);
2157 			}
2158 			return (0);
2159 		}
2160 		chk->rec.data.tsn = tsn;
2161 		chk->no_fr_allowed = 0;
2162 		chk->rec.data.fsn = fsn;
2163 		chk->rec.data.mid = mid;
2164 		chk->rec.data.sid = sid;
2165 		chk->rec.data.ppid = ppid;
2166 		chk->rec.data.context = stcb->asoc.context;
2167 		chk->rec.data.doing_fast_retransmit = 0;
2168 		chk->rec.data.rcv_flags = chk_flags;
2169 		chk->asoc = asoc;
2170 		chk->send_size = the_len;
2171 		chk->whoTo = net;
2172 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2173 		    chk,
2174 		    control, mid);
2175 		atomic_add_int(&net->ref_count, 1);
2176 		chk->data = dmbuf;
2177 	}
2178 	/* Set the appropriate TSN mark */
2179 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2180 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2181 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2182 			asoc->highest_tsn_inside_nr_map = tsn;
2183 		}
2184 	} else {
2185 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2186 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2187 			asoc->highest_tsn_inside_map = tsn;
2188 		}
2189 	}
2190 	/* Now is it complete (i.e. not fragmented)? */
2191 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2192 		/*
2193 		 * Special check for when streams are resetting. We could be
2194 		 * smarter about this and check the actual stream to see
2195 		 * whether it is being reset; that way we would not create
2196 		 * head-of-line blocking amongst streams being reset and
2197 		 * those not being reset.
2198 		 */
2200 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2201 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2202 			/*
2203 			 * Yep, it is past where we need to reset... go
2204 			 * ahead and queue it.
2205 			 */
2206 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2207 				/* first one on */
2208 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2209 			} else {
2210 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2211 				unsigned char inserted = 0;
2212 
2213 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2214 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2215 						continue;
2217 					} else {
2218 						/* found it */
2219 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2220 						inserted = 1;
2221 						break;
2222 					}
2223 				}
2224 				if (inserted == 0) {
2225 					/*
2226 					 * Not inserted by the loop above,
2227 					 * so it must go at the end.
2228 					 */
2230 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2231 				}
2232 			}
2233 			goto finish_express_del;
2234 		}
2235 		if (chk_flags & SCTP_DATA_UNORDERED) {
2236 			/* queue directly into socket buffer */
2237 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2238 			    control, mid);
2239 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2240 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2241 			    control,
2242 			    &stcb->sctp_socket->so_rcv, 1,
2243 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2244 
2245 		} else {
2246 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2247 			    mid);
2248 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2249 			if (*abort_flag) {
2250 				if (last_chunk) {
2251 					*m = NULL;
2252 				}
2253 				return (0);
2254 			}
2255 		}
2256 		goto finish_express_del;
2257 	}
2258 	/* If we reach here, it's a reassembly */
2259 	need_reasm_check = 1;
2260 	SCTPDBG(SCTP_DEBUG_XXX,
2261 	    "Queue data to stream for reasm control: %p MID: %u\n",
2262 	    control, mid);
2263 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2264 	if (*abort_flag) {
2265 		/*
2266 		 * The association is now gone and chk was put onto the
2267 		 * reasm queue, which has all been freed.
2268 		 */
2269 		if (last_chunk) {
2270 			*m = NULL;
2271 		}
2272 		return (0);
2273 	}
2274 finish_express_del:
2275 	/* Here we tidy up things */
2276 	if (tsn == (asoc->cumulative_tsn + 1)) {
2277 		/* Update cum-ack */
2278 		asoc->cumulative_tsn = tsn;
2279 	}
2280 	if (last_chunk) {
2281 		*m = NULL;
2282 	}
2283 	if (ordered) {
2284 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2285 	} else {
2286 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2287 	}
2288 	SCTP_STAT_INCR(sctps_recvdata);
2289 	/* Set it present please */
2290 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2291 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2292 	}
2293 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2294 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2295 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2296 	}
2297 	if (need_reasm_check) {
2298 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2299 		need_reasm_check = 0;
2300 	}
2301 	/* check the special flag for stream resets */
2302 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2303 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2304 		/*
2305 		 * We have finished working through the backlogged TSNs; now
2306 		 * it is time to reset streams. 1: call reset function. 2:
2307 		 * free pending_reply space. 3: distribute any chunks in
2308 		 * pending_reply_queue.
2309 		 */
2310 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2311 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2312 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2313 		SCTP_FREE(liste, SCTP_M_STRESET);
2314 		/* sa_ignore FREED_MEMORY */
2315 		liste = TAILQ_FIRST(&asoc->resetHead);
2316 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2317 			/* All can be removed */
2318 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2319 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2320 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2321 				if (*abort_flag) {
2322 					return (0);
2323 				}
2324 				if (need_reasm_check) {
2325 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2326 					need_reasm_check = 0;
2327 				}
2328 			}
2329 		} else {
2330 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2331 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2332 					break;
2333 				}
2334 				/*
2335 				 * If control->sinfo_tsn is <= liste->tsn we
2336 				 * can process it, which is the negation of
2337 				 * control->sinfo_tsn > liste->tsn.
2338 				 */
2339 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2340 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2341 				if (*abort_flag) {
2342 					return (0);
2343 				}
2344 				if (need_reasm_check) {
2345 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2346 					need_reasm_check = 0;
2347 				}
2348 			}
2349 		}
2350 	}
2351 	return (1);
2352 }
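
/*
 * Return value convention for sctp_process_a_data_chunk(): 1 means the
 * chunk was accepted and counted as received data; 0 means it was dropped
 * (duplicate, no memory, invalid stream, rwnd limit) or the association
 * was aborted, in which case *abort_flag is set as well.
 */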
2353 
2354 static const int8_t sctp_map_lookup_tab[256] = {
2355 	0, 1, 0, 2, 0, 1, 0, 3,
2356 	0, 1, 0, 2, 0, 1, 0, 4,
2357 	0, 1, 0, 2, 0, 1, 0, 3,
2358 	0, 1, 0, 2, 0, 1, 0, 5,
2359 	0, 1, 0, 2, 0, 1, 0, 3,
2360 	0, 1, 0, 2, 0, 1, 0, 4,
2361 	0, 1, 0, 2, 0, 1, 0, 3,
2362 	0, 1, 0, 2, 0, 1, 0, 6,
2363 	0, 1, 0, 2, 0, 1, 0, 3,
2364 	0, 1, 0, 2, 0, 1, 0, 4,
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 5,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 4,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 7,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 4,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 5,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 4,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 6,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 4,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 5,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 4,
2385 	0, 1, 0, 2, 0, 1, 0, 3,
2386 	0, 1, 0, 2, 0, 1, 0, 8
2387 };
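
/*
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits starting at
 * the least significant bit of v; e.g. v = 0x17 (binary 00010111) yields 3
 * and v = 0xff yields 8. The slide code below uses this to count how many
 * TSNs past the base are present once it reaches the first byte that is
 * not 0xff.
 */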
2388 
2390 void
2391 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2392 {
2393 	/*
2394 	 * Now we also need to check the mapping array in a couple of ways.
2395 	 * 1) Did we move the cum-ack point?
2396 	 *
2397 	 * When you first glance at this you might think that all entries
2398 	 * that make up the position of the cum-ack would be in the
2399 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2400 	 * deliverable. That is true with one exception: for a fragmented
2401 	 * message we may not deliver the data until some threshold (or all
2402 	 * of it) is in place. So we must OR the nr_mapping_array and
2403 	 * mapping_array to get a true picture of the cum-ack.
2404 	 */
2405 	struct sctp_association *asoc;
2406 	int at;
2407 	uint8_t val;
2408 	int slide_from, slide_end, lgap, distance;
2409 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2410 
2411 	asoc = &stcb->asoc;
2412 
2413 	old_cumack = asoc->cumulative_tsn;
2414 	old_base = asoc->mapping_array_base_tsn;
2415 	old_highest = asoc->highest_tsn_inside_map;
2416 	/*
2417 	 * We could probably improve this a small bit by calculating the
2418 	 * offset of the current cum-ack as the starting point.
2419 	 */
2420 	at = 0;
2421 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2422 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2423 		if (val == 0xff) {
2424 			at += 8;
2425 		} else {
2426 			/* there is a 0 bit */
2427 			at += sctp_map_lookup_tab[val];
2428 			break;
2429 		}
2430 	}
2431 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
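	/*
	 * Worked example (illustrative): with mapping_array_base_tsn 100
	 * and the OR of the two maps starting ff ff 0f, the scan above
	 * gives at = 8 + 8 + 4 = 20, so the new cumulative_tsn is
	 * 100 + 19 = 119, i.e. TSNs 100..119 have all arrived.
	 */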
2432 
2433 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2434 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2435 #ifdef INVARIANTS
2436 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2437 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2438 #else
2439 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2440 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2441 		sctp_print_mapping_array(asoc);
2442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2443 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2444 		}
2445 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2446 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2447 #endif
2448 	}
2449 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2450 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2451 	} else {
2452 		highest_tsn = asoc->highest_tsn_inside_map;
2453 	}
2454 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2455 		/* The complete array was completed by a single FR */
2456 		/* highest becomes the cum-ack */
2457 		int clr;
2458 #ifdef INVARIANTS
2459 		unsigned int i;
2460 #endif
2461 
2462 		/* clear the array */
2463 		clr = ((at + 7) >> 3);
2464 		if (clr > asoc->mapping_array_size) {
2465 			clr = asoc->mapping_array_size;
2466 		}
2467 		memset(asoc->mapping_array, 0, clr);
2468 		memset(asoc->nr_mapping_array, 0, clr);
2469 #ifdef INVARIANTS
2470 		for (i = 0; i < asoc->mapping_array_size; i++) {
2471 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2472 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2473 				sctp_print_mapping_array(asoc);
2474 			}
2475 		}
2476 #endif
2477 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2478 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2479 	} else if (at >= 8) {
2480 		/* we can slide the mapping array down */
2481 		/* slide_from holds where we hit the first NON 0xff byte */
2482 
2483 		/*
2484 		 * now calculate the ceiling of the move using our highest
2485 		 * TSN value
2486 		 */
2487 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2488 		slide_end = (lgap >> 3);
2489 		if (slide_end < slide_from) {
2490 			sctp_print_mapping_array(asoc);
2491 #ifdef INVARIANTS
2492 			panic("impossible slide");
2493 #else
2494 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2495 			    lgap, slide_end, slide_from, at);
2496 			return;
2497 #endif
2498 		}
2499 		if (slide_end > asoc->mapping_array_size) {
2500 #ifdef INVARIANTS
2501 			panic("would overrun buffer");
2502 #else
2503 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2504 			    asoc->mapping_array_size, slide_end);
2505 			slide_end = asoc->mapping_array_size;
2506 #endif
2507 		}
2508 		distance = (slide_end - slide_from) + 1;
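		/*
		 * Example slide (illustrative): if slide_from is 2 (bytes 0
		 * and 1 were 0xff) and the highest TSN maps to lgap 47,
		 * then slide_end is 5 and distance is 4: bytes 2..5 are
		 * copied to the front and mapping_array_base_tsn advances
		 * by 2 * 8 = 16 TSNs.
		 */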
2509 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2510 			sctp_log_map(old_base, old_cumack, old_highest,
2511 			    SCTP_MAP_PREPARE_SLIDE);
2512 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2513 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2514 		}
2515 		if (distance + slide_from > asoc->mapping_array_size ||
2516 		    distance < 0) {
2517 			/*
2518 			 * Here we do NOT slide forward the array, so that
2519 			 * hopefully when more data comes in to fill it up
2520 			 * we will be able to slide it forward. Really, I
2521 			 * don't think this should happen.
2522 			 */
2523 
2524 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2525 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2526 				    (uint32_t)asoc->mapping_array_size,
2527 				    SCTP_MAP_SLIDE_NONE);
2528 			}
2529 		} else {
2530 			int ii;
2531 
2532 			for (ii = 0; ii < distance; ii++) {
2533 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2534 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2535 
2536 			}
2537 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2538 				asoc->mapping_array[ii] = 0;
2539 				asoc->nr_mapping_array[ii] = 0;
2540 			}
2541 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2542 				asoc->highest_tsn_inside_map += (slide_from << 3);
2543 			}
2544 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2545 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2546 			}
2547 			asoc->mapping_array_base_tsn += (slide_from << 3);
2548 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2549 				sctp_log_map(asoc->mapping_array_base_tsn,
2550 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2551 				    SCTP_MAP_SLIDE_RESULT);
2552 			}
2553 		}
2554 	}
2555 }
2556 
2557 void
2558 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2559 {
2560 	struct sctp_association *asoc;
2561 	uint32_t highest_tsn;
2562 	int is_a_gap;
2563 
2564 	sctp_slide_mapping_arrays(stcb);
2565 	asoc = &stcb->asoc;
2566 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2567 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2568 	} else {
2569 		highest_tsn = asoc->highest_tsn_inside_map;
2570 	}
2571 	/* Is there a gap now? */
2572 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
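	/*
	 * Example (illustrative): cumulative_tsn 105 with highest_tsn 110
	 * means some TSNs above the cum-ack arrived out of order, so gap
	 * reports are pending and the logic below leans toward sending a
	 * SACK now rather than only starting the delayed-ack timer.
	 */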
2573 
2574 	/*
2575 	 * Now we need to see if we need to queue a sack or just start the
2576 	 * timer (if allowed).
2577 	 */
2578 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2579 		/*
2580 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2581 		 * sure the SACK timer is off and instead send a SHUTDOWN
2582 		 * and a SACK.
2583 		 */
2584 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2585 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2586 			    stcb->sctp_ep, stcb, NULL,
2587 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2588 		}
2589 		sctp_send_shutdown(stcb,
2590 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2591 		if (is_a_gap) {
2592 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2593 		}
2594 	} else {
2595 		/*
2596 		 * CMT DAC algorithm: increase number of packets received
2597 		 * since last ack
2598 		 */
2599 		stcb->asoc.cmt_dac_pkts_rcvd++;
2600 
2601 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2602 							 * SACK */
2603 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2604 							 * longer is one */
2605 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2606 		    (is_a_gap) ||	/* is still a gap */
2607 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2608 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2609 		    ) {
2610 
2611 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2612 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2613 			    (stcb->asoc.send_sack == 0) &&
2614 			    (stcb->asoc.numduptsns == 0) &&
2615 			    (stcb->asoc.delayed_ack) &&
2616 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2617 
2618 				/*
2619 				 * CMT DAC algorithm: With CMT, delay acks
2620 				 * even in the face of reordering.
2621 				 * Therefore, acks that do not have to be
2622 				 * sent because of the above reasons will
2623 				 * be delayed. That is, acks that would
2624 				 * have been sent due to gap reports will
2625 				 * be delayed with DAC. Start the delayed
2626 				 * ack timer.
2627 				 */
2629 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2630 				    stcb->sctp_ep, stcb, NULL);
2631 			} else {
2632 				/*
2633 				 * Ok we must build a SACK since the timer
2634 				 * is pending, we got our first packet OR
2635 				 * there are gaps or duplicates.
2636 				 */
2637 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2638 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2639 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2640 			}
2641 		} else {
2642 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2643 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2644 				    stcb->sctp_ep, stcb, NULL);
2645 			}
2646 		}
2647 	}
2648 }
2649 
2650 int
2651 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2652     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2653     struct sctp_nets *net, uint32_t *high_tsn)
2654 {
2655 	struct sctp_chunkhdr *ch, chunk_buf;
2656 	struct sctp_association *asoc;
2657 	int num_chunks = 0;	/* number of control chunks processed */
2658 	int stop_proc = 0;
2659 	int break_flag, last_chunk;
2660 	int abort_flag = 0, was_a_gap;
2661 	struct mbuf *m;
2662 	uint32_t highest_tsn;
2663 	uint16_t chk_length;
2664 
2665 	/* set the rwnd */
2666 	sctp_set_rwnd(stcb, &stcb->asoc);
2667 
2668 	m = *mm;
2669 	SCTP_TCB_LOCK_ASSERT(stcb);
2670 	asoc = &stcb->asoc;
2671 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2672 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2673 	} else {
2674 		highest_tsn = asoc->highest_tsn_inside_map;
2675 	}
2676 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2677 	/*
2678 	 * Set up where we got the last DATA packet from, for any SACK that
2679 	 * may need to go out. Don't bump the net. This is done ONLY when a
2680 	 * chunk is assigned.
2681 	 */
2682 	asoc->last_data_chunk_from = net;
2683 
2684 	/*-
2685 	 * Now before we proceed we must figure out if this is a wasted
2686 	 * cluster... i.e. it is a small packet sent in and yet the driver
2687 	 * underneath allocated a full cluster for it. If so we must copy it
2688 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2689 	 * with cluster starvation.
2690 	 */
2691 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2692 		/* we only handle mbufs that are singletons, not chains */
2693 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2694 		if (m) {
2695 			/* ok, let's see if we can copy the data up */
2696 			caddr_t *from, *to;
2697 
2698 			/* get the pointers and copy */
2699 			to = mtod(m, caddr_t *);
2700 			from = mtod((*mm), caddr_t *);
2701 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2702 			/* copy the length and free up the old */
2703 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2704 			sctp_m_freem(*mm);
2705 			/* success, back copy */
2706 			*mm = m;
2707 		} else {
2708 			/* We are in trouble in the mbuf world .. yikes */
2709 			m = *mm;
2710 		}
2711 	}
2712 	/* get pointer to the first chunk header */
2713 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2714 	    sizeof(struct sctp_chunkhdr),
2715 	    (uint8_t *)&chunk_buf);
2716 	if (ch == NULL) {
2717 		return (1);
2718 	}
2719 	/*
2720 	 * process all DATA chunks...
2721 	 */
2722 	*high_tsn = asoc->cumulative_tsn;
2723 	break_flag = 0;
2724 	asoc->data_pkts_seen++;
2725 	while (stop_proc == 0) {
2726 		/* validate chunk length */
2727 		chk_length = ntohs(ch->chunk_length);
2728 		if (length - *offset < chk_length) {
2729 			/* all done, mutilated chunk */
2730 			stop_proc = 1;
2731 			continue;
2732 		}
2733 		if ((asoc->idata_supported == 1) &&
2734 		    (ch->chunk_type == SCTP_DATA)) {
2735 			struct mbuf *op_err;
2736 			char msg[SCTP_DIAG_INFO_LEN];
2737 
2738 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2739 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2740 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2741 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2742 			return (2);
2743 		}
2744 		if ((asoc->idata_supported == 0) &&
2745 		    (ch->chunk_type == SCTP_IDATA)) {
2746 			struct mbuf *op_err;
2747 			char msg[SCTP_DIAG_INFO_LEN];
2748 
2749 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2750 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2751 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2752 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2753 			return (2);
2754 		}
2755 		if ((ch->chunk_type == SCTP_DATA) ||
2756 		    (ch->chunk_type == SCTP_IDATA)) {
2757 			uint16_t clen;
2758 
2759 			if (ch->chunk_type == SCTP_DATA) {
2760 				clen = sizeof(struct sctp_data_chunk);
2761 			} else {
2762 				clen = sizeof(struct sctp_idata_chunk);
2763 			}
2764 			if (chk_length < clen) {
2765 				/*
2766 				 * Need to send an abort since we had an
2767 				 * invalid data chunk.
2768 				 */
2769 				struct mbuf *op_err;
2770 				char msg[SCTP_DIAG_INFO_LEN];
2771 
2772 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2773 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2774 				    chk_length);
2775 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2776 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2777 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2778 				return (2);
2779 			}
2780 #ifdef SCTP_AUDITING_ENABLED
2781 			sctp_audit_log(0xB1, 0);
2782 #endif
2783 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2784 				last_chunk = 1;
2785 			} else {
2786 				last_chunk = 0;
2787 			}
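			/*
			 * SCTP_SIZE32() rounds the chunk length up to its
			 * 4-byte padding boundary, e.g. (illustrative) a
			 * chk_length of 37 occupies 40 bytes on the wire.
			 * last_chunk is set when that padded length reaches
			 * the end of the packet, which lets the chunk steal
			 * the mbuf chain instead of copying it.
			 */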
2788 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2789 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2790 			    last_chunk, ch->chunk_type)) {
2791 				num_chunks++;
2792 			}
2793 			if (abort_flag)
2794 				return (2);
2795 
2796 			if (break_flag) {
2797 				/*
2798 				 * Set because we ran out of rwnd space and
2799 				 * no drop-report space is left.
2800 				 */
2801 				stop_proc = 1;
2802 				continue;
2803 			}
2804 		} else {
2805 			/* not a data chunk in the data region */
2806 			switch (ch->chunk_type) {
2807 			case SCTP_INITIATION:
2808 			case SCTP_INITIATION_ACK:
2809 			case SCTP_SELECTIVE_ACK:
2810 			case SCTP_NR_SELECTIVE_ACK:
2811 			case SCTP_HEARTBEAT_REQUEST:
2812 			case SCTP_HEARTBEAT_ACK:
2813 			case SCTP_ABORT_ASSOCIATION:
2814 			case SCTP_SHUTDOWN:
2815 			case SCTP_SHUTDOWN_ACK:
2816 			case SCTP_OPERATION_ERROR:
2817 			case SCTP_COOKIE_ECHO:
2818 			case SCTP_COOKIE_ACK:
2819 			case SCTP_ECN_ECHO:
2820 			case SCTP_ECN_CWR:
2821 			case SCTP_SHUTDOWN_COMPLETE:
2822 			case SCTP_AUTHENTICATION:
2823 			case SCTP_ASCONF_ACK:
2824 			case SCTP_PACKET_DROPPED:
2825 			case SCTP_STREAM_RESET:
2826 			case SCTP_FORWARD_CUM_TSN:
2827 			case SCTP_ASCONF:
2828 				{
2829 					/*
2830 					 * Now, what do we do with KNOWN
2831 					 * chunks that are NOT in the right
2832 					 * place?
2833 					 *
2834 					 * For now, I do nothing but ignore
2835 					 * them. We may later want to add
2836 					 * sysctl stuff to switch out and do
2837 					 * either an ABORT() or possibly
2838 					 * process them.
2839 					 */
2840 					struct mbuf *op_err;
2841 					char msg[SCTP_DIAG_INFO_LEN];
2842 
2843 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2844 					    ch->chunk_type);
2845 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2846 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2847 					return (2);
2848 				}
2849 			default:
2850 				/*
2851 				 * Unknown chunk type: use bit rules after
2852 				 * checking length
2853 				 */
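				/*
				 * Per RFC 4960, section 3.2, the two high
				 * bits of an unknown chunk type encode the
				 * handling: 0x40 set means report the chunk
				 * in an ERROR, 0x80 set means keep
				 * processing the rest of the packet. E.g. a
				 * type of 0xc1 (both bits set) is reported
				 * and then skipped.
				 */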
2854 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2855 					/*
2856 					 * Need to send an abort since we
2857 					 * had an invalid chunk.
2858 					 */
2859 					struct mbuf *op_err;
2860 					char msg[SCTP_DIAG_INFO_LEN];
2861 
2862 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2863 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2864 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2865 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2866 					return (2);
2867 				}
2868 				if (ch->chunk_type & 0x40) {
2869 					/* Add an error report to the queue */
2870 					struct mbuf *op_err;
2871 					struct sctp_gen_error_cause *cause;
2872 
2873 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2874 					    0, M_NOWAIT, 1, MT_DATA);
2875 					if (op_err != NULL) {
2876 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2877 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2878 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2879 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2880 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2881 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2882 							sctp_queue_op_err(stcb, op_err);
2883 						} else {
2884 							sctp_m_freem(op_err);
2885 						}
2886 					}
2887 				}
2888 				if ((ch->chunk_type & 0x80) == 0) {
2889 					/* discard the rest of this packet */
2890 					stop_proc = 1;
2891 				}	/* else skip this bad chunk and
2892 					 * continue... */
2893 				break;
2894 			}	/* switch of chunk type */
2895 		}
2896 		*offset += SCTP_SIZE32(chk_length);
2897 		if ((*offset >= length) || stop_proc) {
2898 			/* no more data left in the mbuf chain */
2899 			stop_proc = 1;
2900 			continue;
2901 		}
2902 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2903 		    sizeof(struct sctp_chunkhdr),
2904 		    (uint8_t *)&chunk_buf);
2905 		if (ch == NULL) {
2906 			*offset = length;
2907 			stop_proc = 1;
2908 			continue;
2909 		}
2910 	}
2911 	if (break_flag) {
2912 		/*
2913 		 * we need to report rwnd overrun drops.
2914 		 */
2915 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2916 	}
2917 	if (num_chunks) {
2918 		/*
2919 		 * Did we get data? If so, update the time for auto-close
2920 		 * and give the peer credit for being alive.
2921 		 */
2922 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2923 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2924 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2925 			    stcb->asoc.overall_error_count,
2926 			    0,
2927 			    SCTP_FROM_SCTP_INDATA,
2928 			    __LINE__);
2929 		}
2930 		stcb->asoc.overall_error_count = 0;
2931 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2932 	}
2933 	/* now service all of the reassm queue if needed */
2934 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2935 		/* Assure that we ack right away */
2936 		stcb->asoc.send_sack = 1;
2937 	}
2938 	/* Start a sack timer or QUEUE a SACK for sending */
2939 	sctp_sack_check(stcb, was_a_gap);
2940 	return (0);
2941 }
2942 
2943 static int
2944 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2945     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2946     int *num_frs,
2947     uint32_t *biggest_newly_acked_tsn,
2948     uint32_t *this_sack_lowest_newack,
2949     int *rto_ok)
2950 {
2951 	struct sctp_tmit_chunk *tp1;
2952 	unsigned int theTSN;
2953 	int j, wake_him = 0, circled = 0;
2954 
2955 	/* Recover the tp1 we last saw */
2956 	tp1 = *p_tp1;
2957 	if (tp1 == NULL) {
2958 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2959 	}
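	/*
	 * Gap ack block offsets are relative to the cumulative TSN ack;
	 * e.g. (illustrative) last_tsn 1000 with frag_strt 3 and frag_end 5
	 * walks theTSN through 1003, 1004 and 1005 below.
	 */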
2960 	for (j = frag_strt; j <= frag_end; j++) {
2961 		theTSN = j + last_tsn;
2962 		while (tp1) {
2963 			if (tp1->rec.data.doing_fast_retransmit)
2964 				(*num_frs) += 1;
2965 
2966 			/*-
2967 			 * CMT: CUCv2 algorithm. For each TSN being
2968 			 * processed from the sent queue, track the
2969 			 * next expected pseudo-cumack, or
2970 			 * rtx_pseudo_cumack, if required. Separate
2971 			 * cumack trackers for first transmissions,
2972 			 * and retransmissions.
2973 			 */
2974 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2975 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2976 			    (tp1->snd_count == 1)) {
2977 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2978 				tp1->whoTo->find_pseudo_cumack = 0;
2979 			}
2980 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2981 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2982 			    (tp1->snd_count > 1)) {
2983 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2984 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2985 			}
2986 			if (tp1->rec.data.tsn == theTSN) {
2987 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2988 					/*-
2989 					 * must be held until
2990 					 * cum-ack passes
2991 					 */
2992 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2993 						/*-
2994 						 * If it is less than RESEND, it is
2995 						 * now no-longer in flight.
2996 						 * Higher values may already be set
2997 						 * via previous Gap Ack Blocks...
2998 						 * i.e. ACKED or RESEND.
2999 						 */
3000 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3001 						    *biggest_newly_acked_tsn)) {
3002 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3003 						}
3004 						/*-
3005 						 * CMT: SFR algo (and HTNA) - set
3006 						 * saw_newack to 1 for dest being
3007 						 * newly acked. update
3008 						 * this_sack_highest_newack if
3009 						 * appropriate.
3010 						 */
3011 						if (tp1->rec.data.chunk_was_revoked == 0)
3012 							tp1->whoTo->saw_newack = 1;
3013 
3014 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3015 						    tp1->whoTo->this_sack_highest_newack)) {
3016 							tp1->whoTo->this_sack_highest_newack =
3017 							    tp1->rec.data.tsn;
3018 						}
3019 						/*-
3020 						 * CMT DAC algo: also update
3021 						 * this_sack_lowest_newack
3022 						 */
3023 						if (*this_sack_lowest_newack == 0) {
3024 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3025 								sctp_log_sack(*this_sack_lowest_newack,
3026 								    last_tsn,
3027 								    tp1->rec.data.tsn,
3028 								    0,
3029 								    0,
3030 								    SCTP_LOG_TSN_ACKED);
3031 							}
3032 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3033 						}
3034 						/*-
3035 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3036 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3037 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3038 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3039 						 * Separate pseudo_cumack trackers for first transmissions and
3040 						 * retransmissions.
3041 						 */
3042 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3043 							if (tp1->rec.data.chunk_was_revoked == 0) {
3044 								tp1->whoTo->new_pseudo_cumack = 1;
3045 							}
3046 							tp1->whoTo->find_pseudo_cumack = 1;
3047 						}
3048 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3049 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3050 						}
3051 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3052 							if (tp1->rec.data.chunk_was_revoked == 0) {
3053 								tp1->whoTo->new_pseudo_cumack = 1;
3054 							}
3055 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3056 						}
3057 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3058 							sctp_log_sack(*biggest_newly_acked_tsn,
3059 							    last_tsn,
3060 							    tp1->rec.data.tsn,
3061 							    frag_strt,
3062 							    frag_end,
3063 							    SCTP_LOG_TSN_ACKED);
3064 						}
3065 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3066 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3067 							    tp1->whoTo->flight_size,
3068 							    tp1->book_size,
3069 							    (uint32_t)(uintptr_t)tp1->whoTo,
3070 							    tp1->rec.data.tsn);
3071 						}
3072 						sctp_flight_size_decrease(tp1);
3073 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3074 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3075 							    tp1);
3076 						}
3077 						sctp_total_flight_decrease(stcb, tp1);
3078 
3079 						tp1->whoTo->net_ack += tp1->send_size;
3080 						if (tp1->snd_count < 2) {
3081 							/*-
3082 							 * True non-retransmitted chunk
3083 							 */
3084 							tp1->whoTo->net_ack2 += tp1->send_size;
3085 
3086 							/*-
3087 							 * update RTO too ?
3088 							 */
3089 							if (tp1->do_rtt) {
3090 								if (*rto_ok &&
3091 								    sctp_calculate_rto(stcb,
3092 								    &stcb->asoc,
3093 								    tp1->whoTo,
3094 								    &tp1->sent_rcv_time,
3095 								    SCTP_RTT_FROM_DATA)) {
3096 									*rto_ok = 0;
3097 								}
3098 								if (tp1->whoTo->rto_needed == 0) {
3099 									tp1->whoTo->rto_needed = 1;
3100 								}
3101 								tp1->do_rtt = 0;
3102 							}
3103 						}
3104 
3105 					}
3106 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3107 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3108 						    stcb->asoc.this_sack_highest_gap)) {
3109 							stcb->asoc.this_sack_highest_gap =
3110 							    tp1->rec.data.tsn;
3111 						}
3112 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3113 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3114 #ifdef SCTP_AUDITING_ENABLED
3115 							sctp_audit_log(0xB2,
3116 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3117 #endif
3118 						}
3119 					}
3120 					/*-
3121 					 * All chunks NOT UNSENT fall through here and are marked
3122 					 * (leave PR-SCTP ones that are to skip alone though)
3123 					 */
3124 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3125 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3126 						tp1->sent = SCTP_DATAGRAM_MARKED;
3127 					}
3128 					if (tp1->rec.data.chunk_was_revoked) {
3129 						/* deflate the cwnd */
3130 						tp1->whoTo->cwnd -= tp1->book_size;
3131 						tp1->rec.data.chunk_was_revoked = 0;
3132 					}
3133 					/* NR Sack code here */
3134 					if (nr_sacking &&
3135 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3136 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3137 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3138 #ifdef INVARIANTS
3139 						} else {
3140 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3141 #endif
3142 						}
3143 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3144 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3145 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3146 							stcb->asoc.trigger_reset = 1;
3147 						}
3148 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3149 						if (tp1->data) {
3150 							/*
3151 							 * sa_ignore
3152 							 * NO_NULL_CHK
3153 							 */
3154 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3155 							sctp_m_freem(tp1->data);
3156 							tp1->data = NULL;
3157 						}
3158 						wake_him++;
3159 					}
3160 				}
3161 				break;
3162 			}	/* if (tp1->rec.data.tsn == theTSN) */
3163 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3164 				break;
3165 			}
3166 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3167 			if ((tp1 == NULL) && (circled == 0)) {
3168 				circled++;
3169 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3170 			}
3171 		}		/* end while (tp1) */
3172 		if (tp1 == NULL) {
3173 			circled = 0;
3174 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3175 		}
3176 		/* In case the fragments were not in order we must reset */
3177 	}			/* end for (j = fragStart) */
3178 	*p_tp1 = tp1;
3179 	return (wake_him);	/* Return value only used for nr-sack */
3180 }
3181 
3182 
3183 static int
3184 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3185     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3186     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3187     int num_seg, int num_nr_seg, int *rto_ok)
3188 {
3189 	struct sctp_gap_ack_block *frag, block;
3190 	struct sctp_tmit_chunk *tp1;
3191 	int i;
3192 	int num_frs = 0;
3193 	int chunk_freed;
3194 	int non_revocable;
3195 	uint16_t frag_strt, frag_end, prev_frag_end;
3196 
3197 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3198 	prev_frag_end = 0;
3199 	chunk_freed = 0;
3200 
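	/*
	 * Each gap ack block carries 16-bit start/end offsets relative to
	 * the cumulative TSN of the SACK, so a block acks the TSN range
	 * [last_tsn + frag_strt .. last_tsn + frag_end]; e.g. with a
	 * cum-ack of 1000 and a block of {start = 2, end = 4}, TSNs
	 * 1002..1004 are acked. The num_seg revocable blocks come first,
	 * followed by the num_nr_seg non-renegable (NR) blocks.
	 */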
3201 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3202 		if (i == num_seg) {
3203 			prev_frag_end = 0;
3204 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3205 		}
3206 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3207 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3208 		*offset += sizeof(block);
3209 		if (frag == NULL) {
3210 			return (chunk_freed);
3211 		}
3212 		frag_strt = ntohs(frag->start);
3213 		frag_end = ntohs(frag->end);
3214 
3215 		if (frag_strt > frag_end) {
3216 			/* This gap report is malformed, skip it. */
3217 			continue;
3218 		}
3219 		if (frag_strt <= prev_frag_end) {
3220 			/* This gap report is not in order, so restart. */
3221 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3222 		}
3223 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3224 			*biggest_tsn_acked = last_tsn + frag_end;
3225 		}
3226 		if (i < num_seg) {
3227 			non_revocable = 0;
3228 		} else {
3229 			non_revocable = 1;
3230 		}
3231 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3232 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3233 		    this_sack_lowest_newack, rto_ok)) {
3234 			chunk_freed = 1;
3235 		}
3236 		prev_frag_end = frag_end;
3237 	}
3238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3239 		if (num_frs)
3240 			sctp_log_fr(*biggest_tsn_acked,
3241 			    *biggest_newly_acked_tsn,
3242 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3243 	}
3244 	return (chunk_freed);
3245 }
3246 
3247 static void
3248 sctp_check_for_revoked(struct sctp_tcb *stcb,
3249     struct sctp_association *asoc, uint32_t cumack,
3250     uint32_t biggest_tsn_acked)
3251 {
3252 	struct sctp_tmit_chunk *tp1;
3253 
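	/*
	 * A TSN is "revoked" when an earlier SACK gap-acked it but the
	 * current SACK no longer covers it (the peer reneged). Such a
	 * chunk goes back to SCTP_DATAGRAM_SENT and re-enters the flight
	 * accounting.
	 */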
3254 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3255 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3256 			/*
3257 			 * ok, this guy is either ACKED or MARKED. If it is
3258 			 * ACKED it has been previously acked but not this
3259 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3260 			 * again.
3261 			 */
3262 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3263 				break;
3264 			}
3265 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3266 				/* it has been revoked */
3267 				tp1->sent = SCTP_DATAGRAM_SENT;
3268 				tp1->rec.data.chunk_was_revoked = 1;
3269 				/*
3270 				 * We must add this stuff back in to assure
3271 				 * timers and such get started.
3272 				 */
3273 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3274 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3275 					    tp1->whoTo->flight_size,
3276 					    tp1->book_size,
3277 					    (uint32_t)(uintptr_t)tp1->whoTo,
3278 					    tp1->rec.data.tsn);
3279 				}
3280 				sctp_flight_size_increase(tp1);
3281 				sctp_total_flight_increase(stcb, tp1);
3282 				/*
3283 				 * We inflate the cwnd to compensate for our
3284 				 * artificial inflation of the flight_size.
3285 				 */
3286 				tp1->whoTo->cwnd += tp1->book_size;
3287 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3288 					sctp_log_sack(asoc->last_acked_seq,
3289 					    cumack,
3290 					    tp1->rec.data.tsn,
3291 					    0,
3292 					    0,
3293 					    SCTP_LOG_TSN_REVOKED);
3294 				}
3295 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3296 				/* it has been re-acked in this SACK */
3297 				tp1->sent = SCTP_DATAGRAM_ACKED;
3298 			}
3299 		}
3300 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3301 			break;
3302 	}
3303 }
3304 
3305 
3306 static void
3307 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3308     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3309 {
3310 	struct sctp_tmit_chunk *tp1;
3311 	int strike_flag = 0;
3312 	struct timeval now;
3313 	int tot_retrans = 0;
3314 	uint32_t sending_seq;
3315 	struct sctp_nets *net;
3316 	int num_dests_sacked = 0;
3317 
3318 	/*
3319 	 * Select the sending_seq: this is either the next thing ready to
3320 	 * be sent but not yet transmitted, OR the next seq we will assign.
3321 	 */
3322 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3323 	if (tp1 == NULL) {
3324 		sending_seq = asoc->sending_seq;
3325 	} else {
3326 		sending_seq = tp1->rec.data.tsn;
3327 	}
3328 
3329 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3330 	if ((asoc->sctp_cmt_on_off > 0) &&
3331 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3332 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3333 			if (net->saw_newack)
3334 				num_dests_sacked++;
3335 		}
3336 	}
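	/*
	 * With DAC, num_dests_sacked counts how many destinations saw a
	 * new ack in this SACK. The extra strike taken in the DAC branches
	 * below is only allowed when it is 1, i.e. the SACK is not "mixed"
	 * across destinations.
	 */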
3337 	if (stcb->asoc.prsctp_supported) {
3338 		(void)SCTP_GETTIME_TIMEVAL(&now);
3339 	}
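	/*
	 * Walk the sent queue and "strike" every chunk this SACK reports
	 * as missing. Below SCTP_DATAGRAM_RESEND the sent field doubles as
	 * the strike counter; a chunk that accumulates enough strikes
	 * reaches SCTP_DATAGRAM_RESEND and is set up for fast retransmit
	 * further down in this loop.
	 */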
3340 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3341 		strike_flag = 0;
3342 		if (tp1->no_fr_allowed) {
3343 			/* this one had a timeout or something */
3344 			continue;
3345 		}
3346 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3348 				sctp_log_fr(biggest_tsn_newly_acked,
3349 				    tp1->rec.data.tsn,
3350 				    tp1->sent,
3351 				    SCTP_FR_LOG_CHECK_STRIKE);
3352 		}
3353 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3354 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3355 			/* done */
3356 			break;
3357 		}
3358 		if (stcb->asoc.prsctp_supported) {
3359 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3360 				/* Is it expired? */
3361 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3362 					/* Yes so drop it */
3363 					if (tp1->data != NULL) {
3364 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3365 						    SCTP_SO_NOT_LOCKED);
3366 					}
3367 					continue;
3368 				}
3369 			}
3370 
3371 		}
3372 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3373 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3374 			/* we are beyond the tsn in the sack  */
3375 			break;
3376 		}
3377 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3378 			/* either a RESEND, ACKED, or MARKED */
3379 			/* skip */
3380 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3381 				/* Continue striking FWD-TSN chunks */
3382 				tp1->rec.data.fwd_tsn_cnt++;
3383 			}
3384 			continue;
3385 		}
3386 		/*
3387 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3388 		 */
3389 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3390 			/*
3391 			 * No new acks were received for data sent to this
3392 			 * dest. Therefore, according to the SFR algo for
3393 			 * CMT, no data sent to this dest can be marked for
3394 			 * FR using this SACK.
3395 			 */
3396 			continue;
3397 		} else if (tp1->whoTo &&
3398 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3399 			    tp1->whoTo->this_sack_highest_newack) &&
3400 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3401 			/*
3402 			 * CMT: New acks were received for data sent to
3403 			 * this dest. But no new acks were seen for data
3404 			 * sent after tp1. Therefore, according to the SFR
3405 			 * algo for CMT, tp1 cannot be marked for FR using
3406 			 * this SACK. This step covers part of the DAC algo
3407 			 * and the HTNA algo as well.
3408 			 */
3409 			continue;
3410 		}
3411 		/*
3412 		 * Here we check to see if we have already done a FR
3413 		 * and if so we see if the biggest TSN we saw in the sack is
3414 		 * smaller than the recovery point. If so we don't strike
3415 		 * the tsn... otherwise we CAN strike the TSN.
3416 		 */
3417 		/*
3418 		 * @@@ JRI: Check for CMT if (accum_moved &&
3419 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3420 		 * 0)) {
3421 		 */
3422 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3423 			/*
3424 			 * Strike the TSN if in fast-recovery and cum-ack
3425 			 * moved.
3426 			 */
3427 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3428 				sctp_log_fr(biggest_tsn_newly_acked,
3429 				    tp1->rec.data.tsn,
3430 				    tp1->sent,
3431 				    SCTP_FR_LOG_STRIKE_CHUNK);
3432 			}
3433 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3434 				tp1->sent++;
3435 			}
3436 			if ((asoc->sctp_cmt_on_off > 0) &&
3437 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3438 				/*
3439 				 * CMT DAC algorithm: If the SACK flag is set
3440 				 * to 0, the lowest_newack test will not pass,
3441 				 * because it would have been set to the
3442 				 * cumack earlier. If tp1 is not already
3443 				 * marked for retransmission, this is not a
3444 				 * mixed SACK, and tp1 does not lie between
3445 				 * two sacked TSNs, then strike it one more
3446 				 * time. NOTE that we strike one additional
3447 				 * time since the SACK DAC flag indicates that
3448 				 * two packets were received after this missing TSN.
3449 				 */
3450 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3451 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3452 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 						sctp_log_fr(16 + num_dests_sacked,
3454 						    tp1->rec.data.tsn,
3455 						    tp1->sent,
3456 						    SCTP_FR_LOG_STRIKE_CHUNK);
3457 					}
3458 					tp1->sent++;
3459 				}
3460 			}
3461 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3462 		    (asoc->sctp_cmt_on_off == 0)) {
3463 			/*
3464 			 * For those that have done a FR we must take
3465 			 * special consideration if we strike. I.e the
3466 			 * biggest_newly_acked must be higher than the
3467 			 * sending_seq at the time we did the FR.
3468 			 */
3469 			if (
3470 #ifdef SCTP_FR_TO_ALTERNATE
3471 			/*
3472 			 * If FR's go to new networks, then we must only do
3473 			 * this for singly homed asoc's. However, if the FR's
3474 			 * go to the same network (Armando's work) then it's
3475 			 * ok to FR multiple times.
3476 			 */
3477 			    (asoc->numnets < 2)
3478 #else
3479 			    (1)
3480 #endif
3481 			    ) {
3482 
3483 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3484 				    tp1->rec.data.fast_retran_tsn)) {
3485 					/*
3486 					 * Strike the TSN, since this ack is
3487 					 * beyond where things were when we
3488 					 * did a FR.
3489 					 */
3490 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3491 						sctp_log_fr(biggest_tsn_newly_acked,
3492 						    tp1->rec.data.tsn,
3493 						    tp1->sent,
3494 						    SCTP_FR_LOG_STRIKE_CHUNK);
3495 					}
3496 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3497 						tp1->sent++;
3498 					}
3499 					strike_flag = 1;
3500 					if ((asoc->sctp_cmt_on_off > 0) &&
3501 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3502 						/*
3503 						 * CMT DAC algorithm: If
3504 						 * the SACK flag is set to
3505 						 * 0, the lowest_newack
3506 						 * test will not pass,
3507 						 * because it would have
3508 						 * been set to the cumack
3509 						 * earlier. If tp1 is not
3510 						 * already marked for
3511 						 * retransmission, this is
3512 						 * not a mixed SACK, and
3513 						 * tp1 does not lie between
3514 						 * two sacked TSNs, then
3515 						 * strike it one more time.
3516 						 * NOTE that we strike one
3517 						 * additional time since the
3518 						 * SACK DAC flag indicates two
3519 						 * packets were received after this missing TSN.
3520 						 */
3521 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3522 						    (num_dests_sacked == 1) &&
3523 						    SCTP_TSN_GT(this_sack_lowest_newack,
3524 						    tp1->rec.data.tsn)) {
3525 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3526 								sctp_log_fr(32 + num_dests_sacked,
3527 								    tp1->rec.data.tsn,
3528 								    tp1->sent,
3529 								    SCTP_FR_LOG_STRIKE_CHUNK);
3530 							}
3531 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3532 								tp1->sent++;
3533 							}
3534 						}
3535 					}
3536 				}
3537 			}
3538 			/*
3539 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3540 			 * algo covers HTNA.
3541 			 */
3542 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3543 		    biggest_tsn_newly_acked)) {
3544 			/*
3545 			 * We don't strike these: this is the HTNA
3546 			 * algorithm, i.e. we don't strike if our TSN is
3547 			 * larger than the Highest TSN Newly Acked.
3548 			 */
3549 			;
3550 		} else {
3551 			/* Strike the TSN */
3552 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3553 				sctp_log_fr(biggest_tsn_newly_acked,
3554 				    tp1->rec.data.tsn,
3555 				    tp1->sent,
3556 				    SCTP_FR_LOG_STRIKE_CHUNK);
3557 			}
3558 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3559 				tp1->sent++;
3560 			}
3561 			if ((asoc->sctp_cmt_on_off > 0) &&
3562 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3563 				/*
3564 				 * CMT DAC algorithm: If the SACK flag is set
3565 				 * to 0, the lowest_newack test will not pass,
3566 				 * because it would have been set to the
3567 				 * cumack earlier. If tp1 is not already
3568 				 * marked for retransmission, this is not a
3569 				 * mixed SACK, and tp1 does not lie between
3570 				 * two sacked TSNs, then strike it one more
3571 				 * time. NOTE that we strike one additional
3572 				 * time since the SACK DAC flag indicates that
3573 				 * two packets were received after this missing TSN.
3574 				 */
3575 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3576 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3577 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3578 						sctp_log_fr(48 + num_dests_sacked,
3579 						    tp1->rec.data.tsn,
3580 						    tp1->sent,
3581 						    SCTP_FR_LOG_STRIKE_CHUNK);
3582 					}
3583 					tp1->sent++;
3584 				}
3585 			}
3586 		}
3587 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3588 			struct sctp_nets *alt;
3589 
3590 			/* fix counts and things */
3591 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3592 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3593 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3594 				    tp1->book_size,
3595 				    (uint32_t)(uintptr_t)tp1->whoTo,
3596 				    tp1->rec.data.tsn);
3597 			}
3598 			if (tp1->whoTo) {
3599 				tp1->whoTo->net_ack++;
3600 				sctp_flight_size_decrease(tp1);
3601 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3602 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3603 					    tp1);
3604 				}
3605 			}
3606 
3607 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3608 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3609 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3610 			}
3611 			/* add back to the rwnd */
3612 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3613 
3614 			/* remove from the total flight */
3615 			sctp_total_flight_decrease(stcb, tp1);
3616 
3617 			if ((stcb->asoc.prsctp_supported) &&
3618 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3619 				/*
3620 				 * Has it been retransmitted tv_sec times? -
3621 				 * we store the retran count there.
3622 				 */
3623 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3624 					/* Yes, so drop it */
3625 					if (tp1->data != NULL) {
3626 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3627 						    SCTP_SO_NOT_LOCKED);
3628 					}
3629 					/* Make sure to flag we had a FR */
3630 					if (tp1->whoTo != NULL) {
3631 						tp1->whoTo->net_ack++;
3632 					}
3633 					continue;
3634 				}
3635 			}
3636 			/*
3637 			 * SCTP_PRINTF("OK, we are now ready to FR this
3638 			 * guy\n");
3639 			 */
3640 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3641 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3642 				    0, SCTP_FR_MARKED);
3643 			}
3644 			if (strike_flag) {
3645 				/* This is a subsequent FR */
3646 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3647 			}
3648 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3649 			if (asoc->sctp_cmt_on_off > 0) {
3650 				/*
3651 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3652 				 * If CMT is being used, then pick dest with
3653 				 * largest ssthresh for any retransmission.
3654 				 */
3655 				tp1->no_fr_allowed = 1;
3656 				alt = tp1->whoTo;
3657 				/* sa_ignore NO_NULL_CHK */
3658 				if (asoc->sctp_cmt_pf > 0) {
3659 					/*
3660 					 * JRS 5/18/07 - If CMT PF is on,
3661 					 * use the PF version of
3662 					 * find_alt_net()
3663 					 */
3664 					alt = sctp_find_alternate_net(stcb, alt, 2);
3665 				} else {
3666 					/*
3667 					 * JRS 5/18/07 - If only CMT is on,
3668 					 * use the CMT version of
3669 					 * find_alt_net()
3670 					 */
3671 					/* sa_ignore NO_NULL_CHK */
3672 					alt = sctp_find_alternate_net(stcb, alt, 1);
3673 				}
3674 				if (alt == NULL) {
3675 					alt = tp1->whoTo;
3676 				}
3677 				/*
3678 				 * CUCv2: If a different dest is picked for
3679 				 * the retransmission, then new
3680 				 * (rtx-)pseudo_cumack needs to be tracked
3681 				 * for orig dest. Let CUCv2 track new (rtx-)
3682 				 * pseudo-cumack always.
3683 				 */
3684 				if (tp1->whoTo) {
3685 					tp1->whoTo->find_pseudo_cumack = 1;
3686 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3687 				}
3688 
3689 			} else {	/* CMT is OFF */
3690 
3691 #ifdef SCTP_FR_TO_ALTERNATE
3692 				/* Can we find an alternate? */
3693 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3694 #else
3695 				/*
3696 				 * default behavior is to NOT retransmit
3697 				 * FR's to an alternate. Armando Caro's
3698 				 * paper details why.
3699 				 */
3700 				alt = tp1->whoTo;
3701 #endif
3702 			}
3703 
3704 			tp1->rec.data.doing_fast_retransmit = 1;
3705 			tot_retrans++;
3706 			/* mark the sending seq for possible subsequent FR's */
3707 			/*
3708 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3709 			 * (uint32_t)tpi->rec.data.tsn);
3710 			 */
3711 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3712 				/*
3713 				 * If the send queue is empty, then sending_seq
3714 				 * is the next sequence number that will be
3715 				 * assigned, so we subtract one from it to
3716 				 * get the one we last sent.
3717 				 */
3718 				tp1->rec.data.fast_retran_tsn = sending_seq;
3719 			} else {
3720 				/*
3721 				 * If there are chunks on the send queue
3722 				 * (unsent data that has made it from the
3723 				 * stream queues but not out the door, we
3724 				 * take the first one (which will have the
3725 				 * lowest TSN) and subtract one to get the
3726 				 * one we last sent.
3727 				 */
3728 				struct sctp_tmit_chunk *ttt;
3729 
3730 				ttt = TAILQ_FIRST(&asoc->send_queue);
3731 				tp1->rec.data.fast_retran_tsn =
3732 				    ttt->rec.data.tsn;
3733 			}
3734 
3735 			if (tp1->do_rtt) {
3736 				/*
3737 				 * this guy had a RTO calculation pending on
3738 				 * it, cancel it
3739 				 */
3740 				if ((tp1->whoTo != NULL) &&
3741 				    (tp1->whoTo->rto_needed == 0)) {
3742 					tp1->whoTo->rto_needed = 1;
3743 				}
3744 				tp1->do_rtt = 0;
3745 			}
3746 			if (alt != tp1->whoTo) {
3747 				/* yes, there is an alternate. */
3748 				sctp_free_remote_addr(tp1->whoTo);
3749 				/* sa_ignore FREED_MEMORY */
3750 				tp1->whoTo = alt;
3751 				atomic_add_int(&alt->ref_count, 1);
3752 			}
3753 		}
3754 	}
3755 }
3756 
3757 struct sctp_tmit_chunk *
3758 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3759     struct sctp_association *asoc)
3760 {
3761 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3762 	struct timeval now;
3763 	int now_filled = 0;
3764 
3765 	if (asoc->prsctp_supported == 0) {
3766 		return (NULL);
3767 	}
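	/*
	 * Per PR-SCTP (RFC 3758) the sender may move its "advanced peer
	 * ack point" past abandoned TSNs and announce the new point to the
	 * peer in a FORWARD-TSN chunk. The scan below stops at the first
	 * chunk that still has to be delivered reliably.
	 */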
3768 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3769 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3770 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3771 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3772 			/* no chance to advance, out of here */
3773 			break;
3774 		}
3775 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3776 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3778 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3779 				    asoc->advanced_peer_ack_point,
3780 				    tp1->rec.data.tsn, 0, 0);
3781 			}
3782 		}
3783 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3784 			/*
3785 			 * We can't fwd-tsn past any that are reliable, i.e.
3786 			 * retransmitted until the asoc fails.
3787 			 */
3788 			break;
3789 		}
3790 		if (!now_filled) {
3791 			(void)SCTP_GETTIME_TIMEVAL(&now);
3792 			now_filled = 1;
3793 		}
3794 		/*
3795 		 * Now we have a chunk which is marked for another
3796 		 * retransmission to a PR-stream, but may have already run
3797 		 * out of its chances, OR has now been marked to skip. Can
3798 		 * we skip it if it's a resend?
3799 		 */
3800 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3801 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3802 			/*
3803 			 * Now is this one marked for resend and its time is
3804 			 * now up?
3805 			 */
3806 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3807 				/* Yes so drop it */
3808 				if (tp1->data) {
3809 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3810 					    1, SCTP_SO_NOT_LOCKED);
3811 				}
3812 			} else {
3813 				/*
3814 				 * No, we are done when we hit one marked for
3815 				 * resend whose time has not expired.
3816 				 */
3817 				break;
3818 			}
3819 		}
3820 		/*
3821 		 * Ok, now if this chunk is marked to drop, we can clean up
3822 		 * the chunk, advance our peer ack point, and check the
3823 		 * next chunk.
3824 		 */
3825 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3826 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3827 			/* the advanced peer ack point goes forward */
3828 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3829 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3830 				a_adv = tp1;
3831 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3832 				/* No update but we do save the chk */
3833 				a_adv = tp1;
3834 			}
3835 		} else {
3836 			/*
3837 			 * If it is still in RESEND we can advance no
3838 			 * further
3839 			 */
3840 			break;
3841 		}
3842 	}
3843 	return (a_adv);
3844 }
3845 
3846 static int
3847 sctp_fs_audit(struct sctp_association *asoc)
3848 {
3849 	struct sctp_tmit_chunk *chk;
3850 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3851 	int ret;
3852 #ifndef INVARIANTS
3853 	int entry_flight, entry_cnt;
3854 #endif
3855 
3856 	ret = 0;
3857 #ifndef INVARIANTS
3858 	entry_flight = asoc->total_flight;
3859 	entry_cnt = asoc->total_flight_count;
3860 #endif
3861 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3862 		return (0);
3863 
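	/*
	 * Classify every chunk on the sent queue: anything below
	 * SCTP_DATAGRAM_RESEND still counts toward the flight, anything
	 * strictly between RESEND and ACKED is "inbetween". Since this
	 * audit runs when the flight appears empty, a non-zero count in
	 * either bucket means the accounting has drifted.
	 */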
3864 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3865 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3866 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3867 			    chk->rec.data.tsn,
3868 			    chk->send_size,
3869 			    chk->snd_count);
3870 			inflight++;
3871 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3872 			resend++;
3873 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3874 			inbetween++;
3875 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3876 			above++;
3877 		} else {
3878 			acked++;
3879 		}
3880 	}
3881 
3882 	if ((inflight > 0) || (inbetween > 0)) {
3883 #ifdef INVARIANTS
3884 		panic("Flight size-express incorrect? \n");
3885 #else
3886 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3887 		    entry_flight, entry_cnt);
3888 
3889 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3890 		    inflight, inbetween, resend, above, acked);
3891 		ret = 1;
3892 #endif
3893 	}
3894 	return (ret);
3895 }
3896 
3897 
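/*
 * Recover a chunk that was used as a window probe: it is pulled out of
 * the flight accounting and marked SCTP_DATAGRAM_RESEND so it goes out
 * again now that the peer's window has reopened.
 */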
3898 static void
3899 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3900     struct sctp_association *asoc,
3901     struct sctp_tmit_chunk *tp1)
3902 {
3903 	tp1->window_probe = 0;
3904 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3905 		/* TSNs skipped; we do NOT move back. */
3906 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3907 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3908 		    tp1->book_size,
3909 		    (uint32_t)(uintptr_t)tp1->whoTo,
3910 		    tp1->rec.data.tsn);
3911 		return;
3912 	}
3913 	/* First setup this by shrinking flight */
3914 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3915 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3916 		    tp1);
3917 	}
3918 	sctp_flight_size_decrease(tp1);
3919 	sctp_total_flight_decrease(stcb, tp1);
3920 	/* Now mark for resend */
3921 	tp1->sent = SCTP_DATAGRAM_RESEND;
3922 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3923 
3924 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3925 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3926 		    tp1->whoTo->flight_size,
3927 		    tp1->book_size,
3928 		    (uint32_t)(uintptr_t)tp1->whoTo,
3929 		    tp1->rec.data.tsn);
3930 	}
3931 }
3932 
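/*
 * Express (fast-path) SACK handling: used when the SACK carries nothing
 * but a new cumulative ack, so the acked prefix of the sent queue can be
 * freed without any gap ack block processing.
 */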
3933 void
3934 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3935     uint32_t rwnd, int *abort_now, int ecne_seen)
3936 {
3937 	struct sctp_nets *net;
3938 	struct sctp_association *asoc;
3939 	struct sctp_tmit_chunk *tp1, *tp2;
3940 	uint32_t old_rwnd;
3941 	int win_probe_recovery = 0;
3942 	int win_probe_recovered = 0;
3943 	int j, done_once = 0;
3944 	int rto_ok = 1;
3945 	uint32_t send_s;
3946 
3947 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3948 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3949 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3950 	}
3951 	SCTP_TCB_LOCK_ASSERT(stcb);
3952 #ifdef SCTP_ASOCLOG_OF_TSNS
3953 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3954 	stcb->asoc.cumack_log_at++;
3955 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3956 		stcb->asoc.cumack_log_at = 0;
3957 	}
3958 #endif
3959 	asoc = &stcb->asoc;
3960 	old_rwnd = asoc->peers_rwnd;
3961 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3962 		/* old ack */
3963 		return;
3964 	} else if (asoc->last_acked_seq == cumack) {
3965 		/* Window update sack */
3966 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3967 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3968 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3969 			/* SWS sender side engages */
3970 			asoc->peers_rwnd = 0;
3971 		}
3972 		if (asoc->peers_rwnd > old_rwnd) {
3973 			goto again;
3974 		}
3975 		return;
3976 	}
3977 
3978 	/* First setup for CC stuff */
3979 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3980 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3981 			/* Drag along the window_tsn for cwr's */
3982 			net->cwr_window_tsn = cumack;
3983 		}
3984 		net->prev_cwnd = net->cwnd;
3985 		net->net_ack = 0;
3986 		net->net_ack2 = 0;
3987 
3988 		/*
3989 		 * CMT: Reset CUC and Fast recovery algo variables before
3990 		 * SACK processing
3991 		 */
3992 		net->new_pseudo_cumack = 0;
3993 		net->will_exit_fast_recovery = 0;
3994 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3995 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3996 		}
3997 	}
3998 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3999 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4000 		    sctpchunk_listhead);
4001 		send_s = tp1->rec.data.tsn + 1;
4002 	} else {
4003 		send_s = asoc->sending_seq;
4004 	}
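	/*
	 * Sanity check: send_s is one past the newest TSN the peer can
	 * legitimately have seen, so a cumack at or beyond it would
	 * acknowledge data that was never sent; treat it as a protocol
	 * violation and abort.
	 */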
4005 	if (SCTP_TSN_GE(cumack, send_s)) {
4006 		struct mbuf *op_err;
4007 		char msg[SCTP_DIAG_INFO_LEN];
4008 
4009 		*abort_now = 1;
4010 		/* XXX */
4011 		SCTP_SNPRINTF(msg, sizeof(msg),
4012 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4013 		    cumack, send_s);
4014 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4015 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4016 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4017 		return;
4018 	}
4019 	asoc->this_sack_highest_gap = cumack;
4020 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4021 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4022 		    stcb->asoc.overall_error_count,
4023 		    0,
4024 		    SCTP_FROM_SCTP_INDATA,
4025 		    __LINE__);
4026 	}
4027 	stcb->asoc.overall_error_count = 0;
4028 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4029 		/* process the new consecutive TSN first */
4030 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4031 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4032 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4033 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4034 				}
4035 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4036 					/*
4037 					 * If it is less than ACKED, it is
4038 					 * now no-longer in flight. Higher
4039 					 * values may occur during marking
4040 					 */
4041 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4042 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4043 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4044 							    tp1->whoTo->flight_size,
4045 							    tp1->book_size,
4046 							    (uint32_t)(uintptr_t)tp1->whoTo,
4047 							    tp1->rec.data.tsn);
4048 						}
4049 						sctp_flight_size_decrease(tp1);
4050 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4051 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4052 							    tp1);
4053 						}
4054 						/* sa_ignore NO_NULL_CHK */
4055 						sctp_total_flight_decrease(stcb, tp1);
4056 					}
4057 					tp1->whoTo->net_ack += tp1->send_size;
4058 					if (tp1->snd_count < 2) {
4059 						/*
4060 						 * True non-retransmitted
4061 						 * chunk
4062 						 */
4063 						tp1->whoTo->net_ack2 +=
4064 						    tp1->send_size;
4065 
4066 						/* update RTO too? */
4067 						if (tp1->do_rtt) {
4068 							if (rto_ok &&
4069 							    sctp_calculate_rto(stcb,
4070 							    &stcb->asoc,
4071 							    tp1->whoTo,
4072 							    &tp1->sent_rcv_time,
4073 							    SCTP_RTT_FROM_DATA)) {
4074 								rto_ok = 0;
4075 							}
4076 							if (tp1->whoTo->rto_needed == 0) {
4077 								tp1->whoTo->rto_needed = 1;
4078 							}
4079 							tp1->do_rtt = 0;
4080 						}
4081 					}
4082 					/*
4083 					 * CMT: CUCv2 algorithm. From the
4084 					 * cumack'd TSNs, for each TSN being
4085 					 * acked for the first time, set the
4086 					 * following variables for the
4087 					 * corresp destination.
4088 					 * new_pseudo_cumack will trigger a
4089 					 * cwnd update.
4090 					 * find_(rtx_)pseudo_cumack will
4091 					 * trigger search for the next
4092 					 * expected (rtx-)pseudo-cumack.
4093 					 */
4094 					tp1->whoTo->new_pseudo_cumack = 1;
4095 					tp1->whoTo->find_pseudo_cumack = 1;
4096 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4097 
4098 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4099 						/* sa_ignore NO_NULL_CHK */
4100 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4101 					}
4102 				}
4103 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4104 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4105 				}
4106 				if (tp1->rec.data.chunk_was_revoked) {
4107 					/* deflate the cwnd */
4108 					tp1->whoTo->cwnd -= tp1->book_size;
4109 					tp1->rec.data.chunk_was_revoked = 0;
4110 				}
4111 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4112 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4113 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4114 #ifdef INVARIANTS
4115 					} else {
4116 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4117 #endif
4118 					}
4119 				}
4120 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4121 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4122 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4123 					asoc->trigger_reset = 1;
4124 				}
4125 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4126 				if (tp1->data) {
4127 					/* sa_ignore NO_NULL_CHK */
4128 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4129 					sctp_m_freem(tp1->data);
4130 					tp1->data = NULL;
4131 				}
4132 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4133 					sctp_log_sack(asoc->last_acked_seq,
4134 					    cumack,
4135 					    tp1->rec.data.tsn,
4136 					    0,
4137 					    0,
4138 					    SCTP_LOG_FREE_SENT);
4139 				}
4140 				asoc->sent_queue_cnt--;
4141 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4142 			} else {
4143 				break;
4144 			}
4145 		}
4146 
4147 	}
4148 	/* sa_ignore NO_NULL_CHK */
4149 	if (stcb->sctp_socket) {
4150 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4151 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4152 			/* sa_ignore NO_NULL_CHK */
4153 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4154 		}
4155 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4156 	} else {
4157 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4158 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4159 		}
4160 	}
4161 
4162 	/* JRS - Use the congestion control given in the CC module */
4163 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4164 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4165 			if (net->net_ack2 > 0) {
4166 				/*
4167 				 * Karn's rule applies to clearing error
4168 				 * count, this is optional.
4169 				 */
4170 				net->error_count = 0;
4171 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4172 					/* addr came good */
4173 					net->dest_state |= SCTP_ADDR_REACHABLE;
4174 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4175 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4176 				}
4177 				if (net == stcb->asoc.primary_destination) {
4178 					if (stcb->asoc.alternate) {
4179 						/*
4180 						 * release the alternate,
4181 						 * primary is good
4182 						 */
4183 						sctp_free_remote_addr(stcb->asoc.alternate);
4184 						stcb->asoc.alternate = NULL;
4185 					}
4186 				}
4187 				if (net->dest_state & SCTP_ADDR_PF) {
4188 					net->dest_state &= ~SCTP_ADDR_PF;
4189 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4190 					    stcb->sctp_ep, stcb, net,
4191 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4192 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4193 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4194 					/* Done with this net */
4195 					net->net_ack = 0;
4196 				}
4197 				/* restore any doubled timers */
4198 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4199 				if (net->RTO < stcb->asoc.minrto) {
4200 					net->RTO = stcb->asoc.minrto;
4201 				}
4202 				if (net->RTO > stcb->asoc.maxrto) {
4203 					net->RTO = stcb->asoc.maxrto;
4204 				}
4205 			}
4206 		}
4207 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4208 	}
4209 	asoc->last_acked_seq = cumack;
4210 
4211 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4212 		/* nothing left in-flight */
4213 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 			net->flight_size = 0;
4215 			net->partial_bytes_acked = 0;
4216 		}
4217 		asoc->total_flight = 0;
4218 		asoc->total_flight_count = 0;
4219 	}
4220 
4221 	/* RWND update */
4222 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4223 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4224 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4225 		/* SWS sender side engages */
4226 		asoc->peers_rwnd = 0;
4227 	}
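	/*
	 * Example (assuming the default sctp_peer_chunk_oh of 256): with
	 * an advertised rwnd of 64000 bytes, 12000 bytes in flight and 10
	 * chunks in flight, peers_rwnd becomes
	 * 64000 - (12000 + 10 * 256) = 49440 bytes.
	 */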
4228 	if (asoc->peers_rwnd > old_rwnd) {
4229 		win_probe_recovery = 1;
4230 	}
4231 	/* Now assure a timer where data is queued at */
4232 again:
4233 	j = 0;
4234 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4235 		if (win_probe_recovery && (net->window_probe)) {
4236 			win_probe_recovered = 1;
4237 			/*
4238 			 * Find first chunk that was used with window probe
4239 			 * and clear the sent
4240 			 */
4241 			/* sa_ignore FREED_MEMORY */
4242 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4243 				if (tp1->window_probe) {
4244 					/* move back to data send queue */
4245 					sctp_window_probe_recovery(stcb, asoc, tp1);
4246 					break;
4247 				}
4248 			}
4249 		}
4250 		if (net->flight_size) {
4251 			j++;
4252 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4253 			if (net->window_probe) {
4254 				net->window_probe = 0;
4255 			}
4256 		} else {
4257 			if (net->window_probe) {
4258 				/*
4259 				 * In window probes we must assure a timer
4260 				 * is still running there
4261 				 */
4262 				net->window_probe = 0;
4263 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4264 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4265 				}
4266 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4267 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4268 				    stcb, net,
4269 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4270 			}
4271 		}
4272 	}
4273 	if ((j == 0) &&
4274 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4275 	    (asoc->sent_queue_retran_cnt == 0) &&
4276 	    (win_probe_recovered == 0) &&
4277 	    (done_once == 0)) {
4278 		/*
4279 		 * huh, this should not happen unless all packets are
4280 		 * PR-SCTP and marked to skip of course.
4281 		 */
4282 		if (sctp_fs_audit(asoc)) {
4283 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4284 				net->flight_size = 0;
4285 			}
4286 			asoc->total_flight = 0;
4287 			asoc->total_flight_count = 0;
4288 			asoc->sent_queue_retran_cnt = 0;
4289 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4290 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4291 					sctp_flight_size_increase(tp1);
4292 					sctp_total_flight_increase(stcb, tp1);
4293 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4294 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4295 				}
4296 			}
4297 		}
4298 		done_once = 1;
4299 		goto again;
4300 	}
4301 	/**********************************/
4302 	/* Now what about shutdown issues */
4303 	/**********************************/
4304 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4305 		/* nothing left on sendqueue.. consider done */
4306 		/* clean up */
4307 		if ((asoc->stream_queue_cnt == 1) &&
4308 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4309 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4310 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4311 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4312 		}
4313 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4314 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4315 		    (asoc->stream_queue_cnt == 1) &&
4316 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4317 			struct mbuf *op_err;
4318 
4319 			*abort_now = 1;
4320 			/* XXX */
4321 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4322 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4323 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4324 			return;
4325 		}
4326 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4327 		    (asoc->stream_queue_cnt == 0)) {
4328 			struct sctp_nets *netp;
4329 
4330 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4331 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4332 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4333 			}
4334 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4335 			sctp_stop_timers_for_shutdown(stcb);
4336 			if (asoc->alternate) {
4337 				netp = asoc->alternate;
4338 			} else {
4339 				netp = asoc->primary_destination;
4340 			}
4341 			sctp_send_shutdown(stcb, netp);
4342 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4343 			    stcb->sctp_ep, stcb, netp);
4344 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4345 			    stcb->sctp_ep, stcb, NULL);
4346 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4347 		    (asoc->stream_queue_cnt == 0)) {
4348 			struct sctp_nets *netp;
4349 
4350 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4351 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4352 			sctp_stop_timers_for_shutdown(stcb);
4353 			if (asoc->alternate) {
4354 				netp = asoc->alternate;
4355 			} else {
4356 				netp = asoc->primary_destination;
4357 			}
4358 			sctp_send_shutdown_ack(stcb, netp);
4359 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4360 			    stcb->sctp_ep, stcb, netp);
4361 		}
4362 	}
4363 	/*********************************************/
4364 	/* Here we perform PR-SCTP procedures        */
4365 	/* (section 4.2)                             */
4366 	/*********************************************/
4367 	/* C1. update advancedPeerAckPoint */
4368 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4369 		asoc->advanced_peer_ack_point = cumack;
4370 	}
4371 	/* PR-Sctp issues need to be addressed too */
4372 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4373 		struct sctp_tmit_chunk *lchk;
4374 		uint32_t old_adv_peer_ack_point;
4375 
4376 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4377 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4378 		/* C3. See if we need to send a Fwd-TSN */
4379 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4380 			/*
4381 			 * ISSUE with ECN, see FWD-TSN processing.
4382 			 */
4383 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4384 				send_forward_tsn(stcb, asoc);
4385 			} else if (lchk) {
4386 				/* try to FR fwd-tsn's that get lost too */
4387 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4388 					send_forward_tsn(stcb, asoc);
4389 				}
4390 			}
4391 		}
4392 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4393 			if (lchk->whoTo != NULL) {
4394 				break;
4395 			}
4396 		}
4397 		if (lchk != NULL) {
4398 			/* Assure a timer is up */
4399 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4400 			    stcb->sctp_ep, stcb, lchk->whoTo);
4401 		}
4402 	}
4403 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4404 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4405 		    rwnd,
4406 		    stcb->asoc.peers_rwnd,
4407 		    stcb->asoc.total_flight,
4408 		    stcb->asoc.total_output_queue_size);
4409 	}
4410 }
4411 
4412 void
4413 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4414     struct sctp_tcb *stcb,
4415     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4416     int *abort_now, uint8_t flags,
4417     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4418 {
4419 	struct sctp_association *asoc;
4420 	struct sctp_tmit_chunk *tp1, *tp2;
4421 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4422 	uint16_t wake_him = 0;
4423 	uint32_t send_s = 0;
4424 	long j;
4425 	int accum_moved = 0;
4426 	int will_exit_fast_recovery = 0;
4427 	uint32_t a_rwnd, old_rwnd;
4428 	int win_probe_recovery = 0;
4429 	int win_probe_recovered = 0;
4430 	struct sctp_nets *net = NULL;
4431 	int done_once;
4432 	int rto_ok = 1;
4433 	uint8_t reneged_all = 0;
4434 	uint8_t cmt_dac_flag;
4435 
4436 	/*
4437 	 * we take any chance we can to service our queues since we cannot
4438 	 * get awoken when the socket is read from :<
4439 	 */
4440 	/*
4441 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4442 	 * old sack, if so discard. 2) If there is nothing left in the send
4443 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4444 	 * too, update any rwnd change and verify no timers are running.
4445 	 * Then return. 3) Process any new consecutive data, i.e. the cum-ack
4446 	 * moved; process these first and note that it moved. 4) Process any
4447 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4448 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4449 	 * sync up flightsizes and things, stop all timers and also check
4450 	 * for shutdown_pending state. If so then go ahead and send off the
4451 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4452 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4453 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4454 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4455 	 * if in shutdown_recv state.
4456 	 */
4457 	SCTP_TCB_LOCK_ASSERT(stcb);
4458 	/* CMT DAC algo */
4459 	this_sack_lowest_newack = 0;
4460 	SCTP_STAT_INCR(sctps_slowpath_sack);
4461 	last_tsn = cum_ack;
4462 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4463 #ifdef SCTP_ASOCLOG_OF_TSNS
4464 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4465 	stcb->asoc.cumack_log_at++;
4466 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4467 		stcb->asoc.cumack_log_at = 0;
4468 	}
4469 #endif
4470 	a_rwnd = rwnd;
4471 
4472 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4473 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4474 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4475 	}
4476 
4477 	old_rwnd = stcb->asoc.peers_rwnd;
4478 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4479 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4480 		    stcb->asoc.overall_error_count,
4481 		    0,
4482 		    SCTP_FROM_SCTP_INDATA,
4483 		    __LINE__);
4484 	}
4485 	stcb->asoc.overall_error_count = 0;
4486 	asoc = &stcb->asoc;
4487 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4488 		sctp_log_sack(asoc->last_acked_seq,
4489 		    cum_ack,
4490 		    0,
4491 		    num_seg,
4492 		    num_dup,
4493 		    SCTP_LOG_NEW_SACK);
4494 	}
4495 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4496 		uint16_t i;
4497 		uint32_t *dupdata, dblock;
4498 
4499 		for (i = 0; i < num_dup; i++) {
4500 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4501 			    sizeof(uint32_t), (uint8_t *)&dblock);
4502 			if (dupdata == NULL) {
4503 				break;
4504 			}
4505 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4506 		}
4507 	}
4508 	/* reality check */
4509 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4510 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4511 		    sctpchunk_listhead);
4512 		send_s = tp1->rec.data.tsn + 1;
4513 	} else {
4514 		tp1 = NULL;
4515 		send_s = asoc->sending_seq;
4516 	}
4517 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4518 		struct mbuf *op_err;
4519 		char msg[SCTP_DIAG_INFO_LEN];
4520 
4521 		/*
4522 		 * no way, we have not even sent this TSN out yet. Peer is
4523 		 * hopelessly messed up with us.
4524 		 */
4525 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4526 		    cum_ack, send_s);
4527 		if (tp1) {
4528 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4529 			    tp1->rec.data.tsn, (void *)tp1);
4530 		}
4531 hopeless_peer:
4532 		*abort_now = 1;
4533 		/* XXX */
4534 		SCTP_SNPRINTF(msg, sizeof(msg),
4535 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4536 		    cum_ack, send_s);
4537 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4538 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4539 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4540 		return;
4541 	}
4542 	/**********************/
4543 	/* 1) check the range */
4544 	/**********************/
4545 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4546 		/* acking something behind */
4547 		return;
4548 	}
4549 
4550 	/* update the Rwnd of the peer */
4551 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4552 	    TAILQ_EMPTY(&asoc->send_queue) &&
4553 	    (asoc->stream_queue_cnt == 0)) {
4554 		/* nothing left on send/sent and strmq */
4555 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4556 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4557 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4558 		}
4559 		asoc->peers_rwnd = a_rwnd;
4560 		if (asoc->sent_queue_retran_cnt) {
4561 			asoc->sent_queue_retran_cnt = 0;
4562 		}
4563 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4564 			/* SWS sender side engages */
4565 			asoc->peers_rwnd = 0;
4566 		}
4567 		/* stop any timers */
4568 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4569 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4570 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4571 			net->partial_bytes_acked = 0;
4572 			net->flight_size = 0;
4573 		}
4574 		asoc->total_flight = 0;
4575 		asoc->total_flight_count = 0;
4576 		return;
4577 	}
4578 	/*
4579 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4580 	 * things. The total byte count acked is tracked in net_ack AND
4581 	 * net_ack2 is used to track the total bytes acked that are
4582 	 * unambiguous and were never retransmitted. We track these on a
4583 	 * per destination address basis.
4584 	 */
4585 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4586 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4587 			/* Drag along the window_tsn for cwr's */
4588 			net->cwr_window_tsn = cum_ack;
4589 		}
4590 		net->prev_cwnd = net->cwnd;
4591 		net->net_ack = 0;
4592 		net->net_ack2 = 0;
4593 
4594 		/*
4595 		 * CMT: Reset CUC and Fast recovery algo variables before
4596 		 * SACK processing
4597 		 */
4598 		net->new_pseudo_cumack = 0;
4599 		net->will_exit_fast_recovery = 0;
4600 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4601 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4602 		}
4603 
4604 		/*
4605 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4606 		 * to be greater than the cumack. Also reset saw_newack to 0
4607 		 * for all dests.
4608 		 */
4609 		net->saw_newack = 0;
4610 		net->this_sack_highest_newack = last_tsn;
4611 	}
4612 	/* process the new consecutive TSN first */
4613 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4614 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4615 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4616 				accum_moved = 1;
4617 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4618 					/*
4619 					 * If it is less than ACKED, it is
4620 					 * now no-longer in flight. Higher
4621 					 * values may occur during marking
4622 					 */
4623 					if ((tp1->whoTo->dest_state &
4624 					    SCTP_ADDR_UNCONFIRMED) &&
4625 					    (tp1->snd_count < 2)) {
4626 						/*
4627 						 * If there was no retran
4628 						 * and the address is
4629 						 * un-confirmed and we sent
4630 						 * there and are now
4631 						 * sacked.. its confirmed,
4632 						 * mark it so.
4633 						 */
4634 						tp1->whoTo->dest_state &=
4635 						    ~SCTP_ADDR_UNCONFIRMED;
4636 					}
4637 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4638 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4639 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4640 							    tp1->whoTo->flight_size,
4641 							    tp1->book_size,
4642 							    (uint32_t)(uintptr_t)tp1->whoTo,
4643 							    tp1->rec.data.tsn);
4644 						}
4645 						sctp_flight_size_decrease(tp1);
4646 						sctp_total_flight_decrease(stcb, tp1);
4647 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4648 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4649 							    tp1);
4650 						}
4651 					}
4652 					tp1->whoTo->net_ack += tp1->send_size;
4653 
4654 					/* CMT SFR and DAC algos */
4655 					this_sack_lowest_newack = tp1->rec.data.tsn;
4656 					tp1->whoTo->saw_newack = 1;
4657 
4658 					if (tp1->snd_count < 2) {
4659 						/*
4660 						 * True non-retransmitted
4661 						 * chunk
4662 						 */
4663 						tp1->whoTo->net_ack2 +=
4664 						    tp1->send_size;
4665 
4666 						/* update RTO too? */
4667 						if (tp1->do_rtt) {
4668 							if (rto_ok &&
4669 							    sctp_calculate_rto(stcb,
4670 							    &stcb->asoc,
4671 							    tp1->whoTo,
4672 							    &tp1->sent_rcv_time,
4673 							    SCTP_RTT_FROM_DATA)) {
4674 								rto_ok = 0;
4675 							}
4676 							if (tp1->whoTo->rto_needed == 0) {
4677 								tp1->whoTo->rto_needed = 1;
4678 							}
4679 							tp1->do_rtt = 0;
4680 						}
4681 					}
4682 					/*
4683 					 * CMT: CUCv2 algorithm. From the
4684 					 * cumack'd TSNs, for each TSN being
4685 					 * acked for the first time, set the
4686 					 * following variables for the
4687 					 * corresp destination.
4688 					 * new_pseudo_cumack will trigger a
4689 					 * cwnd update.
4690 					 * find_(rtx_)pseudo_cumack will
4691 					 * trigger search for the next
4692 					 * expected (rtx-)pseudo-cumack.
4693 					 */
4694 					tp1->whoTo->new_pseudo_cumack = 1;
4695 					tp1->whoTo->find_pseudo_cumack = 1;
4696 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4697 
4698 
4699 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4700 						sctp_log_sack(asoc->last_acked_seq,
4701 						    cum_ack,
4702 						    tp1->rec.data.tsn,
4703 						    0,
4704 						    0,
4705 						    SCTP_LOG_TSN_ACKED);
4706 					}
4707 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4708 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4709 					}
4710 				}
4711 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4712 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4713 #ifdef SCTP_AUDITING_ENABLED
4714 					sctp_audit_log(0xB3,
4715 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4716 #endif
4717 				}
4718 				if (tp1->rec.data.chunk_was_revoked) {
4719 					/* deflate the cwnd */
4720 					tp1->whoTo->cwnd -= tp1->book_size;
4721 					tp1->rec.data.chunk_was_revoked = 0;
4722 				}
4723 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4724 					tp1->sent = SCTP_DATAGRAM_ACKED;
4725 				}
4726 			}
4727 		} else {
4728 			break;
4729 		}
4730 	}
4731 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4732 	/* always set this up to cum-ack */
4733 	asoc->this_sack_highest_gap = last_tsn;
4734 
4735 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4736 
4737 		/*
4738 		 * thisSackHighestGap will increase while handling NEW
4739 		 * segments this_sack_highest_newack will increase while
4740 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4741 		 * used for CMT DAC algo. saw_newack will also change.
4742 		 */
4743 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4744 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4745 		    num_seg, num_nr_seg, &rto_ok)) {
4746 			wake_him++;
4747 		}
4748 		/*
4749 		 * validate the biggest_tsn_acked in the gap acks if strict
4750 		 * adherence is wanted.
4751 		 */
4752 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4753 			/*
4754 			 * peer is either confused or we are under attack.
4755 			 * We must abort.
4756 			 */
4757 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4758 			    biggest_tsn_acked, send_s);
4759 			goto hopeless_peer;
4760 		}
4761 	}
4762 	/*******************************************/
4763 	/* cancel ALL T3-send timer if accum moved */
4764 	/*******************************************/
4765 	if (asoc->sctp_cmt_on_off > 0) {
4766 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4767 			if (net->new_pseudo_cumack)
4768 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4769 				    stcb, net,
4770 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4772 		}
4773 	} else {
4774 		if (accum_moved) {
4775 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4776 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4777 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4778 			}
4779 		}
4780 	}
4781 	/********************************************/
4782 	/* drop the acked chunks from the sent queue */
4783 	/********************************************/
4784 	asoc->last_acked_seq = cum_ack;
4785 
4786 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4787 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4788 			break;
4789 		}
4790 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4791 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4792 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4793 #ifdef INVARIANTS
4794 			} else {
4795 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4796 #endif
4797 			}
4798 		}
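		/*
		 * If this stream is pending a reset and now has nothing
		 * queued for it, flag the association so the stream reset
		 * can proceed.
		 */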
4799 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4800 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4801 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4802 			asoc->trigger_reset = 1;
4803 		}
4804 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4805 		if (PR_SCTP_ENABLED(tp1->flags)) {
4806 			if (asoc->pr_sctp_cnt != 0)
4807 				asoc->pr_sctp_cnt--;
4808 		}
4809 		asoc->sent_queue_cnt--;
4810 		if (tp1->data) {
4811 			/* sa_ignore NO_NULL_CHK */
4812 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4813 			sctp_m_freem(tp1->data);
4814 			tp1->data = NULL;
4815 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4816 				asoc->sent_queue_cnt_removeable--;
4817 			}
4818 		}
4819 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4820 			sctp_log_sack(asoc->last_acked_seq,
4821 			    cum_ack,
4822 			    tp1->rec.data.tsn,
4823 			    0,
4824 			    0,
4825 			    SCTP_LOG_FREE_SENT);
4826 		}
4827 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4828 		wake_him++;
4829 	}
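	/*
	 * Sanity check: with an empty sent queue nothing can be in
	 * flight, so a positive total_flight indicates broken
	 * accounting.
	 */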
4830 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4831 #ifdef INVARIANTS
4832 		panic("Warning flight size is positive and should be 0");
4833 #else
4834 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4835 		    asoc->total_flight);
4836 #endif
4837 		asoc->total_flight = 0;
4838 	}
4839 
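	/*
	 * If chunks were freed above, space opened up in the send
	 * socket buffer, so wake any writer blocked on it.
	 */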
4840 	/* sa_ignore NO_NULL_CHK */
4841 	if ((wake_him) && (stcb->sctp_socket)) {
4842 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4843 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4844 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4845 		}
4846 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4847 	} else {
4848 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4849 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4850 		}
4851 	}
4852 
4853 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4854 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4855 			/* Setup so we will exit RFC2582 fast recovery */
4856 			will_exit_fast_recovery = 1;
4857 		}
4858 	}
4859 	/*
4860 	 * Check for revoked fragments:
4861 	 *
4862 	 * If the previous SACK had no frags, then nothing can have been
4863 	 * revoked. If the previous SACK had frags and we now have frags
4864 	 * (num_seg > 0), call sctp_check_for_revoked() to see if the peer
4865 	 * revoked some of them. Otherwise, the peer revoked all ACKED
4866 	 * fragments, since we had some before and now we have NONE.
4867 	 */
4868 
4869 	if (num_seg) {
4870 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4871 		asoc->saw_sack_with_frags = 1;
4872 	} else if (asoc->saw_sack_with_frags) {
4873 		int cnt_revoked = 0;
4874 
4875 		/* Peer revoked all datagrams marked or acked */
4876 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4877 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4878 				tp1->sent = SCTP_DATAGRAM_SENT;
4879 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4880 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4881 					    tp1->whoTo->flight_size,
4882 					    tp1->book_size,
4883 					    (uint32_t)(uintptr_t)tp1->whoTo,
4884 					    tp1->rec.data.tsn);
4885 				}
4886 				sctp_flight_size_increase(tp1);
4887 				sctp_total_flight_increase(stcb, tp1);
4888 				tp1->rec.data.chunk_was_revoked = 1;
4889 				/*
4890 				 * To ensure that this increase in
4891 				 * flightsize, which is artificial, does not
4892 				 * throttle the sender, we also increase the
4893 				 * cwnd artificially.
4894 				 */
4895 				tp1->whoTo->cwnd += tp1->book_size;
4896 				cnt_revoked++;
4897 			}
4898 		}
4899 		if (cnt_revoked) {
4900 			reneged_all = 1;
4901 		}
4902 		asoc->saw_sack_with_frags = 0;
4903 	}
4904 	if (num_nr_seg > 0)
4905 		asoc->saw_sack_with_nr_frags = 1;
4906 	else
4907 		asoc->saw_sack_with_nr_frags = 0;
4908 
4909 	/* JRS - Use the congestion control given in the CC module */
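	/*
	 * The per-net reachability, RTO, and cwnd housekeeping below is
	 * only performed when no ECN-Echo was seen in this packet.
	 */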
4910 	if (ecne_seen == 0) {
4911 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4912 			if (net->net_ack2 > 0) {
4913 				/*
4914 				 * Karn's rule applies to clearing error
4915 				 * count; this is optional.
4916 				 */
4917 				net->error_count = 0;
4918 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4919 					/* addr came good */
4920 					net->dest_state |= SCTP_ADDR_REACHABLE;
4921 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4922 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4923 				}
4924 
4925 				if (net == stcb->asoc.primary_destination) {
4926 					if (stcb->asoc.alternate) {
4927 						/*
4928 						 * release the alternate,
4929 						 * primary is good
4930 						 */
4931 						sctp_free_remote_addr(stcb->asoc.alternate);
4932 						stcb->asoc.alternate = NULL;
4933 					}
4934 				}
4935 
4936 				if (net->dest_state & SCTP_ADDR_PF) {
4937 					net->dest_state &= ~SCTP_ADDR_PF;
4938 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4939 					    stcb->sctp_ep, stcb, net,
4940 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4941 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4942 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4943 					/* Done with this net */
4944 					net->net_ack = 0;
4945 				}
4946 				/* restore any doubled timers */
4947 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4948 				if (net->RTO < stcb->asoc.minrto) {
4949 					net->RTO = stcb->asoc.minrto;
4950 				}
4951 				if (net->RTO > stcb->asoc.maxrto) {
4952 					net->RTO = stcb->asoc.maxrto;
4953 				}
4954 			}
4955 		}
4956 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4957 	}
4958 
4959 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4960 		/* nothing left in-flight */
4961 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4962 			/* stop all timers */
4963 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4964 			    stcb, net,
4965 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4966 			net->flight_size = 0;
4967 			net->partial_bytes_acked = 0;
4968 		}
4969 		asoc->total_flight = 0;
4970 		asoc->total_flight_count = 0;
4971 	}
4972 
4973 	/**********************************/
4974 	/* Now what about shutdown issues */
4975 	/**********************************/
4976 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4977 		/* nothing left on the send queue; consider done */
4978 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4979 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4980 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4981 		}
4982 		asoc->peers_rwnd = a_rwnd;
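		/*
		 * Silly Window Syndrome avoidance: a peer window smaller
		 * than the sender-side SWS threshold is treated as zero.
		 */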
4983 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4984 			/* SWS sender side engages */
4985 			asoc->peers_rwnd = 0;
4986 		}
4987 		/* clean up */
4988 		if ((asoc->stream_queue_cnt == 1) &&
4989 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4990 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4991 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4992 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4993 		}
4994 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4995 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4996 		    (asoc->stream_queue_cnt == 1) &&
4997 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4998 			struct mbuf *op_err;
4999 
5000 			*abort_now = 1;
5001 			/* XXX */
5002 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5003 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5004 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5005 			return;
5006 		}
5007 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5008 		    (asoc->stream_queue_cnt == 0)) {
5009 			struct sctp_nets *netp;
5010 
5011 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5012 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5013 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5014 			}
5015 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5016 			sctp_stop_timers_for_shutdown(stcb);
5017 			if (asoc->alternate) {
5018 				netp = asoc->alternate;
5019 			} else {
5020 				netp = asoc->primary_destination;
5021 			}
5022 			sctp_send_shutdown(stcb, netp);
5023 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5024 			    stcb->sctp_ep, stcb, netp);
5025 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5026 			    stcb->sctp_ep, stcb, NULL);
5027 			return;
5028 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5029 		    (asoc->stream_queue_cnt == 0)) {
5030 			struct sctp_nets *netp;
5031 
5032 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5033 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5034 			sctp_stop_timers_for_shutdown(stcb);
5035 			if (asoc->alternate) {
5036 				netp = asoc->alternate;
5037 			} else {
5038 				netp = asoc->primary_destination;
5039 			}
5040 			sctp_send_shutdown_ack(stcb, netp);
5041 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5042 			    stcb->sctp_ep, stcb, netp);
5043 			return;
5044 		}
5045 	}
5046 	/*
5047 	 * Now here we are going to recycle net_ack for a different use...
5048 	 * HEADS UP.
5049 	 */
5050 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5051 		net->net_ack = 0;
5052 	}
5053 
5054 	/*
5055 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5056 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5057 	 * automatically ensure that.
5058 	 */
5059 	if ((asoc->sctp_cmt_on_off > 0) &&
5060 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5061 	    (cmt_dac_flag == 0)) {
5062 		this_sack_lowest_newack = cum_ack;
5063 	}
5064 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5065 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5066 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5067 	}
5068 	/* JRS - Use the congestion control given in the CC module */
5069 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5070 
5071 	/* Now are we exiting loss recovery ? */
5072 	if (will_exit_fast_recovery) {
5073 		/* Ok, we must exit fast recovery */
5074 		asoc->fast_retran_loss_recovery = 0;
5075 	}
5076 	if ((asoc->sat_t3_loss_recovery) &&
5077 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5078 		/* end satellite t3 loss recovery */
5079 		asoc->sat_t3_loss_recovery = 0;
5080 	}
5081 	/*
5082 	 * CMT Fast recovery
5083 	 */
5084 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5085 		if (net->will_exit_fast_recovery) {
5086 			/* Ok, we must exit fast recovery */
5087 			net->fast_retran_loss_recovery = 0;
5088 		}
5089 	}
5090 
5091 	/* Adjust and set the new rwnd value */
5092 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5093 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5094 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5095 	}
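	/*
	 * The new peer window is the advertised rwnd minus everything
	 * still in flight, with a per-chunk overhead allowance
	 * (sctp_peer_chunk_oh) charged for each outstanding chunk.
	 */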
5096 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5097 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5098 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5099 		/* SWS sender side engages */
5100 		asoc->peers_rwnd = 0;
5101 	}
5102 	if (asoc->peers_rwnd > old_rwnd) {
5103 		win_probe_recovery = 1;
5104 	}
5105 
5106 	/*
5107 	 * Now we must setup so we have a timer up for anyone with
5108 	 * outstanding data.
5109 	 */
5110 	done_once = 0;
5111 again:
5112 	j = 0;
5113 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5114 		if (win_probe_recovery && (net->window_probe)) {
5115 			win_probe_recovered = 1;
5116 			/*-
5117 			 * Find the first chunk that was used with a
5118 			 * window probe and clear the event. Put it
5119 			 * back into the send queue as if it had not
5120 			 * been sent.
5121 			 */
5122 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5123 				if (tp1->window_probe) {
5124 					sctp_window_probe_recovery(stcb, asoc, tp1);
5125 					break;
5126 				}
5127 			}
5128 		}
5129 		if (net->flight_size) {
5130 			j++;
5131 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5132 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5133 				    stcb->sctp_ep, stcb, net);
5134 			}
5135 			if (net->window_probe) {
5136 				net->window_probe = 0;
5137 			}
5138 		} else {
5139 			if (net->window_probe) {
5140 				/*
5141 				 * For window probes we must ensure that a
5142 				 * timer is still running there.
5143 				 */
5144 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5145 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5146 					    stcb->sctp_ep, stcb, net);
5148 				}
5149 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5150 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5151 				    stcb, net,
5152 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5153 			}
5154 		}
5155 	}
5156 	if ((j == 0) &&
5157 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5158 	    (asoc->sent_queue_retran_cnt == 0) &&
5159 	    (win_probe_recovered == 0) &&
5160 	    (done_once == 0)) {
5161 		/*
5162 		 * Huh, this should not happen unless all packets are
5163 		 * PR-SCTP and marked to skip, of course.
5164 		 */
5165 		if (sctp_fs_audit(asoc)) {
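			/*
			 * The audit flagged a problem; rebuild flight size
			 * and the retransmit count from the sent queue.
			 */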
5166 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5167 				net->flight_size = 0;
5168 			}
5169 			asoc->total_flight = 0;
5170 			asoc->total_flight_count = 0;
5171 			asoc->sent_queue_retran_cnt = 0;
5172 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5173 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5174 					sctp_flight_size_increase(tp1);
5175 					sctp_total_flight_increase(stcb, tp1);
5176 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5177 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5178 				}
5179 			}
5180 		}
5181 		done_once = 1;
5182 		goto again;
5183 	}
5184 	/*********************************************/
5185 	/* Here we perform PR-SCTP procedures        */
5186 	/* (section 4.2)                             */
5187 	/*********************************************/
5188 	/* C1. update advancedPeerAckPoint */
5189 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5190 		asoc->advanced_peer_ack_point = cum_ack;
5191 	}
5192 	/* C2. try to further move advancedPeerAckPoint ahead */
5193 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5194 		struct sctp_tmit_chunk *lchk;
5195 		uint32_t old_adv_peer_ack_point;
5196 
5197 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5198 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5199 		/* C3. See if we need to send a Fwd-TSN */
5200 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5201 			/*
5202 			 * ISSUE with ECN, see FWD-TSN processing.
5203 			 */
5204 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5205 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5206 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5207 				    old_adv_peer_ack_point);
5208 			}
5209 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5210 				send_forward_tsn(stcb, asoc);
5211 			} else if (lchk) {
5212 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5213 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5214 					send_forward_tsn(stcb, asoc);
5215 				}
5216 			}
5217 		}
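		/*
		 * Find the first chunk that still has a destination so a
		 * T3 timer can be (re)started on it.
		 */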
5218 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5219 			if (lchk->whoTo != NULL) {
5220 				break;
5221 			}
5222 		}
5223 		if (lchk != NULL) {
5224 			/* Assure a timer is up */
5225 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5226 			    stcb->sctp_ep, stcb, lchk->whoTo);
5227 		}
5228 	}
5229 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5230 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5231 		    a_rwnd,
5232 		    stcb->asoc.peers_rwnd,
5233 		    stcb->asoc.total_flight,
5234 		    stcb->asoc.total_output_queue_size);
5235 	}
5236 }
5237 
5238 void
5239 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5240 {
5241 	/* Copy cum-ack */
5242 	uint32_t cum_ack, a_rwnd;
5243 
5244 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5245 	/* Arrange so a_rwnd does NOT change */
5246 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5247 
5248 	/* Now call the express sack handling */
5249 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5250 }
5251 
5252 static void
5253 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5254     struct sctp_stream_in *strmin)
5255 {
5256 	struct sctp_queued_to_read *control, *ncontrol;
5257 	struct sctp_association *asoc;
5258 	uint32_t mid;
5259 	int need_reasm_check = 0;
5260 
5261 	asoc = &stcb->asoc;
5262 	mid = strmin->last_mid_delivered;
5263 	/*
5264 	 * First deliver anything prior to and including the message ID
5265 	 * that came in.
5266 	 */
5267 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5268 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5269 			/* this is deliverable now */
5270 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5271 				if (control->on_strm_q) {
5272 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5273 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5274 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5275 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5276 #ifdef INVARIANTS
5277 					} else {
5278 						panic("strmin: %p ctl: %p unknown %d",
5279 						    strmin, control, control->on_strm_q);
5280 #endif
5281 					}
5282 					control->on_strm_q = 0;
5283 				}
5284 				/* subtract pending on streams */
5285 				if (asoc->size_on_all_streams >= control->length) {
5286 					asoc->size_on_all_streams -= control->length;
5287 				} else {
5288 #ifdef INVARIANTS
5289 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5290 #else
5291 					asoc->size_on_all_streams = 0;
5292 #endif
5293 				}
5294 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5295 				/* deliver it to at least the delivery-q */
5296 				if (stcb->sctp_socket) {
5297 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5298 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5299 					    control,
5300 					    &stcb->sctp_socket->so_rcv,
5301 					    1, SCTP_READ_LOCK_HELD,
5302 					    SCTP_SO_NOT_LOCKED);
5303 				}
5304 			} else {
5305 				/* It's a fragmented message */
5306 				if (control->first_frag_seen) {
5307 					/*
5308 					 * Make it so this is next to
5309 					 * deliver; we restore it later.
5310 					 */
5311 					strmin->last_mid_delivered = control->mid - 1;
5312 					need_reasm_check = 1;
5313 					break;
5314 				}
5315 			}
5316 		} else {
5317 			/* no more delivery now. */
5318 			break;
5319 		}
5320 	}
5321 	if (need_reasm_check) {
5322 		int ret;
5323 
5324 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5325 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5326 			/* Restore the next to deliver unless we are ahead */
5327 			strmin->last_mid_delivered = mid;
5328 		}
5329 		if (ret == 0) {
5330 			/* Left the front Partial one on */
5331 			return;
5332 		}
5333 		need_reasm_check = 0;
5334 	}
5335 	/*
5336 	 * Now we must deliver things in the queue the normal way, if any
5337 	 * are now ready.
5338 	 */
5339 	mid = strmin->last_mid_delivered + 1;
5340 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5341 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5342 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5343 				/* this is deliverable now */
5344 				if (control->on_strm_q) {
5345 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5346 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5347 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5348 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5349 #ifdef INVARIANTS
5350 					} else {
5351 						panic("strmin: %p ctl: %p unknown %d",
5352 						    strmin, control, control->on_strm_q);
5353 #endif
5354 					}
5355 					control->on_strm_q = 0;
5356 				}
5357 				/* subtract pending on streams */
5358 				if (asoc->size_on_all_streams >= control->length) {
5359 					asoc->size_on_all_streams -= control->length;
5360 				} else {
5361 #ifdef INVARIANTS
5362 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5363 #else
5364 					asoc->size_on_all_streams = 0;
5365 #endif
5366 				}
5367 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5368 				/* deliver it to at least the delivery-q */
5369 				strmin->last_mid_delivered = control->mid;
5370 				if (stcb->sctp_socket) {
5371 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5372 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5373 					    control,
5374 					    &stcb->sctp_socket->so_rcv, 1,
5375 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5377 				}
5378 				mid = strmin->last_mid_delivered + 1;
5379 			} else {
5380 				/* It's a fragmented message */
5381 				if (control->first_frag_seen) {
5382 					/*
5383 					 * Make it so this is next to
5384 					 * deliver
5385 					 */
5386 					strmin->last_mid_delivered = control->mid - 1;
5387 					need_reasm_check = 1;
5388 					break;
5389 				}
5390 			}
5391 		} else {
5392 			break;
5393 		}
5394 	}
5395 	if (need_reasm_check) {
5396 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5397 	}
5398 }
5399 
5402 static void
5403 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5404     struct sctp_association *asoc,
5405     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5406 {
5407 	struct sctp_queued_to_read *control;
5408 	struct sctp_stream_in *strm;
5409 	struct sctp_tmit_chunk *chk, *nchk;
5410 	int cnt_removed = 0;
5411 
5412 	/*
5413 	 * For now, large messages held on the stream reassembly queue that
5414 	 * are complete will be tossed too. We could in theory do more work
5415 	 * to spin through and stop after dumping one message, i.e. upon
5416 	 * seeing the start of a new message at the head, and call the
5417 	 * delivery function to see if it can be delivered. But for now we
5418 	 * just dump everything on the queue.
5419 	 */
5420 	strm = &asoc->strmin[stream];
5421 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5422 	if (control == NULL) {
5423 		/* Not found */
5424 		return;
5425 	}
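	/*
	 * Without I-DATA support, unordered fragments are keyed by TSN;
	 * if this entry's included FSN (the TSN for such chunks) lies
	 * beyond the new cumulative TSN, it is not covered by the
	 * FORWARD-TSN and must be left alone.
	 */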
5426 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5427 		return;
5428 	}
5429 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5430 		/* Purge hanging chunks */
5431 		if (!asoc->idata_supported && (ordered == 0)) {
5432 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5433 				break;
5434 			}
5435 		}
5436 		cnt_removed++;
5437 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5438 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5439 			asoc->size_on_reasm_queue -= chk->send_size;
5440 		} else {
5441 #ifdef INVARIANTS
5442 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5443 #else
5444 			asoc->size_on_reasm_queue = 0;
5445 #endif
5446 		}
5447 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5448 		if (chk->data) {
5449 			sctp_m_freem(chk->data);
5450 			chk->data = NULL;
5451 		}
5452 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5453 	}
5454 	if (!TAILQ_EMPTY(&control->reasm)) {
5455 		/* This has to be old data, unordered */
5456 		if (control->data) {
5457 			sctp_m_freem(control->data);
5458 			control->data = NULL;
5459 		}
5460 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5461 		chk = TAILQ_FIRST(&control->reasm);
5462 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5463 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5464 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5465 			    chk, SCTP_READ_LOCK_HELD);
5466 		}
5467 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5468 		return;
5469 	}
5470 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5471 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5472 		if (asoc->size_on_all_streams >= control->length) {
5473 			asoc->size_on_all_streams -= control->length;
5474 		} else {
5475 #ifdef INVARIANTS
5476 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5477 #else
5478 			asoc->size_on_all_streams = 0;
5479 #endif
5480 		}
5481 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5482 		control->on_strm_q = 0;
5483 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5484 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5485 		control->on_strm_q = 0;
5486 #ifdef INVARIANTS
5487 	} else if (control->on_strm_q) {
5488 		panic("strm: %p ctl: %p unknown %d",
5489 		    strm, control, control->on_strm_q);
5490 #endif
5491 	}
5492 	control->on_strm_q = 0;
5493 	if (control->on_read_q == 0) {
5494 		sctp_free_remote_addr(control->whoFrom);
5495 		if (control->data) {
5496 			sctp_m_freem(control->data);
5497 			control->data = NULL;
5498 		}
5499 		sctp_free_a_readq(stcb, control);
5500 	}
5501 }
5502 
5503 void
5504 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5505     struct sctp_forward_tsn_chunk *fwd,
5506     int *abort_flag, struct mbuf *m, int offset)
5507 {
5508 	/* The pr-sctp fwd tsn */
5509 	/*
5510 	 * Here we will perform all the data receiver side steps for
5511 	 * processing FwdTSN, as required by the PR-SCTP draft.
5512 	 *
5513 	 * Assume we get FwdTSN(x):
5514 	 * 1) update local cumTSN to x,
5515 	 * 2) try to further advance cumTSN to x + others we have,
5516 	 * 3) examine and update the re-ordering queue on pr-in-streams,
5517 	 * 4) clean up the re-assembly queue,
5518 	 * 5) send a SACK to report where we are.
5519 	 */
5520 	struct sctp_association *asoc;
5521 	uint32_t new_cum_tsn, gap;
5522 	unsigned int i, fwd_sz, m_size;
5523 	uint32_t str_seq;
5524 	struct sctp_stream_in *strm;
5525 	struct sctp_queued_to_read *control, *sv;
5526 
5527 	asoc = &stcb->asoc;
5528 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5529 		SCTPDBG(SCTP_DEBUG_INDATA1,
5530 		    "Bad size too small/big fwd-tsn\n");
5531 		return;
5532 	}
5533 	m_size = (stcb->asoc.mapping_array_size << 3);
5534 	/*************************************************************/
5535 	/* 1. Here we update local cumTSN and shift the bitmap array */
5536 	/*************************************************************/
5537 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5538 
5539 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5540 		/* Already got there ... */
5541 		return;
5542 	}
5543 	/*
5544 	 * Now we know the new TSN is more advanced; let's find the actual
5545 	 * gap.
5546 	 */
5547 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5548 	asoc->cumulative_tsn = new_cum_tsn;
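	/*
	 * If the gap runs past the end of the mapping array, validate
	 * the new cumulative TSN against the receive window and, if it
	 * is acceptable, reset both mapping arrays instead of marking
	 * individual TSNs.
	 */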
5549 	if (gap >= m_size) {
5550 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5551 			struct mbuf *op_err;
5552 			char msg[SCTP_DIAG_INFO_LEN];
5553 
5554 			/*
5555 			 * Out of range (of the single byte chunks in the
5556 			 * rwnd we give out). This must be an attacker.
5557 			 */
5558 			*abort_flag = 1;
5559 			SCTP_SNPRINTF(msg, sizeof(msg),
5560 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5561 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5562 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5563 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5564 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5565 			return;
5566 		}
5567 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5568 
5569 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5570 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5571 		asoc->highest_tsn_inside_map = new_cum_tsn;
5572 
5573 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5574 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5575 
5576 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5577 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5578 		}
5579 	} else {
5580 		SCTP_TCB_LOCK_ASSERT(stcb);
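		/*
		 * Mark every TSN up to and including the gap as received
		 * and non-renegable, tracking the highest TSN entered
		 * into the NR mapping array.
		 */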
5581 		for (i = 0; i <= gap; i++) {
5582 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5583 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5584 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5585 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5586 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5587 				}
5588 			}
5589 		}
5590 	}
5591 	/*************************************************************/
5592 	/* 2. Clear up re-assembly queue                             */
5593 	/*************************************************************/
5594 
5595 	/* This is now done as part of clearing up the stream/seq */
5596 	if (asoc->idata_supported == 0) {
5597 		uint16_t sid;
5598 
5599 		/* Flush all the un-ordered data based on cum-tsn */
5600 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5601 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5602 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5603 		}
5604 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5605 	}
5606 	/*******************************************************/
5607 	/* 3. Update the PR-stream re-ordering queues and fix  */
5608 	/*    delivery issues as needed.                       */
5609 	/*******************************************************/
5610 	fwd_sz -= sizeof(*fwd);
5611 	if (m && fwd_sz) {
5612 		/* New method. */
5613 		unsigned int num_str;
5614 		uint32_t mid, cur_mid;
5615 		uint16_t sid;
5616 		uint16_t ordered, flags;
5617 		struct sctp_strseq *stseq, strseqbuf;
5618 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5619 
5620 		offset += sizeof(*fwd);
5621 
5622 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5623 		if (asoc->idata_supported) {
5624 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5625 		} else {
5626 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5627 		}
5628 		for (i = 0; i < num_str; i++) {
5629 			if (asoc->idata_supported) {
5630 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5631 				    sizeof(struct sctp_strseq_mid),
5632 				    (uint8_t *)&strseqbuf_m);
5633 				offset += sizeof(struct sctp_strseq_mid);
5634 				if (stseq_m == NULL) {
5635 					break;
5636 				}
5637 				sid = ntohs(stseq_m->sid);
5638 				mid = ntohl(stseq_m->mid);
5639 				flags = ntohs(stseq_m->flags);
5640 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5641 					ordered = 0;
5642 				} else {
5643 					ordered = 1;
5644 				}
5645 			} else {
5646 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5647 				    sizeof(struct sctp_strseq),
5648 				    (uint8_t *)&strseqbuf);
5649 				offset += sizeof(struct sctp_strseq);
5650 				if (stseq == NULL) {
5651 					break;
5652 				}
5653 				sid = ntohs(stseq->sid);
5654 				mid = (uint32_t)ntohs(stseq->ssn);
5655 				ordered = 1;
5656 			}
5657 			/* Now process this stream/sequence entry. */
5658 
5661 			/*
5662 			 * Ok we now look for the stream/seq on the read
5663 			 * queue where its not all delivered. If we find it
5664 			 * we transmute the read entry into a PDI_ABORTED.
5665 			 */
5666 			if (sid >= asoc->streamincnt) {
5667 				/* screwed up streams, stop!  */
5668 				break;
5669 			}
5670 			if ((asoc->str_of_pdapi == sid) &&
5671 			    (asoc->ssn_of_pdapi == mid)) {
5672 				/*
5673 				 * If this is the one we were partially
5674 				 * delivering, then we no longer are. Note
5675 				 * this will change with the reassembly
5676 				 * re-write.
5677 				 */
5678 				asoc->fragmented_delivery_inprogress = 0;
5679 			}
5680 			strm = &asoc->strmin[sid];
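			/*
			 * Flush any partial reassembly for every message
			 * up to and including the MID reported in this
			 * entry.
			 */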
5681 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5682 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5683 			}
5684 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5685 				if ((control->sinfo_stream == sid) &&
5686 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5687 					str_seq = (sid << 16) | (0x0000ffff & mid);
5688 					control->pdapi_aborted = 1;
5689 					sv = stcb->asoc.control_pdapi;
5690 					control->end_added = 1;
5691 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5692 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5693 						if (asoc->size_on_all_streams >= control->length) {
5694 							asoc->size_on_all_streams -= control->length;
5695 						} else {
5696 #ifdef INVARIANTS
5697 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5698 #else
5699 							asoc->size_on_all_streams = 0;
5700 #endif
5701 						}
5702 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5703 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5704 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5705 #ifdef INVARIANTS
5706 					} else if (control->on_strm_q) {
5707 						panic("strm: %p ctl: %p unknown %d",
5708 						    strm, control, control->on_strm_q);
5709 #endif
5710 					}
5711 					control->on_strm_q = 0;
5712 					stcb->asoc.control_pdapi = control;
5713 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5714 					    stcb,
5715 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5716 					    (void *)&str_seq,
5717 					    SCTP_SO_NOT_LOCKED);
5718 					stcb->asoc.control_pdapi = sv;
5719 					break;
5720 				} else if ((control->sinfo_stream == sid) &&
5721 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5722 					/* We are past our victim SSN */
5723 					break;
5724 				}
5725 			}
5726 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5727 				/* Update the sequence number */
5728 				strm->last_mid_delivered = mid;
5729 			}
5730 			/* now kick the stream the new way */
5731 			/* sa_ignore NO_NULL_CHK */
5732 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5733 		}
5734 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5735 	}
5736 	/*
5737 	 * Now slide the mapping arrays forward.
5738 	 */
5739 	sctp_slide_mapping_arrays(stcb);
5740 }
5741