xref: /freebsd/sys/netinet/sctp_indata.c (revision b4af4f93c682e445bf159f0d1ec90b636296c946)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <sys/proc.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
55 /*
56  * NOTES: On the outbound side of things I need to check the sack timer to
57  * see if I should generate a sack into the chunk queue (if I have data to
58  * send, that is) and will be sending it ... for bundling.
59  *
60  * The callback in sctp_usrreq.c will get called when the socket is read from.
61  * This will cause sctp_service_queues() to get called on the top entry in
62  * the list.
63  */
64 static uint32_t
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66     struct sctp_stream_in *strm,
67     struct sctp_tcb *stcb,
68     struct sctp_association *asoc,
69     struct sctp_tmit_chunk *chk, int hold_rlock);
70 
71 
72 void
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
74 {
75 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
76 }
77 
78 /* Calculate what the rwnd would be */
79 uint32_t
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
81 {
82 	uint32_t calc = 0;
83 
84 	/*
85 	 * This is really set wrong with respect to a 1-2-m socket. Since
86 	 * the sb_cc is the count that everyone has put up. When we rewrite
87 	 * sctp_soreceive we will fix this so that ONLY this
88 	 * association's data is taken into account.
89 	 */
90 	if (stcb->sctp_socket == NULL) {
91 		return (calc);
92 	}
93 
94 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
95 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
96 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
97 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->cnt_on_reasm_queue == 0 &&
100 	    asoc->cnt_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 	/*
108 	 * Take out what has NOT been put on the socket queue and what we
109 	 * still hold for putting up.
110 	 */
111 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
112 	    asoc->cnt_on_reasm_queue * MSIZE));
113 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
114 	    asoc->cnt_on_all_streams * MSIZE));
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 
120 	/* what is the overhead of all these rwnd's */
121 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
122 	/*
123 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
124 	 * even if it is 0, so that SWS avoidance stays engaged.
125 	 */
126 	if (calc < stcb->asoc.my_rwnd_control_len) {
127 		calc = 1;
128 	}
129 	return (calc);
130 }
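
/*
 * Illustrative sketch (comment only, not compiled): the computation
 * above reduces, roughly, to the following arithmetic, where each
 * subtraction saturates at zero via sctp_sbspace_sub():
 *
 *	space  = sbspace(so_rcv);
 *	space -= size_on_reasm_queue + cnt_on_reasm_queue * MSIZE;
 *	space -= size_on_all_streams + cnt_on_all_streams * MSIZE;
 *	space -= my_rwnd_control_len;
 *	rwnd   = (space < my_rwnd_control_len) ? 1 : space;
 *
 * The final clamp to 1 advertises a tiny non-zero window instead of
 * zero, which is what keeps SWS avoidance engaged.
 */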
131 
132 
133 
134 /*
135  * Build out our readq entry based on the incoming packet.
136  */
137 struct sctp_queued_to_read *
138 sctp_build_readq_entry(struct sctp_tcb *stcb,
139     struct sctp_nets *net,
140     uint32_t tsn, uint32_t ppid,
141     uint32_t context, uint16_t sid,
142     uint32_t mid, uint8_t flags,
143     struct mbuf *dm)
144 {
145 	struct sctp_queued_to_read *read_queue_e = NULL;
146 
147 	sctp_alloc_a_readq(stcb, read_queue_e);
148 	if (read_queue_e == NULL) {
149 		goto failed_build;
150 	}
151 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
152 	read_queue_e->sinfo_stream = sid;
153 	read_queue_e->sinfo_flags = (flags << 8);
154 	read_queue_e->sinfo_ppid = ppid;
155 	read_queue_e->sinfo_context = context;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->mid = mid;
160 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
161 	TAILQ_INIT(&read_queue_e->reasm);
162 	read_queue_e->whoFrom = net;
163 	atomic_add_int(&net->ref_count, 1);
164 	read_queue_e->data = dm;
165 	read_queue_e->stcb = stcb;
166 	read_queue_e->port_from = stcb->rport;
167 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
168 		read_queue_e->do_not_ref_stcb = 1;
169 	}
170 failed_build:
171 	return (read_queue_e);
172 }
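
/*
 * A note on the (flags << 8) packing above: sinfo_flags carries the
 * DATA chunk flags (SCTP_DATA_UNORDERED, SCTP_DATA_NOT_FRAG, ...) in
 * its upper byte, so consumers elsewhere in this file unpack them as:
 *
 *	flags = (control->sinfo_flags >> 8);
 *	unordered = flags & SCTP_DATA_UNORDERED;
 */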
173 
174 struct mbuf *
175 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
176 {
177 	struct sctp_extrcvinfo *seinfo;
178 	struct sctp_sndrcvinfo *outinfo;
179 	struct sctp_rcvinfo *rcvinfo;
180 	struct sctp_nxtinfo *nxtinfo;
181 	struct cmsghdr *cmh;
182 	struct mbuf *ret;
183 	int len;
184 	int use_extended;
185 	int provide_nxt;
186 
187 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
188 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
189 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
190 		/* user does not want any ancillary data */
191 		return (NULL);
192 	}
193 
194 	len = 0;
195 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
196 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
197 	}
198 	seinfo = (struct sctp_extrcvinfo *)sinfo;
199 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
200 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
201 		provide_nxt = 1;
202 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
203 	} else {
204 		provide_nxt = 0;
205 	}
206 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
207 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
208 			use_extended = 1;
209 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
210 		} else {
211 			use_extended = 0;
212 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
213 		}
214 	} else {
215 		use_extended = 0;
216 	}
217 
218 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
219 	if (ret == NULL) {
220 		/* No space */
221 		return (ret);
222 	}
223 	SCTP_BUF_LEN(ret) = 0;
224 
225 	/* We need a CMSG header followed by the struct */
226 	cmh = mtod(ret, struct cmsghdr *);
227 	/*
228 	 * Make sure that there is no un-initialized padding between the
229 	 * cmsg header and cmsg data and after the cmsg data.
230 	 */
231 	memset(cmh, 0, len);
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
233 		cmh->cmsg_level = IPPROTO_SCTP;
234 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
235 		cmh->cmsg_type = SCTP_RCVINFO;
236 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
237 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
238 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
239 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
240 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
241 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
242 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
243 		rcvinfo->rcv_context = sinfo->sinfo_context;
244 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
245 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
246 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
247 	}
248 	if (provide_nxt) {
249 		cmh->cmsg_level = IPPROTO_SCTP;
250 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
251 		cmh->cmsg_type = SCTP_NXTINFO;
252 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
253 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
254 		nxtinfo->nxt_flags = 0;
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
256 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
257 		}
258 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
259 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
260 		}
261 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
262 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
263 		}
264 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
265 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
266 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
267 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
268 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
269 	}
270 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
271 		cmh->cmsg_level = IPPROTO_SCTP;
272 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
273 		if (use_extended) {
274 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
275 			cmh->cmsg_type = SCTP_EXTRCV;
276 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
277 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
278 		} else {
279 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
280 			cmh->cmsg_type = SCTP_SNDRCV;
281 			*outinfo = *sinfo;
282 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
283 		}
284 	}
285 	return (ret);
286 }
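
/*
 * Illustrative sketch (user-space consumer, not compiled): an
 * application that enabled SCTP_RECVRCVINFO would walk the ancillary
 * data built above with the standard cmsg macros after recvmsg().
 * "msg" below is an assumed struct msghdr filled in by that call:
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo *ri;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if ((cmsg->cmsg_level == IPPROTO_SCTP) &&
 *		    (cmsg->cmsg_type == SCTP_RCVINFO)) {
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *			(void)ri->rcv_sid;	(and rcv_tsn, rcv_flags, ...)
 *		}
 *	}
 */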
287 
288 
289 static void
290 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
291 {
292 	uint32_t gap, i, cumackp1;
293 	int fnd = 0;
294 	int in_r = 0, in_nr = 0;
295 
296 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
297 		return;
298 	}
299 	cumackp1 = asoc->cumulative_tsn + 1;
300 	if (SCTP_TSN_GT(cumackp1, tsn)) {
301 		/*
302 		 * this tsn is behind the cum ack and thus we don't need to
303 		 * worry about it being moved from one to the other.
304 		 */
305 		return;
306 	}
307 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
308 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
309 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 	if ((in_r == 0) && (in_nr == 0)) {
311 #ifdef INVARIANTS
312 		panic("Things are really messed up now");
313 #else
314 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
315 		sctp_print_mapping_array(asoc);
316 #endif
317 	}
318 	if (in_nr == 0)
319 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
320 	if (in_r)
321 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
322 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
323 		asoc->highest_tsn_inside_nr_map = tsn;
324 	}
325 	if (tsn == asoc->highest_tsn_inside_map) {
326 		/* We must back down to see what the new highest is */
327 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
328 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
329 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
330 				asoc->highest_tsn_inside_map = i;
331 				fnd = 1;
332 				break;
333 			}
334 		}
335 		if (!fnd) {
336 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
337 		}
338 	}
339 }
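
/*
 * For reference, SCTP_CALC_TSN_TO_GAP() above yields the bit offset of
 * the TSN relative to mapping_array_base_tsn (e.g. base 0x1000 and tsn
 * 0x1005 give gap 5), and the move from revokable to non-revokable is
 * just flipping that bit across the two arrays:
 *
 *	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
 *	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
 */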
340 
341 static int
342 sctp_place_control_in_stream(struct sctp_stream_in *strm,
343     struct sctp_association *asoc,
344     struct sctp_queued_to_read *control)
345 {
346 	struct sctp_queued_to_read *at;
347 	struct sctp_readhead *q;
348 	uint8_t flags, unordered;
349 
350 	flags = (control->sinfo_flags >> 8);
351 	unordered = flags & SCTP_DATA_UNORDERED;
352 	if (unordered) {
353 		q = &strm->uno_inqueue;
354 		if (asoc->idata_supported == 0) {
355 			if (!TAILQ_EMPTY(q)) {
356 				/*
357 				 * Only one control can be here in old
358 				 * style -- abort
359 				 */
360 				return (-1);
361 			}
362 			TAILQ_INSERT_TAIL(q, control, next_instrm);
363 			control->on_strm_q = SCTP_ON_UNORDERED;
364 			return (0);
365 		}
366 	} else {
367 		q = &strm->inqueue;
368 	}
369 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
370 		control->end_added = 1;
371 		control->first_frag_seen = 1;
372 		control->last_frag_seen = 1;
373 	}
374 	if (TAILQ_EMPTY(q)) {
375 		/* Empty queue */
376 		TAILQ_INSERT_HEAD(q, control, next_instrm);
377 		if (unordered) {
378 			control->on_strm_q = SCTP_ON_UNORDERED;
379 		} else {
380 			control->on_strm_q = SCTP_ON_ORDERED;
381 		}
382 		return (0);
383 	} else {
384 		TAILQ_FOREACH(at, q, next_instrm) {
385 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
386 				/*
387 				 * one in queue is bigger than the new one,
388 				 * insert before this one
389 				 */
390 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
391 				if (unordered) {
392 					control->on_strm_q = SCTP_ON_UNORDERED;
393 				} else {
394 					control->on_strm_q = SCTP_ON_ORDERED;
395 				}
396 				break;
397 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
398 				/*
399 				 * Gak, he sent me a duplicate msg id
400 				 * number?? Return -1 to abort.
401 				 */
402 				return (-1);
403 			} else {
404 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
405 					/*
406 					 * We are at the end, insert it
407 					 * after this one
408 					 */
409 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
410 						sctp_log_strm_del(control, at,
411 						    SCTP_STR_LOG_FROM_INSERT_TL);
412 					}
413 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
414 					if (unordered) {
415 						control->on_strm_q = SCTP_ON_UNORDERED;
416 					} else {
417 						control->on_strm_q = SCTP_ON_ORDERED;
418 					}
419 					break;
420 				}
421 			}
422 		}
423 	}
424 	return (0);
425 }
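
/*
 * A note on the ordering used above: both in-queues stay sorted by
 * message id using serial-number arithmetic, which is why
 * SCTP_MID_GT()/SCTP_MID_EQ() take idata_supported -- old DATA
 * compares 16-bit SSNs while I-DATA compares 32-bit MIDs.
 * Conceptually (a sketch, not the macros themselves):
 *
 *	old DATA: (int16_t)((uint16_t)a - (uint16_t)b) > 0
 *	I-DATA:   (int32_t)(a - b) > 0
 */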
426 
427 static void
428 sctp_abort_in_reasm(struct sctp_tcb *stcb,
429     struct sctp_queued_to_read *control,
430     struct sctp_tmit_chunk *chk,
431     int *abort_flag, int opspot)
432 {
433 	char msg[SCTP_DIAG_INFO_LEN];
434 	struct mbuf *oper;
435 
436 	if (stcb->asoc.idata_supported) {
437 		SCTP_SNPRINTF(msg, sizeof(msg),
438 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
439 		    opspot,
440 		    control->fsn_included,
441 		    chk->rec.data.tsn,
442 		    chk->rec.data.sid,
443 		    chk->rec.data.fsn, chk->rec.data.mid);
444 	} else {
445 		SCTP_SNPRINTF(msg, sizeof(msg),
446 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
447 		    opspot,
448 		    control->fsn_included,
449 		    chk->rec.data.tsn,
450 		    chk->rec.data.sid,
451 		    chk->rec.data.fsn,
452 		    (uint16_t)chk->rec.data.mid);
453 	}
454 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
455 	sctp_m_freem(chk->data);
456 	chk->data = NULL;
457 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
458 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
459 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
460 	*abort_flag = 1;
461 }
462 
463 static void
464 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
465 {
466 	/*
467 	 * The control could not be placed and must be cleaned.
468 	 */
469 	struct sctp_tmit_chunk *chk, *nchk;
470 
471 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
472 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
473 		if (chk->data)
474 			sctp_m_freem(chk->data);
475 		chk->data = NULL;
476 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
477 	}
478 	sctp_free_remote_addr(control->whoFrom);
479 	if (control->data) {
480 		sctp_m_freem(control->data);
481 		control->data = NULL;
482 	}
483 	sctp_free_a_readq(stcb, control);
484 }
485 
486 /*
487  * Queue the chunk either right into the socket buffer if it is the next one
488  * to go OR put it in the correct place in the delivery queue.  If we do
489  * append to the so_buf, keep doing so until we hit one out of order, as
490  * long as the controls entered are non-fragmented.
491  */
492 static void
493 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
494     struct sctp_association *asoc,
495     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
496 {
497 	/*
498 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
499 	 * all the data in one stream this could happen quite rapidly. One
500 	 * could use the TSN to keep track of things, but this scheme breaks
501 	 * down in the other type of stream usage that could occur. Send a
502 	 * single msg to stream 0, send 4 billion messages to stream 1, now
503 	 * send a message to stream 0. You have a situation where the TSN
504 	 * has wrapped but not in the stream. Is this worth worrying about,
505 	 * or should we just change our queue sort at the bottom to be by
506 	 * TSN?
507 	 *
508 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
509 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
510 	 * assignment this could happen... and I don't see how this would be
511 	 * a violation. So for now I am undecided and will leave the sort by
512 	 * SSN alone. Maybe a hybrid approach is the answer.
513 	 *
514 	 */
515 	struct sctp_queued_to_read *at;
516 	int queue_needed;
517 	uint32_t nxt_todel;
518 	struct mbuf *op_err;
519 	struct sctp_stream_in *strm;
520 	char msg[SCTP_DIAG_INFO_LEN];
521 
522 	strm = &asoc->strmin[control->sinfo_stream];
523 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
524 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
525 	}
526 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
527 		/* The incoming sseq is behind where we last delivered? */
528 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
529 		    strm->last_mid_delivered, control->mid);
530 		/*
531 		 * throw it in the stream so it gets cleaned up in
532 		 * association destruction
533 		 */
534 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
535 		if (asoc->idata_supported) {
536 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
537 			    strm->last_mid_delivered, control->sinfo_tsn,
538 			    control->sinfo_stream, control->mid);
539 		} else {
540 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
541 			    (uint16_t)strm->last_mid_delivered,
542 			    control->sinfo_tsn,
543 			    control->sinfo_stream,
544 			    (uint16_t)control->mid);
545 		}
546 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
547 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
548 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
549 		*abort_flag = 1;
550 		return;
551 
552 	}
553 	queue_needed = 1;
554 	asoc->size_on_all_streams += control->length;
555 	sctp_ucount_incr(asoc->cnt_on_all_streams);
556 	nxt_todel = strm->last_mid_delivered + 1;
557 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
558 		/* can be delivered right away? */
559 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
560 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
561 		}
562 		/* EY it won't be queued if it can be delivered directly */
563 		queue_needed = 0;
564 		if (asoc->size_on_all_streams >= control->length) {
565 			asoc->size_on_all_streams -= control->length;
566 		} else {
567 #ifdef INVARIANTS
568 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
569 #else
570 			asoc->size_on_all_streams = 0;
571 #endif
572 		}
573 		sctp_ucount_decr(asoc->cnt_on_all_streams);
574 		strm->last_mid_delivered++;
575 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
576 		sctp_add_to_readq(stcb->sctp_ep, stcb,
577 		    control,
578 		    &stcb->sctp_socket->so_rcv, 1,
579 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
580 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
581 			/* all delivered */
582 			nxt_todel = strm->last_mid_delivered + 1;
583 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
584 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
585 				if (control->on_strm_q == SCTP_ON_ORDERED) {
586 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
587 					if (asoc->size_on_all_streams >= control->length) {
588 						asoc->size_on_all_streams -= control->length;
589 					} else {
590 #ifdef INVARIANTS
591 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
592 #else
593 						asoc->size_on_all_streams = 0;
594 #endif
595 					}
596 					sctp_ucount_decr(asoc->cnt_on_all_streams);
597 #ifdef INVARIANTS
598 				} else {
599 					panic("Huh control: %p is on_strm_q: %d",
600 					    control, control->on_strm_q);
601 #endif
602 				}
603 				control->on_strm_q = 0;
604 				strm->last_mid_delivered++;
605 				/*
606 				 * We ignore the return of deliver_data here
607 				 * since we can always hold the chunk on the
608 				 * d-queue. And we have a finite number that
609 				 * can be delivered from the strq.
610 				 */
611 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
612 					sctp_log_strm_del(control, NULL,
613 					    SCTP_STR_LOG_FROM_IMMED_DEL);
614 				}
615 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
616 				sctp_add_to_readq(stcb->sctp_ep, stcb,
617 				    control,
618 				    &stcb->sctp_socket->so_rcv, 1,
619 				    SCTP_READ_LOCK_NOT_HELD,
620 				    SCTP_SO_LOCKED);
621 				continue;
622 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
623 				*need_reasm = 1;
624 			}
625 			break;
626 		}
627 	}
628 	if (queue_needed) {
629 		/*
630 		 * Ok, we did not deliver this guy, find the correct place
631 		 * to put it on the queue.
632 		 */
633 		if (sctp_place_control_in_stream(strm, asoc, control)) {
634 			SCTP_SNPRINTF(msg, sizeof(msg),
635 			    "Queue to str MID: %u duplicate", control->mid);
636 			sctp_clean_up_control(stcb, control);
637 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
638 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
639 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
640 			*abort_flag = 1;
641 		}
642 	}
643 }
644 
645 
646 static void
647 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
648 {
649 	struct mbuf *m, *prev = NULL;
650 	struct sctp_tcb *stcb;
651 
652 	stcb = control->stcb;
653 	control->held_length = 0;
654 	control->length = 0;
655 	m = control->data;
656 	while (m) {
657 		if (SCTP_BUF_LEN(m) == 0) {
658 			/* Skip mbufs with NO length */
659 			if (prev == NULL) {
660 				/* First one */
661 				control->data = sctp_m_free(m);
662 				m = control->data;
663 			} else {
664 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
665 				m = SCTP_BUF_NEXT(prev);
666 			}
667 			if (m == NULL) {
668 				control->tail_mbuf = prev;
669 			}
670 			continue;
671 		}
672 		prev = m;
673 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
674 		if (control->on_read_q) {
675 			/*
676 			 * On read queue so we must increment the SB stuff,
677 			 * we assume the caller has handled any SB locking.
678 			 */
679 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
680 		}
681 		m = SCTP_BUF_NEXT(m);
682 	}
683 	if (prev) {
684 		control->tail_mbuf = prev;
685 	}
686 }
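
/*
 * Invariants established above, for readers of the append path: the
 * chain holds no zero-length mbufs, control->length equals the sum of
 * the remaining SCTP_BUF_LEN()s, and control->tail_mbuf names the last
 * mbuf. That last one is what lets sctp_add_to_tail_pointer() below
 * append in O(1), conceptually:
 *
 *	SCTP_BUF_NEXT(control->tail_mbuf) = m;
 *	control->tail_mbuf = (last mbuf of m's chain);
 */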
687 
688 static void
689 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
690 {
691 	struct mbuf *prev = NULL;
692 	struct sctp_tcb *stcb;
693 
694 	stcb = control->stcb;
695 	if (stcb == NULL) {
696 #ifdef INVARIANTS
697 		panic("Control broken");
698 #else
699 		return;
700 #endif
701 	}
702 	if (control->tail_mbuf == NULL) {
703 		/* TSNH */
704 		sctp_m_freem(control->data);
705 		control->data = m;
706 		sctp_setup_tail_pointer(control);
707 		return;
708 	}
709 	control->tail_mbuf->m_next = m;
710 	while (m) {
711 		if (SCTP_BUF_LEN(m) == 0) {
712 			/* Skip mbufs with NO length */
713 			if (prev == NULL) {
714 				/* First one */
715 				control->tail_mbuf->m_next = sctp_m_free(m);
716 				m = control->tail_mbuf->m_next;
717 			} else {
718 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
719 				m = SCTP_BUF_NEXT(prev);
720 			}
721 			if (m == NULL) {
722 				control->tail_mbuf = prev;
723 			}
724 			continue;
725 		}
726 		prev = m;
727 		if (control->on_read_q) {
728 			/*
729 			 * On read queue so we must increment the SB stuff,
730 			 * we assume the caller has handled any SB locking.
731 			 */
732 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
733 		}
734 		*added += SCTP_BUF_LEN(m);
735 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
736 		m = SCTP_BUF_NEXT(m);
737 	}
738 	if (prev) {
739 		control->tail_mbuf = prev;
740 	}
741 }
742 
743 static void
744 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
745 {
746 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
747 	nc->sinfo_stream = control->sinfo_stream;
748 	nc->mid = control->mid;
749 	TAILQ_INIT(&nc->reasm);
750 	nc->top_fsn = control->top_fsn;
751 	nc->mid = control->mid;
752 	nc->sinfo_flags = control->sinfo_flags;
753 	nc->sinfo_ppid = control->sinfo_ppid;
754 	nc->sinfo_context = control->sinfo_context;
755 	nc->fsn_included = 0xffffffff;
756 	nc->sinfo_tsn = control->sinfo_tsn;
757 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
758 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
759 	nc->whoFrom = control->whoFrom;
760 	atomic_add_int(&nc->whoFrom->ref_count, 1);
761 	nc->stcb = control->stcb;
762 	nc->port_from = control->port_from;
763 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
764 }
765 
766 static void
767 sctp_reset_a_control(struct sctp_queued_to_read *control,
768     struct sctp_inpcb *inp, uint32_t tsn)
769 {
770 	control->fsn_included = tsn;
771 	if (control->on_read_q) {
772 		/*
773 		 * We have to purge it from there, hopefully this will work
774 		 * :-)
775 		 */
776 		TAILQ_REMOVE(&inp->read_queue, control, next);
777 		control->on_read_q = 0;
778 	}
779 }
780 
781 static int
782 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
783     struct sctp_association *asoc,
784     struct sctp_stream_in *strm,
785     struct sctp_queued_to_read *control,
786     uint32_t pd_point,
787     int inp_read_lock_held)
788 {
789 	/*
790 	 * Special handling for the old un-ordered data chunk. All the
791 	 * to see if we have it all. If we return 1, no other control
792 	 * entries on the un-ordered queue will be looked at. In theory
793 	 * there should be no other entries in reality, unless the guy is
794 	 * there should be no others entries in reality, unless the guy is
795 	 * sending both unordered NDATA and unordered DATA...
796 	 */
797 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
798 	uint32_t fsn;
799 	struct sctp_queued_to_read *nc;
800 	int cnt_added;
801 
802 	if (control->first_frag_seen == 0) {
803 		/* Nothing we can do, we have not seen the first piece yet */
804 		return (1);
805 	}
806 	/* Collapse any we can */
807 	cnt_added = 0;
808 restart:
809 	fsn = control->fsn_included + 1;
810 	/* Now what can we add? */
811 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
812 		if (chk->rec.data.fsn == fsn) {
813 			/* Ok, let's add it */
814 			sctp_alloc_a_readq(stcb, nc);
815 			if (nc == NULL) {
816 				break;
817 			}
818 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
819 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
820 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
821 			fsn++;
822 			cnt_added++;
823 			chk = NULL;
824 			if (control->end_added) {
825 				/* We are done */
826 				if (!TAILQ_EMPTY(&control->reasm)) {
827 					/*
828 					 * Ok we have to move anything left
829 					 * on the control queue to a new
830 					 * control.
831 					 */
832 					sctp_build_readq_entry_from_ctl(nc, control);
833 					tchk = TAILQ_FIRST(&control->reasm);
834 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
835 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
836 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
837 							asoc->size_on_reasm_queue -= tchk->send_size;
838 						} else {
839 #ifdef INVARIANTS
840 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
841 #else
842 							asoc->size_on_reasm_queue = 0;
843 #endif
844 						}
845 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
846 						nc->first_frag_seen = 1;
847 						nc->fsn_included = tchk->rec.data.fsn;
848 						nc->data = tchk->data;
849 						nc->sinfo_ppid = tchk->rec.data.ppid;
850 						nc->sinfo_tsn = tchk->rec.data.tsn;
851 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
852 						tchk->data = NULL;
853 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
854 						sctp_setup_tail_pointer(nc);
855 						tchk = TAILQ_FIRST(&control->reasm);
856 					}
857 					/* Spin the rest onto the queue */
858 					while (tchk) {
859 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
860 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
861 						tchk = TAILQ_FIRST(&control->reasm);
862 					}
863 					/*
864 					 * Now let's add it to the queue
865 					 * after removing control
866 					 */
867 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
868 					nc->on_strm_q = SCTP_ON_UNORDERED;
869 					if (control->on_strm_q) {
870 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
871 						control->on_strm_q = 0;
872 					}
873 				}
874 				if (control->pdapi_started) {
875 					strm->pd_api_started = 0;
876 					control->pdapi_started = 0;
877 				}
878 				if (control->on_strm_q) {
879 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
880 					control->on_strm_q = 0;
881 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
882 				}
883 				if (control->on_read_q == 0) {
884 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 					    &stcb->sctp_socket->so_rcv, control->end_added,
886 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887 				}
888 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
889 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
890 					/*
891 					 * Switch to the new guy and
892 					 * continue
893 					 */
894 					control = nc;
895 					goto restart;
896 				} else {
897 					if (nc->on_strm_q == 0) {
898 						sctp_free_a_readq(stcb, nc);
899 					}
900 				}
901 				return (1);
902 			} else {
903 				sctp_free_a_readq(stcb, nc);
904 			}
905 		} else {
906 			/* Can't add more */
907 			break;
908 		}
909 	}
910 	if (cnt_added && strm->pd_api_started) {
911 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
912 	}
913 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 		strm->pd_api_started = 1;
915 		control->pdapi_started = 1;
916 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 		    &stcb->sctp_socket->so_rcv, control->end_added,
918 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
920 		return (0);
921 	} else {
922 		return (1);
923 	}
924 }
925 
926 static void
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928     struct sctp_association *asoc,
929     struct sctp_queued_to_read *control,
930     struct sctp_tmit_chunk *chk,
931     int *abort_flag)
932 {
933 	struct sctp_tmit_chunk *at;
934 	int inserted;
935 
936 	/*
937 	 * Here we need to place the chunk into the control structure sorted
938 	 * in the correct order.
939 	 */
940 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 		/* It's the very first one. */
942 		SCTPDBG(SCTP_DEBUG_XXX,
943 		    "chunk is a first fsn: %u becomes fsn_included\n",
944 		    chk->rec.data.fsn);
945 		at = TAILQ_FIRST(&control->reasm);
946 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
947 			/*
948 			 * The first chunk in the reassembly has a smaller
949 			 * TSN than this one; even though this has a first,
950 			 * it must be from a subsequent msg.
951 			 */
952 			goto place_chunk;
953 		}
954 		if (control->first_frag_seen) {
955 			/*
956 			 * In old un-ordered we can reassemble multiple
957 			 * messages on one control, as long as the next
958 			 * FIRST is greater than the old first (TSN, i.e.
959 			 * FSN, wise).
960 			 */
961 			struct mbuf *tdata;
962 			uint32_t tmp;
963 
964 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
965 				/*
966 				 * Easy way the start of a new guy beyond
967 				 * the lowest
968 				 */
969 				goto place_chunk;
970 			}
971 			if ((chk->rec.data.fsn == control->fsn_included) ||
972 			    (control->pdapi_started)) {
973 				/*
974 				 * Ok, this should not happen; if it does, we
975 				 * started the pd-api on the higher TSN
976 				 * (since the equals part is a TSN failure
977 				 * it must be that).
978 				 *
979 				 * We are completely hosed in that case since
980 				 * I have no way to recover. This really
981 				 * will only happen if we can get more TSN's
982 				 * higher before the pd-api-point.
983 				 */
984 				sctp_abort_in_reasm(stcb, control, chk,
985 				    abort_flag,
986 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
987 
988 				return;
989 			}
990 			/*
991 			 * Ok we have two firsts and the one we just got is
992 			 * smaller than the one we previously placed... yuck!
993 			 * We must swap them out.
994 			 */
995 			/* swap the mbufs */
996 			tdata = control->data;
997 			control->data = chk->data;
998 			chk->data = tdata;
999 			/* Save the lengths */
1000 			chk->send_size = control->length;
1001 			/* Recompute length of control and tail pointer */
1002 			sctp_setup_tail_pointer(control);
1003 			/* Fix the FSN included */
1004 			tmp = control->fsn_included;
1005 			control->fsn_included = chk->rec.data.fsn;
1006 			chk->rec.data.fsn = tmp;
1007 			/* Fix the TSN included */
1008 			tmp = control->sinfo_tsn;
1009 			control->sinfo_tsn = chk->rec.data.tsn;
1010 			chk->rec.data.tsn = tmp;
1011 			/* Fix the PPID included */
1012 			tmp = control->sinfo_ppid;
1013 			control->sinfo_ppid = chk->rec.data.ppid;
1014 			chk->rec.data.ppid = tmp;
1015 			/* Fix tail pointer */
1016 			goto place_chunk;
1017 		}
1018 		control->first_frag_seen = 1;
1019 		control->fsn_included = chk->rec.data.fsn;
1020 		control->top_fsn = chk->rec.data.fsn;
1021 		control->sinfo_tsn = chk->rec.data.tsn;
1022 		control->sinfo_ppid = chk->rec.data.ppid;
1023 		control->data = chk->data;
1024 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1025 		chk->data = NULL;
1026 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 		sctp_setup_tail_pointer(control);
1028 		return;
1029 	}
1030 place_chunk:
1031 	inserted = 0;
1032 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1033 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1034 			/*
1035 			 * This one in queue is bigger than the new one,
1036 			 * insert the new one before at.
1037 			 */
1038 			asoc->size_on_reasm_queue += chk->send_size;
1039 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1040 			inserted = 1;
1041 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1042 			break;
1043 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1044 			/*
1045 			 * They sent a duplicate fsn number. This really
1046 			 * should not happen since the FSN is a TSN and it
1047 			 * should have been dropped earlier.
1048 			 */
1049 			sctp_abort_in_reasm(stcb, control, chk,
1050 			    abort_flag,
1051 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1052 			return;
1053 		}
1054 
1055 	}
1056 	if (inserted == 0) {
1057 		/* It's at the end */
1058 		asoc->size_on_reasm_queue += chk->send_size;
1059 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1060 		control->top_fsn = chk->rec.data.fsn;
1061 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1062 	}
1063 }
1064 
1065 static int
1066 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1067     struct sctp_stream_in *strm, int inp_read_lock_held)
1068 {
1069 	/*
1070 	 * Given a stream, strm, see if any of the SSN's on it that are
1071 	 * fragmented are ready to deliver. If so go ahead and place them on
1072 	 * the read queue. In so placing, if we have hit the end, then we
1073 	 * need to remove them from the stream's queue.
1074 	 */
1075 	struct sctp_queued_to_read *control, *nctl = NULL;
1076 	uint32_t next_to_del;
1077 	uint32_t pd_point;
1078 	int ret = 0;
1079 
1080 	if (stcb->sctp_socket) {
1081 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1082 		    stcb->sctp_ep->partial_delivery_point);
1083 	} else {
1084 		pd_point = stcb->sctp_ep->partial_delivery_point;
1085 	}
1086 	control = TAILQ_FIRST(&strm->uno_inqueue);
1087 
1088 	if ((control != NULL) &&
1089 	    (asoc->idata_supported == 0)) {
1090 		/* Special handling needed for "old" data format */
1091 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1092 			goto done_un;
1093 		}
1094 	}
1095 	if (strm->pd_api_started) {
1096 		/* Can't add more */
1097 		return (0);
1098 	}
1099 	while (control) {
1100 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1101 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1102 		nctl = TAILQ_NEXT(control, next_instrm);
1103 		if (control->end_added) {
1104 			/* We just put the last bit on */
1105 			if (control->on_strm_q) {
1106 #ifdef INVARIANTS
1107 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1108 					panic("Huh control: %p on_q: %d -- not unordered?",
1109 					    control, control->on_strm_q);
1110 				}
1111 #endif
1112 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1113 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1114 				if (asoc->size_on_all_streams >= control->length) {
1115 					asoc->size_on_all_streams -= control->length;
1116 				} else {
1117 #ifdef INVARIANTS
1118 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1119 #else
1120 					asoc->size_on_all_streams = 0;
1121 #endif
1122 				}
1123 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1124 				control->on_strm_q = 0;
1125 			}
1126 			if (control->on_read_q == 0) {
1127 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1128 				    control,
1129 				    &stcb->sctp_socket->so_rcv, control->end_added,
1130 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1131 			}
1132 		} else {
1133 			/* Can we do a PD-API for this un-ordered guy? */
1134 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1135 				strm->pd_api_started = 1;
1136 				control->pdapi_started = 1;
1137 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1138 				    control,
1139 				    &stcb->sctp_socket->so_rcv, control->end_added,
1140 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1141 
1142 				break;
1143 			}
1144 		}
1145 		control = nctl;
1146 	}
1147 done_un:
1148 	control = TAILQ_FIRST(&strm->inqueue);
1149 	if (strm->pd_api_started) {
1150 		/* Can't add more */
1151 		return (0);
1152 	}
1153 	if (control == NULL) {
1154 		return (ret);
1155 	}
1156 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1157 		/*
1158 		 * Ok, the guy at the top was being partially delivered and
1159 		 * has completed, so we remove it. Note the pd_api flag was
1160 		 * taken off when the chunk was merged on in
1161 		 * sctp_queue_data_for_reasm below.
1162 		 */
1163 		nctl = TAILQ_NEXT(control, next_instrm);
1164 		SCTPDBG(SCTP_DEBUG_XXX,
1165 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1166 		    control, control->end_added, control->mid,
1167 		    control->top_fsn, control->fsn_included,
1168 		    strm->last_mid_delivered);
1169 		if (control->end_added) {
1170 			if (control->on_strm_q) {
1171 #ifdef INVARIANTS
1172 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1173 					panic("Huh control: %p on_q: %d -- not ordered?",
1174 					    control, control->on_strm_q);
1175 				}
1176 #endif
1177 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1178 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1179 				if (asoc->size_on_all_streams >= control->length) {
1180 					asoc->size_on_all_streams -= control->length;
1181 				} else {
1182 #ifdef INVARIANTS
1183 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1184 #else
1185 					asoc->size_on_all_streams = 0;
1186 #endif
1187 				}
1188 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1189 				control->on_strm_q = 0;
1190 			}
1191 			if (strm->pd_api_started && control->pdapi_started) {
1192 				control->pdapi_started = 0;
1193 				strm->pd_api_started = 0;
1194 			}
1195 			if (control->on_read_q == 0) {
1196 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1197 				    control,
1198 				    &stcb->sctp_socket->so_rcv, control->end_added,
1199 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1200 			}
1201 			control = nctl;
1202 		}
1203 	}
1204 	if (strm->pd_api_started) {
1205 		/*
1206 		 * Can't add more; we must have gotten an un-ordered one
1207 		 * above that is being partially delivered.
1208 		 */
1209 		return (0);
1210 	}
1211 deliver_more:
1212 	next_to_del = strm->last_mid_delivered + 1;
1213 	if (control) {
1214 		SCTPDBG(SCTP_DEBUG_XXX,
1215 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1216 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1217 		    next_to_del);
1218 		nctl = TAILQ_NEXT(control, next_instrm);
1219 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1220 		    (control->first_frag_seen)) {
1221 			int done;
1222 
1223 			/* Ok we can deliver it onto the stream. */
1224 			if (control->end_added) {
1225 				/* We are done with it afterwards */
1226 				if (control->on_strm_q) {
1227 #ifdef INVARIANTS
1228 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1229 						panic("Huh control: %p on_q: %d -- not ordered?",
1230 						    control, control->on_strm_q);
1231 					}
1232 #endif
1233 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1234 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1235 					if (asoc->size_on_all_streams >= control->length) {
1236 						asoc->size_on_all_streams -= control->length;
1237 					} else {
1238 #ifdef INVARIANTS
1239 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1240 #else
1241 						asoc->size_on_all_streams = 0;
1242 #endif
1243 					}
1244 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1245 					control->on_strm_q = 0;
1246 				}
1247 				ret++;
1248 			}
1249 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1250 				/*
1251 				 * A singleton now slipping through - mark
1252 				 * it non-revokable too
1253 				 */
1254 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1255 			} else if (control->end_added == 0) {
1256 				/*
1257 				 * Check if we can defer adding until it's
1258 				 * all there
1259 				 */
1260 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1261 					/*
1262 					 * Don't need it or cannot add more
1263 					 * (one being delivered that way)
1264 					 */
1265 					goto out;
1266 				}
1267 			}
1268 			done = (control->end_added) && (control->last_frag_seen);
1269 			if (control->on_read_q == 0) {
1270 				if (!done) {
1271 					if (asoc->size_on_all_streams >= control->length) {
1272 						asoc->size_on_all_streams -= control->length;
1273 					} else {
1274 #ifdef INVARIANTS
1275 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1276 #else
1277 						asoc->size_on_all_streams = 0;
1278 #endif
1279 					}
1280 					strm->pd_api_started = 1;
1281 					control->pdapi_started = 1;
1282 				}
1283 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1284 				    control,
1285 				    &stcb->sctp_socket->so_rcv, control->end_added,
1286 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1287 			}
1288 			strm->last_mid_delivered = next_to_del;
1289 			if (done) {
1290 				control = nctl;
1291 				goto deliver_more;
1292 			}
1293 		}
1294 	}
1295 out:
1296 	return (ret);
1297 }
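
/*
 * A note on pd_point computed above: partial delivery kicks in once a
 * still-incomplete message reaches
 *
 *	min(SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT,
 *	    stcb->sctp_ep->partial_delivery_point)
 *
 * For illustration only (the shift's real value lives in the SCTP
 * headers): with a 64KB receive buffer and an assumed shift of 4,
 * anything at or over min(4096, partial_delivery_point) bytes gets
 * pushed to the read queue before its last fragment arrives.
 */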
1298 
1299 
1300 uint32_t
1301 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1302     struct sctp_stream_in *strm,
1303     struct sctp_tcb *stcb, struct sctp_association *asoc,
1304     struct sctp_tmit_chunk *chk, int hold_rlock)
1305 {
1306 	/*
1307 	 * Given a control and a chunk, merge the data from the chk onto the
1308 	 * control and free up the chunk resources.
1309 	 */
1310 	uint32_t added = 0;
1311 	int i_locked = 0;
1312 
1313 	if (control->on_read_q && (hold_rlock == 0)) {
1314 		/*
1315 		 * It's being pd-api'd, so we must take some locks.
1316 		 */
1317 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1318 		i_locked = 1;
1319 	}
1320 	if (control->data == NULL) {
1321 		control->data = chk->data;
1322 		sctp_setup_tail_pointer(control);
1323 	} else {
1324 		sctp_add_to_tail_pointer(control, chk->data, &added);
1325 	}
1326 	control->fsn_included = chk->rec.data.fsn;
1327 	asoc->size_on_reasm_queue -= chk->send_size;
1328 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1329 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1330 	chk->data = NULL;
1331 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1332 		control->first_frag_seen = 1;
1333 		control->sinfo_tsn = chk->rec.data.tsn;
1334 		control->sinfo_ppid = chk->rec.data.ppid;
1335 	}
1336 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1337 		/* It's complete */
1338 		if ((control->on_strm_q) && (control->on_read_q)) {
1339 			if (control->pdapi_started) {
1340 				control->pdapi_started = 0;
1341 				strm->pd_api_started = 0;
1342 			}
1343 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1344 				/* Unordered */
1345 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1346 				control->on_strm_q = 0;
1347 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1348 				/* Ordered */
1349 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1350 				/*
1351 				 * Don't need to decrement
1352 				 * size_on_all_streams, since control is on
1353 				 * the read queue.
1354 				 */
1355 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1356 				control->on_strm_q = 0;
1357 #ifdef INVARIANTS
1358 			} else if (control->on_strm_q) {
1359 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1360 				    control->on_strm_q);
1361 #endif
1362 			}
1363 		}
1364 		control->end_added = 1;
1365 		control->last_frag_seen = 1;
1366 	}
1367 	if (i_locked) {
1368 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1369 	}
1370 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1371 	return (added);
1372 }
1373 
1374 /*
1375  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1376  * queue, see if anything can be delivered. If so pull it off (or as much as
1377  * we can). If we run out of space then we must dump what we can and set the
1378  * appropriate flag to say we queued what we could.
1379  */
1380 static void
1381 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1382     struct sctp_queued_to_read *control,
1383     struct sctp_tmit_chunk *chk,
1384     int created_control,
1385     int *abort_flag, uint32_t tsn)
1386 {
1387 	uint32_t next_fsn;
1388 	struct sctp_tmit_chunk *at, *nat;
1389 	struct sctp_stream_in *strm;
1390 	int do_wakeup, unordered;
1391 	uint32_t lenadded;
1392 
1393 	strm = &asoc->strmin[control->sinfo_stream];
1394 	/*
1395 	 * For old un-ordered data chunks.
1396 	 */
1397 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1398 		unordered = 1;
1399 	} else {
1400 		unordered = 0;
1401 	}
1402 	/* Must be added to the stream-in queue */
1403 	if (created_control) {
1404 		if ((unordered == 0) || (asoc->idata_supported)) {
1405 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1406 		}
1407 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1408 			/* Duplicate SSN? */
1409 			sctp_abort_in_reasm(stcb, control, chk,
1410 			    abort_flag,
1411 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1412 			sctp_clean_up_control(stcb, control);
1413 			return;
1414 		}
1415 		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
1416 			/*
1417 			 * Ok, we created this control and now let's validate
1418 			 * that it's legal, i.e. there is a B bit set; if not
1419 			 * and we have up to the cum-ack then it's invalid.
1420 			 */
1421 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1422 				sctp_abort_in_reasm(stcb, control, chk,
1423 				    abort_flag,
1424 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1425 				return;
1426 			}
1427 		}
1428 	}
1429 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1430 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1431 		return;
1432 	}
1433 	/*
1434 	 * Ok, we must queue the chunk into the reassembly portion: o if it's
1435 	 * the first it goes to the control mbuf. o if it's not first but the
1436 	 * next in sequence it goes to the control, and each succeeding one
1437 	 * in order also goes. o if it's not in order we place it on the list
1438 	 * in its place.
1439 	 */
1440 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1441 		/* It's the very first one. */
1442 		SCTPDBG(SCTP_DEBUG_XXX,
1443 		    "chunk is a first fsn: %u becomes fsn_included\n",
1444 		    chk->rec.data.fsn);
1445 		if (control->first_frag_seen) {
1446 			/*
1447 			 * Error on the sender's part: they either sent us two
1448 			 * data chunks with FIRST, or they sent two
1449 			 * un-ordered chunks that were fragmented at the
1450 			 * same time in the same stream.
1451 			 */
1452 			sctp_abort_in_reasm(stcb, control, chk,
1453 			    abort_flag,
1454 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1455 			return;
1456 		}
1457 		control->first_frag_seen = 1;
1458 		control->sinfo_ppid = chk->rec.data.ppid;
1459 		control->sinfo_tsn = chk->rec.data.tsn;
1460 		control->fsn_included = chk->rec.data.fsn;
1461 		control->data = chk->data;
1462 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1463 		chk->data = NULL;
1464 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1465 		sctp_setup_tail_pointer(control);
1466 		asoc->size_on_all_streams += control->length;
1467 	} else {
1468 		/* Place the chunk in our list */
1469 		int inserted = 0;
1470 
1471 		if (control->last_frag_seen == 0) {
1472 			/* Still willing to raise highest FSN seen */
1473 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1474 				SCTPDBG(SCTP_DEBUG_XXX,
1475 				    "We have a new top_fsn: %u\n",
1476 				    chk->rec.data.fsn);
1477 				control->top_fsn = chk->rec.data.fsn;
1478 			}
1479 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1480 				SCTPDBG(SCTP_DEBUG_XXX,
1481 				    "The last fsn is now in place fsn: %u\n",
1482 				    chk->rec.data.fsn);
1483 				control->last_frag_seen = 1;
1484 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1485 					SCTPDBG(SCTP_DEBUG_XXX,
1486 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1487 					    chk->rec.data.fsn,
1488 					    control->top_fsn);
1489 					sctp_abort_in_reasm(stcb, control, chk,
1490 					    abort_flag,
1491 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1492 					return;
1493 				}
1494 			}
1495 			if (asoc->idata_supported || control->first_frag_seen) {
1496 				/*
1497 				 * For IDATA we always check since we know
1498 				 * that the first fragment is 0. For old
1499 				 * DATA we have to receive the first before
1500 				 * we know the first FSN (which is the TSN).
1501 				 */
1502 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1503 					/*
1504 					 * We have already delivered up to
1505 					 * this so it's a dup
1506 					 */
1507 					sctp_abort_in_reasm(stcb, control, chk,
1508 					    abort_flag,
1509 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1510 					return;
1511 				}
1512 			}
1513 		} else {
1514 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1515 				/* Second last? huh? */
1516 				SCTPDBG(SCTP_DEBUG_XXX,
1517 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1518 				    chk->rec.data.fsn, control->top_fsn);
1519 				sctp_abort_in_reasm(stcb, control,
1520 				    chk, abort_flag,
1521 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1522 				return;
1523 			}
1524 			if (asoc->idata_supported || control->first_frag_seen) {
1525 				/*
1526 				 * For IDATA we always check since we know
1527 				 * that the first fragment is 0. For old
1528 				 * DATA we have to receive the first before
1529 				 * we know the first FSN (which is the TSN).
1530 				 */
1531 
1532 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1533 					/*
1534 					 * We have already delivered up to
1535 					 * this so it's a dup
1536 					 */
1537 					SCTPDBG(SCTP_DEBUG_XXX,
1538 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1539 					    chk->rec.data.fsn, control->fsn_included);
1540 					sctp_abort_in_reasm(stcb, control, chk,
1541 					    abort_flag,
1542 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1543 					return;
1544 				}
1545 			}
1546 			/*
1547 			 * validate not beyond top FSN if we have seen last
1548 			 * one
1549 			 */
1550 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1551 				SCTPDBG(SCTP_DEBUG_XXX,
1552 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1553 				    chk->rec.data.fsn,
1554 				    control->top_fsn);
1555 				sctp_abort_in_reasm(stcb, control, chk,
1556 				    abort_flag,
1557 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1558 				return;
1559 			}
1560 		}
1561 		/*
1562 		 * If we reach here, we need to place the new chunk in the
1563 		 * reassembly for this control.
1564 		 */
1565 		SCTPDBG(SCTP_DEBUG_XXX,
1566 		    "chunk is a not first fsn: %u needs to be inserted\n",
1567 		    chk->rec.data.fsn);
1568 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1569 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1570 				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1571 					/* Last not at the end? huh? */
1572 					SCTPDBG(SCTP_DEBUG_XXX,
1573 					    "Last fragment not last in list: -- abort\n");
1574 					sctp_abort_in_reasm(stcb, control,
1575 					    chk, abort_flag,
1576 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1577 					return;
1578 				}
1579 				/*
1580 				 * This one in queue is bigger than the new
1581 				 * one, insert the new one before at.
1582 				 */
1583 				SCTPDBG(SCTP_DEBUG_XXX,
1584 				    "Insert it before fsn: %u\n",
1585 				    at->rec.data.fsn);
1586 				asoc->size_on_reasm_queue += chk->send_size;
1587 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1588 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1589 				inserted = 1;
1590 				break;
1591 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1592 				/*
1593 				 * Gak, he sent me a duplicate str seq
1594 				 * number
1595 				 */
1596 				/*
1597 				 * foo bar, I guess I will just free this
1598 				 * new guy, should we abort too? FIX ME
1599 				 * MAYBE? Or it COULD be that the SSN's have
1600 				 * wrapped. Maybe I should compare to TSN
1601 				 * somehow... sigh for now just blow away
1602 				 * the chunk!
1603 				 */
1604 				SCTPDBG(SCTP_DEBUG_XXX,
1605 				    "Duplicate to fsn: %u -- abort\n",
1606 				    at->rec.data.fsn);
1607 				sctp_abort_in_reasm(stcb, control,
1608 				    chk, abort_flag,
1609 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1610 				return;
1611 			}
1612 		}
1613 		if (inserted == 0) {
1614 			/* Goes on the end */
1615 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1616 			    chk->rec.data.fsn);
1617 			asoc->size_on_reasm_queue += chk->send_size;
1618 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1619 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1620 		}
1621 	}
1622 	/*
1623 	 * Ok, let's see if we can pull any in-sequence chunks up into the
1624 	 * control structure, if it makes sense.
1625 	 */
1626 	do_wakeup = 0;
1627 	/*
1628 	 * If the first fragment has not been seen there is no sense in
1629 	 * looking.
1630 	 */
1631 	if (control->first_frag_seen) {
1632 		next_fsn = control->fsn_included + 1;
1633 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1634 			if (at->rec.data.fsn == next_fsn) {
1635 				/* We can add this one now to the control */
1636 				SCTPDBG(SCTP_DEBUG_XXX,
1637 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1638 				    control, at,
1639 				    at->rec.data.fsn,
1640 				    next_fsn, control->fsn_included);
1641 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1642 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1643 				if (control->on_read_q) {
1644 					do_wakeup = 1;
1645 				} else {
1646 					/*
1647 					 * We only add to the
1648 					 * size-on-all-streams if it's not on
1649 					 * the read q. The read q flag will
1650 					 * cause an sballoc so it's accounted
1651 					 * for there.
1652 					 */
1653 					asoc->size_on_all_streams += lenadded;
1654 				}
1655 				next_fsn++;
1656 				if (control->end_added && control->pdapi_started) {
1657 					if (strm->pd_api_started) {
1658 						strm->pd_api_started = 0;
1659 						control->pdapi_started = 0;
1660 					}
1661 					if (control->on_read_q == 0) {
1662 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1663 						    control,
1664 						    &stcb->sctp_socket->so_rcv, control->end_added,
1665 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1666 					}
1667 					break;
1668 				}
1669 			} else {
1670 				break;
1671 			}
1672 		}
1673 	}
1674 	if (do_wakeup) {
1675 		/* Need to wakeup the reader */
1676 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1677 	}
1678 }
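/*
 * The reassembly list above is kept sorted by FSN: walk the list, insert
 * before the first entry with a larger FSN, treat an equal FSN as a
 * duplicate, and append at the tail if nothing larger was found.  A
 * minimal standalone sketch of that pattern follows; the type and
 * function names are illustrative only, and plain '>' stands in for the
 * serial-number compare SCTP_TSN_GT() used above.
 */
#if 0
#include <sys/queue.h>
#include <stdint.h>

struct frag {
	uint32_t fsn;
	TAILQ_ENTRY(frag) entries;
};
TAILQ_HEAD(frag_head, frag);

/* Returns 0 on success, -1 on a duplicate FSN (the caller aborts). */
static int
frag_insert_sorted(struct frag_head *head, struct frag *new_frag)
{
	struct frag *at;

	TAILQ_FOREACH(at, head, entries) {
		if (at->fsn > new_frag->fsn) {
			/* First bigger entry: the new one goes before it. */
			TAILQ_INSERT_BEFORE(at, new_frag, entries);
			return (0);
		}
		if (at->fsn == new_frag->fsn) {
			return (-1);
		}
	}
	/* Nothing bigger was seen: goes on the end. */
	TAILQ_INSERT_TAIL(head, new_frag, entries);
	return (0);
}
#endif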
1679 
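/*
 * Look up the partially reassembled message for a given MID.  Ordered
 * messages are searched by MID on the stream's inqueue.  For unordered
 * messages the MID is only meaningful with I-DATA; old DATA unordered
 * fragments carry no usable sequence number, so the head of the
 * unordered queue is taken.
 */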
1680 static struct sctp_queued_to_read *
1681 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1682 {
1683 	struct sctp_queued_to_read *control;
1684 
1685 	if (ordered) {
1686 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1687 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1688 				break;
1689 			}
1690 		}
1691 	} else {
1692 		if (idata_supported) {
1693 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1694 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1695 					break;
1696 				}
1697 			}
1698 		} else {
1699 			control = TAILQ_FIRST(&strm->uno_inqueue);
1700 		}
1701 	}
1702 	return (control);
1703 }
1704 
1705 static int
1706 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1707     struct mbuf **m, int offset, int chk_length,
1708     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1709     int *break_flag, int last_chunk, uint8_t chk_type)
1710 {
1711 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1712 	struct sctp_stream_in *strm;
1713 	uint32_t tsn, fsn, gap, mid;
1714 	struct mbuf *dmbuf;
1715 	int the_len;
1716 	int need_reasm_check = 0;
1717 	uint16_t sid;
1718 	struct mbuf *op_err;
1719 	char msg[SCTP_DIAG_INFO_LEN];
1720 	struct sctp_queued_to_read *control, *ncontrol;
1721 	uint32_t ppid;
1722 	uint8_t chk_flags;
1723 	struct sctp_stream_reset_list *liste;
1724 	int ordered;
1725 	size_t clen;
1726 	int created_control = 0;
1727 
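	/*
	 * Pull the sequencing fields out of the chunk header.  For I-DATA
	 * the MID is 32 bits and the FSN is explicit: the first fragment
	 * carries the PPID in the shared field and has FSN 0 by
	 * definition, while later fragments carry the FSN there instead.
	 * For old DATA the 16-bit SSN serves as the MID and the TSN
	 * itself doubles as the FSN.
	 */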
1728 	if (chk_type == SCTP_IDATA) {
1729 		struct sctp_idata_chunk *chunk, chunk_buf;
1730 
1731 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1732 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1733 		chk_flags = chunk->ch.chunk_flags;
1734 		clen = sizeof(struct sctp_idata_chunk);
1735 		tsn = ntohl(chunk->dp.tsn);
1736 		sid = ntohs(chunk->dp.sid);
1737 		mid = ntohl(chunk->dp.mid);
1738 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1739 			fsn = 0;
1740 			ppid = chunk->dp.ppid_fsn.ppid;
1741 		} else {
1742 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1743 			ppid = 0xffffffff;	/* Use as an invalid value. */
1744 		}
1745 	} else {
1746 		struct sctp_data_chunk *chunk, chunk_buf;
1747 
1748 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1749 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1750 		chk_flags = chunk->ch.chunk_flags;
1751 		clen = sizeof(struct sctp_data_chunk);
1752 		tsn = ntohl(chunk->dp.tsn);
1753 		sid = ntohs(chunk->dp.sid);
1754 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1755 		fsn = tsn;
1756 		ppid = chunk->dp.ppid;
1757 	}
1758 	if ((size_t)chk_length == clen) {
1759 		/*
1760 		 * Need to send an abort since we had an empty data chunk.
1761 		 */
1762 		op_err = sctp_generate_no_user_data_cause(tsn);
1763 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1764 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1765 		*abort_flag = 1;
1766 		return (0);
1767 	}
1768 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1769 		asoc->send_sack = 1;
1770 	}
1771 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1772 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1773 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1774 	}
1775 	if (stcb == NULL) {
1776 		return (0);
1777 	}
1778 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1779 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1780 		/* It is a duplicate */
1781 		SCTP_STAT_INCR(sctps_recvdupdata);
1782 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1783 			/* Record a dup for the next outbound sack */
1784 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1785 			asoc->numduptsns++;
1786 		}
1787 		asoc->send_sack = 1;
1788 		return (0);
1789 	}
1790 	/* Calculate the number of TSNs between the base and this TSN */
1791 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1792 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1793 		/* Can't hold the bit in the mapping array at max size, toss it */
1794 		return (0);
1795 	}
1796 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1797 		SCTP_TCB_LOCK_ASSERT(stcb);
1798 		if (sctp_expand_mapping_array(asoc, gap)) {
1799 			/* Can't expand, drop it */
1800 			return (0);
1801 		}
1802 	}
1803 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1804 		*high_tsn = tsn;
1805 	}
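	/*
	 * Each byte of the mapping arrays covers eight consecutive TSNs
	 * starting at mapping_array_base_tsn, so SCTP_IS_TSN_PRESENT(map,
	 * gap) is conceptually (map[gap >> 3] >> (gap & 7)) & 1.
	 */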
1806 	/* See if we have received this one already */
1807 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1808 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1809 		SCTP_STAT_INCR(sctps_recvdupdata);
1810 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1811 			/* Record a dup for the next outbound sack */
1812 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1813 			asoc->numduptsns++;
1814 		}
1815 		asoc->send_sack = 1;
1816 		return (0);
1817 	}
1818 	/*
1819 	 * Check to see about the GONE flag, duplicates would cause a sack
1820 	 * to be sent up above
1821 	 */
1822 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1823 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1824 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1825 		/*
1826 		 * wait a minute, this guy is gone, there is no longer a
1827 		 * receiver. Send peer an ABORT!
1828 		 */
1829 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1830 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1831 		*abort_flag = 1;
1832 		return (0);
1833 	}
1834 	/*
1835 	 * Now before going further we see if there is room. If NOT then we
1836 	 * MAY let one through only IF this TSN is the one we are waiting
1837 	 * for on a partial delivery API.
1838 	 */
1839 
1840 	/* Is the stream valid? */
1841 	if (sid >= asoc->streamincnt) {
1842 		struct sctp_error_invalid_stream *cause;
1843 
1844 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1845 		    0, M_NOWAIT, 1, MT_DATA);
1846 		if (op_err != NULL) {
1847 			/* add some space up front so prepend will work well */
1848 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1849 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1850 			/*
1851 			 * Error causes are just parameters, and this one has
1852 			 * two back-to-back parameter headers: one with the error
1853 			 * type and size, the other with the stream ID and a reserved field
1854 			 */
1855 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1856 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1857 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1858 			cause->stream_id = htons(sid);
1859 			cause->reserved = htons(0);
1860 			sctp_queue_op_err(stcb, op_err);
1861 		}
1862 		SCTP_STAT_INCR(sctps_badsid);
1863 		SCTP_TCB_LOCK_ASSERT(stcb);
1864 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1865 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1866 			asoc->highest_tsn_inside_nr_map = tsn;
1867 		}
1868 		if (tsn == (asoc->cumulative_tsn + 1)) {
1869 			/* Update cum-ack */
1870 			asoc->cumulative_tsn = tsn;
1871 		}
1872 		return (0);
1873 	}
1874 	/*
1875 	 * If it's a fragmented message, let's see if we can find the control
1876 	 * on the reassembly queues.
1877 	 */
1878 	if ((chk_type == SCTP_IDATA) &&
1879 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1880 	    (fsn == 0)) {
1881 		/*
1882 		 * The first *must* be fsn 0, and other (middle/end) pieces
1883 		 * can *not* be fsn 0. XXX: This can happen in case of a
1884 		 * wrap around. Ignore this for now.
1885 		 */
1886 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1887 		goto err_out;
1888 	}
1889 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1890 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1891 	    chk_flags, control);
1892 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1893 		/* See if we can find the re-assembly entity */
1894 		if (control != NULL) {
1895 			/* We found something, does it belong? */
1896 			if (ordered && (mid != control->mid)) {
1897 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1898 		err_out:
1899 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1900 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1901 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1902 				*abort_flag = 1;
1903 				return (0);
1904 			}
1905 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1906 				/*
1907 				 * We can't have a switched order with an
1908 				 * unordered chunk
1909 				 */
1910 				SCTP_SNPRINTF(msg, sizeof(msg),
1911 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1912 				    tsn);
1913 				goto err_out;
1914 			}
1915 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1916 				 * We can't have a switched unordered with an
1917 				 * ordered chunk
1918 				 * ordered chunk
1919 				 */
1920 				SCTP_SNPRINTF(msg, sizeof(msg),
1921 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1922 				    tsn);
1923 				goto err_out;
1924 			}
1925 		}
1926 	} else {
1927 		/*
1928 		 * It's a complete segment. Let's validate we don't have a
1929 		 * re-assembly going on with the same Stream/Seq (for
1930 		 * ordered) or in the same Stream for unordered.
1931 		 */
1932 		if (control != NULL) {
1933 			if (ordered || asoc->idata_supported) {
1934 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1935 				    chk_flags, mid);
1936 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1937 				goto err_out;
1938 			} else {
1939 				if ((tsn == control->fsn_included + 1) &&
1940 				    (control->end_added == 0)) {
1941 					SCTP_SNPRINTF(msg, sizeof(msg),
1942 					    "Illegal message sequence, missing end for MID: %8.8x",
1943 					    control->fsn_included);
1944 					goto err_out;
1945 				} else {
1946 					control = NULL;
1947 				}
1948 			}
1949 		}
1950 	}
1951 	/* now do the tests */
1952 	if (((asoc->cnt_on_all_streams +
1953 	    asoc->cnt_on_reasm_queue +
1954 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1955 	    (((int)asoc->my_rwnd) <= 0)) {
1956 		/*
1957 		 * When we have NO room in the rwnd we check to make sure
1958 		 * the reader is doing its job...
1959 		 */
1960 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1961 			/* some to read, wake-up */
1962 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1963 		}
1964 		/* now is it in the mapping array of what we have accepted? */
1965 		if (chk_type == SCTP_DATA) {
1966 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1967 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1968 				/* Nope, not in the valid range; dump it */
1969 		dump_packet:
1970 				sctp_set_rwnd(stcb, asoc);
1971 				if ((asoc->cnt_on_all_streams +
1972 				    asoc->cnt_on_reasm_queue +
1973 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1974 					SCTP_STAT_INCR(sctps_datadropchklmt);
1975 				} else {
1976 					SCTP_STAT_INCR(sctps_datadroprwnd);
1977 				}
1978 				*break_flag = 1;
1979 				return (0);
1980 			}
1981 		} else {
1982 			if (control == NULL) {
1983 				goto dump_packet;
1984 			}
1985 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1986 				goto dump_packet;
1987 			}
1988 		}
1989 	}
1990 #ifdef SCTP_ASOCLOG_OF_TSNS
1991 	SCTP_TCB_LOCK_ASSERT(stcb);
1992 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1993 		asoc->tsn_in_at = 0;
1994 		asoc->tsn_in_wrapped = 1;
1995 	}
1996 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1997 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1998 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1999 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2000 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
2001 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2002 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2003 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2004 	asoc->tsn_in_at++;
2005 #endif
2006 	/*
2007 	 * Before we continue, let's validate that we are not being fooled by
2008 	 * an evil attacker. We can only have Nk chunks based on our TSN
2009 	 * spread allowed by the mapping array N * 8 bits, so there is no
2010 	 * way our stream sequence numbers could have wrapped. We of course
2011 	 * only validate the FIRST fragment so the bit must be set.
2012 	 */
2013 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2014 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2015 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2016 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2017 		/* The incoming sseq is behind where we last delivered? */
2018 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2019 		    mid, asoc->strmin[sid].last_mid_delivered);
2020 
2021 		if (asoc->idata_supported) {
2022 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2023 			    asoc->strmin[sid].last_mid_delivered,
2024 			    tsn,
2025 			    sid,
2026 			    mid);
2027 		} else {
2028 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2029 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2030 			    tsn,
2031 			    sid,
2032 			    (uint16_t)mid);
2033 		}
2034 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2035 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2036 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2037 		*abort_flag = 1;
2038 		return (0);
2039 	}
2040 	if (chk_type == SCTP_IDATA) {
2041 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2042 	} else {
2043 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2044 	}
2045 	if (last_chunk == 0) {
2046 		if (chk_type == SCTP_IDATA) {
2047 			dmbuf = SCTP_M_COPYM(*m,
2048 			    (offset + sizeof(struct sctp_idata_chunk)),
2049 			    the_len, M_NOWAIT);
2050 		} else {
2051 			dmbuf = SCTP_M_COPYM(*m,
2052 			    (offset + sizeof(struct sctp_data_chunk)),
2053 			    the_len, M_NOWAIT);
2054 		}
2055 #ifdef SCTP_MBUF_LOGGING
2056 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2057 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2058 		}
2059 #endif
2060 	} else {
2061 		/* We can steal the last chunk */
2062 		int l_len;
2063 
2064 		dmbuf = *m;
2065 		/* lop off the top part */
2066 		if (chk_type == SCTP_IDATA) {
2067 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2068 		} else {
2069 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2070 		}
2071 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2072 			l_len = SCTP_BUF_LEN(dmbuf);
2073 		} else {
2074 			/*
2075 			 * need to count up the size; hopefully we do not hit
2076 			 * this too often :-0
2077 			 */
2078 			struct mbuf *lat;
2079 
2080 			l_len = 0;
2081 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2082 				l_len += SCTP_BUF_LEN(lat);
2083 			}
2084 		}
2085 		if (l_len > the_len) {
2086 			/* Trim the rounding bytes off the end too */
2087 			m_adj(dmbuf, -(l_len - the_len));
2088 		}
2089 	}
2090 	if (dmbuf == NULL) {
2091 		SCTP_STAT_INCR(sctps_nomem);
2092 		return (0);
2093 	}
2094 	/*
2095 	 * Now no matter what, we need a control, get one if we don't have
2096 	 * one (we may have gotten it above when we found the message was
2097 	 * fragmented).
2098 	 */
2099 	if (control == NULL) {
2100 		sctp_alloc_a_readq(stcb, control);
2101 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2102 		    ppid,
2103 		    sid,
2104 		    chk_flags,
2105 		    NULL, fsn, mid);
2106 		if (control == NULL) {
2107 			SCTP_STAT_INCR(sctps_nomem);
2108 			return (0);
2109 		}
2110 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2111 			struct mbuf *mm;
2112 
2113 			control->data = dmbuf;
2114 			control->tail_mbuf = NULL;
2115 			for (mm = control->data; mm; mm = mm->m_next) {
2116 				control->length += SCTP_BUF_LEN(mm);
2117 				if (SCTP_BUF_NEXT(mm) == NULL) {
2118 					control->tail_mbuf = mm;
2119 				}
2120 			}
2121 			control->end_added = 1;
2122 			control->last_frag_seen = 1;
2123 			control->first_frag_seen = 1;
2124 			control->fsn_included = fsn;
2125 			control->top_fsn = fsn;
2126 		}
2127 		created_control = 1;
2128 	}
2129 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2130 	    chk_flags, ordered, mid, control);
2131 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2132 	    TAILQ_EMPTY(&asoc->resetHead) &&
2133 	    ((ordered == 0) ||
2134 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2135 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2136 		/* Candidate for express delivery */
2137 		/*
2138 		 * It's not fragmented, no PD-API is up, nothing is in the
2139 		 * delivery queue, it's un-ordered OR ordered and the next to
2140 		 * deliver AND nothing else is stuck on the stream queue,
2141 		 * AND there is room for it in the socket buffer. Let's just
2142 		 * stuff it up the buffer....
2143 		 */
2144 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2145 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2146 			asoc->highest_tsn_inside_nr_map = tsn;
2147 		}
2148 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2149 		    control, mid);
2150 
2151 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2152 		    control, &stcb->sctp_socket->so_rcv,
2153 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2154 
2155 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2156 			/* for ordered, bump what we delivered */
2157 			asoc->strmin[sid].last_mid_delivered++;
2158 		}
2159 		SCTP_STAT_INCR(sctps_recvexpress);
2160 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2161 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2162 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2163 		}
2164 		control = NULL;
2165 		goto finish_express_del;
2166 	}
2167 
2168 	/* Now will we need a chunk too? */
2169 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2170 		sctp_alloc_a_chunk(stcb, chk);
2171 		if (chk == NULL) {
2172 			/* No memory so we drop the chunk */
2173 			SCTP_STAT_INCR(sctps_nomem);
2174 			if (last_chunk == 0) {
2175 				/* we copied it, free the copy */
2176 				sctp_m_freem(dmbuf);
2177 			}
2178 			return (0);
2179 		}
2180 		chk->rec.data.tsn = tsn;
2181 		chk->no_fr_allowed = 0;
2182 		chk->rec.data.fsn = fsn;
2183 		chk->rec.data.mid = mid;
2184 		chk->rec.data.sid = sid;
2185 		chk->rec.data.ppid = ppid;
2186 		chk->rec.data.context = stcb->asoc.context;
2187 		chk->rec.data.doing_fast_retransmit = 0;
2188 		chk->rec.data.rcv_flags = chk_flags;
2189 		chk->asoc = asoc;
2190 		chk->send_size = the_len;
2191 		chk->whoTo = net;
2192 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2193 		    chk,
2194 		    control, mid);
2195 		atomic_add_int(&net->ref_count, 1);
2196 		chk->data = dmbuf;
2197 	}
2198 	/* Set the appropriate TSN mark */
2199 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2200 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2201 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2202 			asoc->highest_tsn_inside_nr_map = tsn;
2203 		}
2204 	} else {
2205 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2206 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2207 			asoc->highest_tsn_inside_map = tsn;
2208 		}
2209 	}
2210 	/* Now is it complete (i.e. not fragmented)? */
2211 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2212 		/*
2213 		 * Special check for when streams are resetting. We could be
2214 		 * smarter about this and check the actual stream to see if
2215 		 * it is not being reset... that way we would not create
2216 		 * head-of-line blocking between streams being reset and
2217 		 * those not being reset.
2219 		 */
2220 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2221 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2222 			/*
2223 			 * yep, it's past where we need to reset... go ahead
2224 			 * and queue it.
2225 			 */
2226 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2227 				/* first one on */
2228 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2229 			} else {
2230 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2231 				unsigned char inserted = 0;
2232 
2233 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2234 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2236 						continue;
2237 					} else {
2238 						/* found it */
2239 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2240 						inserted = 1;
2241 						break;
2242 					}
2243 				}
2244 				if (inserted == 0) {
2245 					/*
2246 					 * not inserted before any entry,
2247 					 * so it must go at the end.
2249 					 */
2250 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2251 				}
2252 			}
2253 			goto finish_express_del;
2254 		}
2255 		if (chk_flags & SCTP_DATA_UNORDERED) {
2256 			/* queue directly into socket buffer */
2257 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2258 			    control, mid);
2259 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2260 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2261 			    control,
2262 			    &stcb->sctp_socket->so_rcv, 1,
2263 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2264 
2265 		} else {
2266 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2267 			    mid);
2268 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2269 			if (*abort_flag) {
2270 				if (last_chunk) {
2271 					*m = NULL;
2272 				}
2273 				return (0);
2274 			}
2275 		}
2276 		goto finish_express_del;
2277 	}
2278 	/* If we reach here it's a reassembly */
2279 	need_reasm_check = 1;
2280 	SCTPDBG(SCTP_DEBUG_XXX,
2281 	    "Queue data to stream for reasm control: %p MID: %u\n",
2282 	    control, mid);
2283 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2284 	if (*abort_flag) {
2285 		/*
2286 		 * the assoc is now gone and chk was put onto the reasm
2287 		 * queue, which has all been freed.
2288 		 */
2289 		if (last_chunk) {
2290 			*m = NULL;
2291 		}
2292 		return (0);
2293 	}
2294 finish_express_del:
2295 	/* Here we tidy up things */
2296 	if (tsn == (asoc->cumulative_tsn + 1)) {
2297 		/* Update cum-ack */
2298 		asoc->cumulative_tsn = tsn;
2299 	}
2300 	if (last_chunk) {
2301 		*m = NULL;
2302 	}
2303 	if (ordered) {
2304 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2305 	} else {
2306 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2307 	}
2308 	SCTP_STAT_INCR(sctps_recvdata);
2309 	/* Set it present please */
2310 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2311 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2312 	}
2313 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2314 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2315 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2316 	}
2317 	if (need_reasm_check) {
2318 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2319 		need_reasm_check = 0;
2320 	}
2321 	/* check the special flag for stream resets */
2322 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2323 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2324 		/*
2325 		 * we have finished working through the backlogged TSNs, now
2326 		 * time to reset streams. 1: call reset function. 2: free
2327 		 * pending_reply space. 3: distribute any chunks in the
2328 		 * pending_reply_queue.
2329 		 */
2330 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2331 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2332 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2333 		SCTP_FREE(liste, SCTP_M_STRESET);
2334 		/* sa_ignore FREED_MEMORY */
2335 		liste = TAILQ_FIRST(&asoc->resetHead);
2336 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2337 			/* All can be removed */
2338 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2339 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2340 				strm = &asoc->strmin[control->sinfo_stream];
2341 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2342 				if (*abort_flag) {
2343 					return (0);
2344 				}
2345 				if (need_reasm_check) {
2346 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2347 					need_reasm_check = 0;
2348 				}
2349 			}
2350 		} else {
2351 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2352 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2353 					break;
2354 				}
2355 				/*
2356 				 * if control->sinfo_tsn is <= liste->tsn we
2357 				 * can process it which is the NOT of
2358 				 * control->sinfo_tsn > liste->tsn
2359 				 */
2360 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2361 				strm = &asoc->strmin[control->sinfo_stream];
2362 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2363 				if (*abort_flag) {
2364 					return (0);
2365 				}
2366 				if (need_reasm_check) {
2367 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2368 					need_reasm_check = 0;
2369 				}
2370 			}
2371 		}
2372 	}
2373 	return (1);
2374 }
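/*
 * The checks above lean on serial-number arithmetic: TSNs, MIDs and FSNs
 * live mod 2^32 (or 2^16), so ordering is decided by the distance around
 * the circle rather than by a plain '<'.  A minimal sketch of one common
 * formulation follows; the exact SCTP_TSN_GT()/SCTP_TSN_GE() macros live
 * in sctp_constants.h and this helper name is illustrative only.
 */
#if 0
#include <stdint.h>

static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* True when a is "after" b on the 32-bit circle. */
	return ((int32_t)(a - b) > 0);
}
/* tsn_gt(0x00000000, 0xffffffff) is true: 0 follows the wrap. */
#endif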
2375 
2376 static const int8_t sctp_map_lookup_tab[256] = {
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 5,
2381 	0, 1, 0, 2, 0, 1, 0, 3,
2382 	0, 1, 0, 2, 0, 1, 0, 4,
2383 	0, 1, 0, 2, 0, 1, 0, 3,
2384 	0, 1, 0, 2, 0, 1, 0, 6,
2385 	0, 1, 0, 2, 0, 1, 0, 3,
2386 	0, 1, 0, 2, 0, 1, 0, 4,
2387 	0, 1, 0, 2, 0, 1, 0, 3,
2388 	0, 1, 0, 2, 0, 1, 0, 5,
2389 	0, 1, 0, 2, 0, 1, 0, 3,
2390 	0, 1, 0, 2, 0, 1, 0, 4,
2391 	0, 1, 0, 2, 0, 1, 0, 3,
2392 	0, 1, 0, 2, 0, 1, 0, 7,
2393 	0, 1, 0, 2, 0, 1, 0, 3,
2394 	0, 1, 0, 2, 0, 1, 0, 4,
2395 	0, 1, 0, 2, 0, 1, 0, 3,
2396 	0, 1, 0, 2, 0, 1, 0, 5,
2397 	0, 1, 0, 2, 0, 1, 0, 3,
2398 	0, 1, 0, 2, 0, 1, 0, 4,
2399 	0, 1, 0, 2, 0, 1, 0, 3,
2400 	0, 1, 0, 2, 0, 1, 0, 6,
2401 	0, 1, 0, 2, 0, 1, 0, 3,
2402 	0, 1, 0, 2, 0, 1, 0, 4,
2403 	0, 1, 0, 2, 0, 1, 0, 3,
2404 	0, 1, 0, 2, 0, 1, 0, 5,
2405 	0, 1, 0, 2, 0, 1, 0, 3,
2406 	0, 1, 0, 2, 0, 1, 0, 4,
2407 	0, 1, 0, 2, 0, 1, 0, 3,
2408 	0, 1, 0, 2, 0, 1, 0, 8
2409 };
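/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * starting at the least significant bit (0xff maps to 8); the slide code
 * below uses it to find the first missing TSN within a byte.  A minimal
 * standalone sketch of the same computation, with an illustrative helper
 * name:
 */
#if 0
#include <stdint.h>

static int
count_trailing_ones(uint8_t val)
{
	int n = 0;

	while (val & 0x01) {
		n++;
		val >>= 1;
	}
	return (n);
}
/* count_trailing_ones(val) == sctp_map_lookup_tab[val] for all val. */
#endif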
2410 
2411 
2412 void
2413 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2414 {
2415 	/*
2416 	 * Now we also need to check the mapping array in a couple of ways.
2417 	 * 1) Did we move the cum-ack point?
2418 	 *
2419 	 * When you first glance at this you might think that all entries
2420 	 * that make up the position of the cum-ack would be in the
2421 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2422 	 * deliverable. That's true with one exception: when it's a fragmented
2423 	 * message we may not deliver the data until some threshold (or all
2424 	 * of it) is in place. So we must OR the nr_mapping_array and
2425 	 * mapping_array to get a true picture of the cum-ack.
2426 	 */
2427 	struct sctp_association *asoc;
2428 	int at;
2429 	uint8_t val;
2430 	int slide_from, slide_end, lgap, distance;
2431 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2432 
2433 	asoc = &stcb->asoc;
2434 
2435 	old_cumack = asoc->cumulative_tsn;
2436 	old_base = asoc->mapping_array_base_tsn;
2437 	old_highest = asoc->highest_tsn_inside_map;
2438 	/*
2439 	 * We could probably improve this a small bit by calculating the
2440 	 * offset of the current cum-ack as the starting point.
2441 	 */
2442 	at = 0;
2443 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2444 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2445 		if (val == 0xff) {
2446 			at += 8;
2447 		} else {
2448 			/* there is a 0 bit */
2449 			at += sctp_map_lookup_tab[val];
2450 			break;
2451 		}
2452 	}
2453 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2454 
2455 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2456 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2457 #ifdef INVARIANTS
2458 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2459 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2460 #else
2461 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2462 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2463 		sctp_print_mapping_array(asoc);
2464 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2465 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2466 		}
2467 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2468 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2469 #endif
2470 	}
2471 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2472 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2473 	} else {
2474 		highest_tsn = asoc->highest_tsn_inside_map;
2475 	}
2476 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2477 		/* The complete array was completed by a single FR */
2478 		/* highest becomes the cum-ack */
2479 		int clr;
2480 #ifdef INVARIANTS
2481 		unsigned int i;
2482 #endif
2483 
2484 		/* clear the array */
2485 		clr = ((at + 7) >> 3);
2486 		if (clr > asoc->mapping_array_size) {
2487 			clr = asoc->mapping_array_size;
2488 		}
2489 		memset(asoc->mapping_array, 0, clr);
2490 		memset(asoc->nr_mapping_array, 0, clr);
2491 #ifdef INVARIANTS
2492 		for (i = 0; i < asoc->mapping_array_size; i++) {
2493 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2494 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2495 				sctp_print_mapping_array(asoc);
2496 			}
2497 		}
2498 #endif
2499 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2500 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2501 	} else if (at >= 8) {
2502 		/* we can slide the mapping array down */
2503 		/* slide_from holds where we hit the first NON 0xff byte */
2504 
2505 		/*
2506 		 * now calculate the ceiling of the move using our highest
2507 		 * TSN value
2508 		 */
2509 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2510 		slide_end = (lgap >> 3);
2511 		if (slide_end < slide_from) {
2512 			sctp_print_mapping_array(asoc);
2513 #ifdef INVARIANTS
2514 			panic("impossible slide");
2515 #else
2516 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2517 			    lgap, slide_end, slide_from, at);
2518 			return;
2519 #endif
2520 		}
2521 		if (slide_end > asoc->mapping_array_size) {
2522 #ifdef INVARIANTS
2523 			panic("would overrun buffer");
2524 #else
2525 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2526 			    asoc->mapping_array_size, slide_end);
2527 			slide_end = asoc->mapping_array_size;
2528 #endif
2529 		}
2530 		distance = (slide_end - slide_from) + 1;
2531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2532 			sctp_log_map(old_base, old_cumack, old_highest,
2533 			    SCTP_MAP_PREPARE_SLIDE);
2534 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2535 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2536 		}
2537 		if (distance + slide_from > asoc->mapping_array_size ||
2538 		    distance < 0) {
2539 			/*
2540 			 * Here we do NOT slide forward the array so that
2541 			 * hopefully when more data comes in to fill it up
2542 			 * we will be able to slide it forward. Really I
2543 			 * don't think this should happen :-0
2544 			 */
2545 
2546 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2547 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2548 				    (uint32_t)asoc->mapping_array_size,
2549 				    SCTP_MAP_SLIDE_NONE);
2550 			}
2551 		} else {
2552 			int ii;
2553 
2554 			for (ii = 0; ii < distance; ii++) {
2555 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2556 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2557 
2558 			}
2559 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2560 				asoc->mapping_array[ii] = 0;
2561 				asoc->nr_mapping_array[ii] = 0;
2562 			}
2563 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2564 				asoc->highest_tsn_inside_map += (slide_from << 3);
2565 			}
2566 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2567 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2568 			}
2569 			asoc->mapping_array_base_tsn += (slide_from << 3);
2570 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2571 				sctp_log_map(asoc->mapping_array_base_tsn,
2572 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2573 				    SCTP_MAP_SLIDE_RESULT);
2574 			}
2575 		}
2576 	}
2577 }
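/*
 * The cum-ack scan at the top of the function above boils down to: OR
 * the renegable and non-renegable maps byte by byte and count the
 * consecutive 1-bits from the base; the cumulative TSN is then
 * base_tsn + (count - 1).  A condensed sketch of just that scan, with
 * illustrative parameter names:
 */
#if 0
static uint32_t
scan_cum_ack(const uint8_t *map, const uint8_t *nr_map, int map_size,
    uint32_t base_tsn)
{
	int at = 0, i;
	uint8_t val;

	for (i = 0; i < map_size; i++) {
		val = map[i] | nr_map[i];
		if (val == 0xff) {
			at += 8;	/* whole byte acked, keep scanning */
		} else {
			at += sctp_map_lookup_tab[val];	/* trailing ones */
			break;
		}
	}
	return (base_tsn + (at - 1));
}
#endif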
2578 
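/*
 * Decide whether to send a SACK immediately or (re)arm the delayed-ack
 * timer.  was_a_gap records whether a gap existed before this packet was
 * processed, so a freshly closed gap can be acknowledged right away.
 */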
2579 void
2580 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2581 {
2582 	struct sctp_association *asoc;
2583 	uint32_t highest_tsn;
2584 	int is_a_gap;
2585 
2586 	sctp_slide_mapping_arrays(stcb);
2587 	asoc = &stcb->asoc;
2588 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2589 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2590 	} else {
2591 		highest_tsn = asoc->highest_tsn_inside_map;
2592 	}
2593 	/* Is there a gap now? */
2594 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2595 
2596 	/*
2597 	 * Now we need to see if we need to queue a sack or just start the
2598 	 * timer (if allowed).
2599 	 */
2600 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2601 		/*
2602 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2603 		 * sure the SACK timer is off and instead send a SHUTDOWN
2604 		 * and a SACK.
2605 		 */
2606 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2607 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2608 			    stcb->sctp_ep, stcb, NULL,
2609 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2610 		}
2611 		sctp_send_shutdown(stcb,
2612 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2613 		if (is_a_gap) {
2614 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2615 		}
2616 	} else {
2617 		/*
2618 		 * CMT DAC algorithm: increase number of packets received
2619 		 * since last ack
2620 		 */
2621 		stcb->asoc.cmt_dac_pkts_rcvd++;
2622 
2623 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2624 							 * SACK */
2625 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2626 							 * longer is one */
2627 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2628 		    (is_a_gap) ||	/* is still a gap */
2629 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2630 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2631 		    ) {
2633 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2634 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2635 			    (stcb->asoc.send_sack == 0) &&
2636 			    (stcb->asoc.numduptsns == 0) &&
2637 			    (stcb->asoc.delayed_ack) &&
2638 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2640 				/*
2641 				 * CMT DAC algorithm: With CMT, delay acks
2642 				 * even in the face of reordering.
2643 				 * Therefore, acks that do not have to be
2644 				 * sent because of the above reasons will
2645 				 * be delayed. That is, acks that would
2646 				 * have been sent due to gap reports will
2647 				 * be delayed with DAC. Start the delayed
2648 				 * ack timer.
2650 				 */
2651 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2652 				    stcb->sctp_ep, stcb, NULL);
2653 			} else {
2654 				/*
2655 				 * Ok we must build a SACK since the timer
2656 				 * is pending, we got our first packet OR
2657 				 * there are gaps or duplicates.
2658 				 */
2659 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2660 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2661 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2662 			}
2663 		} else {
2664 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2665 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2666 				    stcb->sctp_ep, stcb, NULL);
2667 			}
2668 		}
2669 	}
2670 }
2671 
2672 int
2673 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2674     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2675     struct sctp_nets *net, uint32_t *high_tsn)
2676 {
2677 	struct sctp_chunkhdr *ch, chunk_buf;
2678 	struct sctp_association *asoc;
2679 	int num_chunks = 0;	/* number of control chunks processed */
2680 	int stop_proc = 0;
2681 	int break_flag, last_chunk;
2682 	int abort_flag = 0, was_a_gap;
2683 	struct mbuf *m;
2684 	uint32_t highest_tsn;
2685 	uint16_t chk_length;
2686 
2687 	/* set the rwnd */
2688 	sctp_set_rwnd(stcb, &stcb->asoc);
2689 
2690 	m = *mm;
2691 	SCTP_TCB_LOCK_ASSERT(stcb);
2692 	asoc = &stcb->asoc;
2693 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2694 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2695 	} else {
2696 		highest_tsn = asoc->highest_tsn_inside_map;
2697 	}
2698 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2699 	/*
2700 	 * setup where we got the last DATA packet from for any SACK that
2701 	 * may need to go out. Don't bump the net. This is done ONLY when a
2702 	 * chunk is assigned.
2703 	 */
2704 	asoc->last_data_chunk_from = net;
2705 
2706 	/*-
2707 	 * Now before we proceed we must figure out if this is a wasted
2708 	 * cluster... i.e. it is a small packet sent in and yet the driver
2709 	 * underneath allocated a full cluster for it. If so we must copy it
2710 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2711 	 * with cluster starvation.
2712 	 */
2713 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2714 		/* we only handle mbufs that are singletons.. not chains */
2715 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2716 		if (m) {
2717 			/* ok, let's see if we can copy the data up */
2718 			caddr_t *from, *to;
2719 
2720 			/* get the pointers and copy */
2721 			to = mtod(m, caddr_t *);
2722 			from = mtod((*mm), caddr_t *);
2723 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2724 			/* copy the length and free up the old */
2725 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2726 			sctp_m_freem(*mm);
2727 			/* success, back copy */
2728 			*mm = m;
2729 		} else {
2730 			/* We are in trouble in the mbuf world .. yikes */
2731 			m = *mm;
2732 		}
2733 	}
2734 	/* get pointer to the first chunk header */
2735 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2736 	    sizeof(struct sctp_chunkhdr),
2737 	    (uint8_t *)&chunk_buf);
2738 	if (ch == NULL) {
2739 		return (1);
2740 	}
2741 	/*
2742 	 * process all DATA chunks...
2743 	 */
2744 	*high_tsn = asoc->cumulative_tsn;
2745 	break_flag = 0;
2746 	asoc->data_pkts_seen++;
2747 	while (stop_proc == 0) {
2748 		/* validate chunk length */
2749 		chk_length = ntohs(ch->chunk_length);
2750 		if (length - *offset < chk_length) {
2751 			/* all done, mutilated chunk */
2752 			stop_proc = 1;
2753 			continue;
2754 		}
2755 		if ((asoc->idata_supported == 1) &&
2756 		    (ch->chunk_type == SCTP_DATA)) {
2757 			struct mbuf *op_err;
2758 			char msg[SCTP_DIAG_INFO_LEN];
2759 
2760 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2761 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2762 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2763 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2764 			return (2);
2765 		}
2766 		if ((asoc->idata_supported == 0) &&
2767 		    (ch->chunk_type == SCTP_IDATA)) {
2768 			struct mbuf *op_err;
2769 			char msg[SCTP_DIAG_INFO_LEN];
2770 
2771 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2772 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2773 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2774 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2775 			return (2);
2776 		}
2777 		if ((ch->chunk_type == SCTP_DATA) ||
2778 		    (ch->chunk_type == SCTP_IDATA)) {
2779 			uint16_t clen;
2780 
2781 			if (ch->chunk_type == SCTP_DATA) {
2782 				clen = sizeof(struct sctp_data_chunk);
2783 			} else {
2784 				clen = sizeof(struct sctp_idata_chunk);
2785 			}
2786 			if (chk_length < clen) {
2787 				 * Need to send an abort since we had an
2788 				 * invalid data chunk.
2789 				 * invalid data chunk.
2790 				 */
2791 				struct mbuf *op_err;
2792 				char msg[SCTP_DIAG_INFO_LEN];
2793 
2794 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2795 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2796 				    chk_length);
2797 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2798 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2799 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2800 				return (2);
2801 			}
2802 #ifdef SCTP_AUDITING_ENABLED
2803 			sctp_audit_log(0xB1, 0);
2804 #endif
2805 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2806 				last_chunk = 1;
2807 			} else {
2808 				last_chunk = 0;
2809 			}
2810 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2811 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2812 			    last_chunk, ch->chunk_type)) {
2813 				num_chunks++;
2814 			}
2815 			if (abort_flag)
2816 				return (2);
2817 
2818 			if (break_flag) {
2819 				/*
2820 				 * Set because we ran out of rwnd space and
2821 				 * have no drop report space left.
2822 				 */
2823 				stop_proc = 1;
2824 				continue;
2825 			}
2826 		} else {
2827 			/* not a data chunk in the data region */
2828 			switch (ch->chunk_type) {
2829 			case SCTP_INITIATION:
2830 			case SCTP_INITIATION_ACK:
2831 			case SCTP_SELECTIVE_ACK:
2832 			case SCTP_NR_SELECTIVE_ACK:
2833 			case SCTP_HEARTBEAT_REQUEST:
2834 			case SCTP_HEARTBEAT_ACK:
2835 			case SCTP_ABORT_ASSOCIATION:
2836 			case SCTP_SHUTDOWN:
2837 			case SCTP_SHUTDOWN_ACK:
2838 			case SCTP_OPERATION_ERROR:
2839 			case SCTP_COOKIE_ECHO:
2840 			case SCTP_COOKIE_ACK:
2841 			case SCTP_ECN_ECHO:
2842 			case SCTP_ECN_CWR:
2843 			case SCTP_SHUTDOWN_COMPLETE:
2844 			case SCTP_AUTHENTICATION:
2845 			case SCTP_ASCONF_ACK:
2846 			case SCTP_PACKET_DROPPED:
2847 			case SCTP_STREAM_RESET:
2848 			case SCTP_FORWARD_CUM_TSN:
2849 			case SCTP_ASCONF:
2850 				{
2851 					/*
2852 					 * Now, what do we do with KNOWN
2853 					 * chunks that are NOT in the right
2854 					 * place?
2855 					 *
2856 					 * For now, I do nothing but ignore
2857 					 * them. We may later want to add
2858 					 * sysctl stuff to switch out and do
2859 					 * either an ABORT() or possibly
2860 					 * process them.
2861 					 */
2862 					struct mbuf *op_err;
2863 					char msg[SCTP_DIAG_INFO_LEN];
2864 
2865 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2866 					    ch->chunk_type);
2867 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2868 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2869 					return (2);
2870 				}
2871 			default:
2872 				/*
2873 				 * Unknown chunk type: use bit rules after
2874 				 * checking length
2875 				 */
2876 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2877 					/*
2878 					 * had an invalid chunk.
2879 					 * had a invalid chunk.
2880 					 */
2881 					struct mbuf *op_err;
2882 					char msg[SCTP_DIAG_INFO_LEN];
2883 
2884 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2885 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2886 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2887 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2888 					return (2);
2889 				}
2890 				if (ch->chunk_type & 0x40) {
2891 					/* Add an error report to the queue */
2892 					struct mbuf *op_err;
2893 					struct sctp_gen_error_cause *cause;
2894 
2895 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2896 					    0, M_NOWAIT, 1, MT_DATA);
2897 					if (op_err != NULL) {
2898 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2899 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2900 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2901 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2902 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2903 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2904 							sctp_queue_op_err(stcb, op_err);
2905 						} else {
2906 							sctp_m_freem(op_err);
2907 						}
2908 					}
2909 				}
2910 				if ((ch->chunk_type & 0x80) == 0) {
2911 					/* discard the rest of this packet */
2912 					stop_proc = 1;
2913 				}	/* else skip this bad chunk and
2914 					 * continue... */
2915 				break;
2916 			}	/* switch of chunk type */
2917 		}
2918 		*offset += SCTP_SIZE32(chk_length);
2919 		if ((*offset >= length) || stop_proc) {
2920 			/* no more data left in the mbuf chain */
2921 			stop_proc = 1;
2922 			continue;
2923 		}
2924 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2925 		    sizeof(struct sctp_chunkhdr),
2926 		    (uint8_t *)&chunk_buf);
2927 		if (ch == NULL) {
2928 			*offset = length;
2929 			stop_proc = 1;
2930 			continue;
2931 		}
2932 	}
2933 	if (break_flag) {
2934 		/*
2935 		 * we need to report rwnd overrun drops.
2936 		 */
2937 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2938 	}
2939 	if (num_chunks) {
2940 		/*
2941 		 * Did we get data? If so, update the time for auto-close and
2942 		 * give the peer credit for being alive.
2943 		 */
2944 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2945 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2946 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2947 			    stcb->asoc.overall_error_count,
2948 			    0,
2949 			    SCTP_FROM_SCTP_INDATA,
2950 			    __LINE__);
2951 		}
2952 		stcb->asoc.overall_error_count = 0;
2953 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2954 	}
2955 	/* now service all of the reassembly queue if needed */
2956 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2957 		/* Assure that we ack right away */
2958 		stcb->asoc.send_sack = 1;
2959 	}
2960 	/* Start a sack timer or QUEUE a SACK for sending */
2961 	sctp_sack_check(stcb, was_a_gap);
2962 	return (0);
2963 }
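/*
 * The chunk loop above is a TLV walk: copy out a fixed-size chunk
 * header, validate its length against what remains in the packet, then
 * advance by the length rounded up to a 4-byte boundary (SCTP_SIZE32).
 * A minimal standalone sketch over a flat buffer; the names are
 * illustrative and the real code reads from an mbuf chain via
 * sctp_m_getptr():
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct chunk_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network order, includes this header */
};

static void
walk_chunks(const uint8_t *buf, int len)
{
	struct chunk_hdr ch;
	uint16_t chk_length;
	int offset = 0;

	while (offset + (int)sizeof(ch) <= len) {
		memcpy(&ch, buf + offset, sizeof(ch));
		chk_length = ntohs(ch.length);
		if (chk_length < sizeof(ch) || len - offset < chk_length) {
			break;	/* mutilated chunk, stop processing */
		}
		/* ... dispatch on ch.type here ... */
		offset += (chk_length + 3) & ~3;	/* SCTP_SIZE32() */
	}
}
#endif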
2964 
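/*
 * Mark every TSN covered by one gap-ack block, i.e. the range
 * [last_tsn + frag_strt, last_tsn + frag_end], on the sent queue.
 * *p_tp1 carries the queue position across calls so an in-order SACK
 * walks the sent queue only once; if the blocks arrive out of order
 * the walk circles back to the head of the queue.
 */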
2965 static int
2966 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2967     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2968     int *num_frs,
2969     uint32_t *biggest_newly_acked_tsn,
2970     uint32_t *this_sack_lowest_newack,
2971     int *rto_ok)
2972 {
2973 	struct sctp_tmit_chunk *tp1;
2974 	unsigned int theTSN;
2975 	int j, wake_him = 0, circled = 0;
2976 
2977 	/* Recover the tp1 we last saw */
2978 	tp1 = *p_tp1;
2979 	if (tp1 == NULL) {
2980 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2981 	}
2982 	for (j = frag_strt; j <= frag_end; j++) {
2983 		theTSN = j + last_tsn;
2984 		while (tp1) {
2985 			if (tp1->rec.data.doing_fast_retransmit)
2986 				(*num_frs) += 1;
2987 
2988 			/*-
2989 			 * CMT: CUCv2 algorithm. For each TSN being
2990 			 * processed from the sent queue, track the
2991 			 * next expected pseudo-cumack, or
2992 			 * rtx_pseudo_cumack, if required. Separate
2993 			 * cumack trackers for first transmissions,
2994 			 * and retransmissions.
2995 			 */
2996 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2997 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2998 			    (tp1->snd_count == 1)) {
2999 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3000 				tp1->whoTo->find_pseudo_cumack = 0;
3001 			}
3002 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3003 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3004 			    (tp1->snd_count > 1)) {
3005 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3006 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3007 			}
3008 			if (tp1->rec.data.tsn == theTSN) {
3009 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3010 					/*-
3011 					 * must be held until
3012 					 * cum-ack passes
3013 					 */
3014 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3015 						/*-
3016 						 * If it is less than RESEND, it is
3017 						 * now no-longer in flight.
3018 						 * Higher values may already be set
3019 						 * via previous Gap Ack Blocks...
3020 						 * i.e. ACKED or RESEND.
3021 						 */
3022 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3023 						    *biggest_newly_acked_tsn)) {
3024 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3025 						}
3026 						/*-
3027 						 * CMT: SFR algo (and HTNA) - set
3028 						 * saw_newack to 1 for dest being
3029 						 * newly acked. update
3030 						 * this_sack_highest_newack if
3031 						 * appropriate.
3032 						 */
3033 						if (tp1->rec.data.chunk_was_revoked == 0)
3034 							tp1->whoTo->saw_newack = 1;
3035 
3036 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3037 						    tp1->whoTo->this_sack_highest_newack)) {
3038 							tp1->whoTo->this_sack_highest_newack =
3039 							    tp1->rec.data.tsn;
3040 						}
3041 						/*-
3042 						 * CMT DAC algo: also update
3043 						 * this_sack_lowest_newack
3044 						 */
3045 						if (*this_sack_lowest_newack == 0) {
3046 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3047 								sctp_log_sack(*this_sack_lowest_newack,
3048 								    last_tsn,
3049 								    tp1->rec.data.tsn,
3050 								    0,
3051 								    0,
3052 								    SCTP_LOG_TSN_ACKED);
3053 							}
3054 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3055 						}
3056 						/*-
3057 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3058 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3059 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3060 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3061 						 * Separate pseudo_cumack trackers for first transmissions and
3062 						 * retransmissions.
3063 						 */
3064 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3065 							if (tp1->rec.data.chunk_was_revoked == 0) {
3066 								tp1->whoTo->new_pseudo_cumack = 1;
3067 							}
3068 							tp1->whoTo->find_pseudo_cumack = 1;
3069 						}
3070 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3071 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3072 						}
3073 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3074 							if (tp1->rec.data.chunk_was_revoked == 0) {
3075 								tp1->whoTo->new_pseudo_cumack = 1;
3076 							}
3077 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3078 						}
3079 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3080 							sctp_log_sack(*biggest_newly_acked_tsn,
3081 							    last_tsn,
3082 							    tp1->rec.data.tsn,
3083 							    frag_strt,
3084 							    frag_end,
3085 							    SCTP_LOG_TSN_ACKED);
3086 						}
3087 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3088 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3089 							    tp1->whoTo->flight_size,
3090 							    tp1->book_size,
3091 							    (uint32_t)(uintptr_t)tp1->whoTo,
3092 							    tp1->rec.data.tsn);
3093 						}
3094 						sctp_flight_size_decrease(tp1);
3095 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3096 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3097 							    tp1);
3098 						}
3099 						sctp_total_flight_decrease(stcb, tp1);
3100 
3101 						tp1->whoTo->net_ack += tp1->send_size;
3102 						if (tp1->snd_count < 2) {
3103 							/*-
3104 							 * True non-retransmitted chunk
3105 							 */
3106 							tp1->whoTo->net_ack2 += tp1->send_size;
3107 
3108 							/*-
3109 							 * update RTO too ?
3110 							 */
3111 							if (tp1->do_rtt) {
3112 								if (*rto_ok &&
3113 								    sctp_calculate_rto(stcb,
3114 								    &stcb->asoc,
3115 								    tp1->whoTo,
3116 								    &tp1->sent_rcv_time,
3117 								    SCTP_RTT_FROM_DATA)) {
3118 									*rto_ok = 0;
3119 								}
3120 								if (tp1->whoTo->rto_needed == 0) {
3121 									tp1->whoTo->rto_needed = 1;
3122 								}
3123 								tp1->do_rtt = 0;
3124 							}
3125 						}
3126 
3127 					}
3128 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3129 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3130 						    stcb->asoc.this_sack_highest_gap)) {
3131 							stcb->asoc.this_sack_highest_gap =
3132 							    tp1->rec.data.tsn;
3133 						}
3134 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3135 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3136 #ifdef SCTP_AUDITING_ENABLED
3137 							sctp_audit_log(0xB2,
3138 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3139 #endif
3140 						}
3141 					}
3142 					/*-
3143 					 * All chunks NOT UNSENT fall through here and are marked
3144 					 * (leave PR-SCTP ones that are to skip alone though)
3145 					 */
3146 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3147 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3148 						tp1->sent = SCTP_DATAGRAM_MARKED;
3149 					}
3150 					if (tp1->rec.data.chunk_was_revoked) {
3151 						/* deflate the cwnd */
3152 						tp1->whoTo->cwnd -= tp1->book_size;
3153 						tp1->rec.data.chunk_was_revoked = 0;
3154 					}
3155 					/* NR Sack code here */
3156 					if (nr_sacking &&
3157 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3158 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3159 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3160 #ifdef INVARIANTS
3161 						} else {
3162 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3163 #endif
3164 						}
3165 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3166 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3167 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3168 							stcb->asoc.trigger_reset = 1;
3169 						}
3170 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3171 						if (tp1->data) {
3172 							/*
3173 							 * sa_ignore
3174 							 * NO_NULL_CHK
3175 							 */
3176 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3177 							sctp_m_freem(tp1->data);
3178 							tp1->data = NULL;
3179 						}
3180 						wake_him++;
3181 					}
3182 				}
3183 				break;
3184 			}	/* if (tp1->tsn == theTSN) */
3185 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3186 				break;
3187 			}
3188 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3189 			if ((tp1 == NULL) && (circled == 0)) {
3190 				circled++;
3191 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3192 			}
3193 		}		/* end while (tp1) */
3194 		if (tp1 == NULL) {
3195 			circled = 0;
3196 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3197 		}
3198 		/* In case the fragments were not in order we must reset */
3199 	}			/* end for (j = fragStart */
3200 	*p_tp1 = tp1;
3201 	return (wake_him);	/* Return value only used for nr-sack */
3202 }
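
/*
 * Editor's sketch (illustrative only, not part of the stack): the
 * SCTP_TSN_GT()/SCTP_TSN_GE() comparisons used throughout this file are
 * serial-number arithmetic on a 32-bit TSN space that wraps around, in
 * the spirit of RFC 1982.  A hypothetical stand-alone version of the
 * "greater than" test could look like this; the real macros live in the
 * SCTP headers.
 */
static inline int
example_tsn_gt(uint32_t a, uint32_t b)
{
	/*
	 * a is "after" b when the forward distance b -> a is non-zero
	 * and less than half of the 32-bit space.
	 */
	return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
}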
3203 
3204 
3205 static int
3206 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3207     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3208     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3209     int num_seg, int num_nr_seg, int *rto_ok)
3210 {
3211 	struct sctp_gap_ack_block *frag, block;
3212 	struct sctp_tmit_chunk *tp1;
3213 	int i;
3214 	int num_frs = 0;
3215 	int chunk_freed;
3216 	int non_revocable;
3217 	uint16_t frag_strt, frag_end, prev_frag_end;
3218 
3219 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3220 	prev_frag_end = 0;
3221 	chunk_freed = 0;
3222 
3223 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3224 		if (i == num_seg) {
3225 			prev_frag_end = 0;
3226 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3227 		}
3228 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3229 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3230 		*offset += sizeof(block);
3231 		if (frag == NULL) {
3232 			return (chunk_freed);
3233 		}
3234 		frag_strt = ntohs(frag->start);
3235 		frag_end = ntohs(frag->end);
3236 
3237 		if (frag_strt > frag_end) {
3238 			/* This gap report is malformed, skip it. */
3239 			continue;
3240 		}
3241 		if (frag_strt <= prev_frag_end) {
3242 			/* This gap report is not in order, so restart. */
3243 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3244 		}
3245 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3246 			*biggest_tsn_acked = last_tsn + frag_end;
3247 		}
3248 		if (i < num_seg) {
3249 			non_revocable = 0;
3250 		} else {
3251 			non_revocable = 1;
3252 		}
3253 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3254 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3255 		    this_sack_lowest_newack, rto_ok)) {
3256 			chunk_freed = 1;
3257 		}
3258 		prev_frag_end = frag_end;
3259 	}
3260 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3261 		if (num_frs)
3262 			sctp_log_fr(*biggest_tsn_acked,
3263 			    *biggest_newly_acked_tsn,
3264 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3265 	}
3266 	return (chunk_freed);
3267 }
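
/*
 * Editor's sketch (hypothetical helper, not in the stack): each gap-ack
 * block carried in a SACK holds 16-bit start/end offsets that are
 * relative to the cumulative TSN ack.  For a SACK with cum-ack C and a
 * block {S, E}, the peer reports TSNs C+S through C+E (inclusive) as
 * received; sctp_handle_segments() above walks exactly that range for
 * every block.
 */
static inline void
example_gap_block_to_tsns(uint32_t cum_ack, uint16_t frag_strt,
    uint16_t frag_end, uint32_t *first_tsn, uint32_t *last_tsn)
{
	/* caller is expected to have rejected blocks with start > end */
	*first_tsn = cum_ack + frag_strt;
	*last_tsn = cum_ack + frag_end;
}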
3268 
3269 static void
3270 sctp_check_for_revoked(struct sctp_tcb *stcb,
3271     struct sctp_association *asoc, uint32_t cumack,
3272     uint32_t biggest_tsn_acked)
3273 {
3274 	struct sctp_tmit_chunk *tp1;
3275 
3276 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3277 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3278 			/*
3279 			 * ok, this guy is either ACKED or MARKED. If it is
3280 			 * ACKED it has been previously acked but not this
3281 			 * time, i.e. revoked. If it is MARKED it was ACKed
3282 			 * again.
3283 			 */
3284 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3285 				break;
3286 			}
3287 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3288 				/* it has been revoked */
3289 				tp1->sent = SCTP_DATAGRAM_SENT;
3290 				tp1->rec.data.chunk_was_revoked = 1;
3291 				/*
3292 				 * We must add this stuff back in to assure
3293 				 * timers and such get started.
3294 				 */
3295 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3296 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3297 					    tp1->whoTo->flight_size,
3298 					    tp1->book_size,
3299 					    (uint32_t)(uintptr_t)tp1->whoTo,
3300 					    tp1->rec.data.tsn);
3301 				}
3302 				sctp_flight_size_increase(tp1);
3303 				sctp_total_flight_increase(stcb, tp1);
3304 				/*
3305 				 * We inflate the cwnd to compensate for our
3306 				 * artificial inflation of the flight_size.
3307 				 */
3308 				tp1->whoTo->cwnd += tp1->book_size;
3309 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3310 					sctp_log_sack(asoc->last_acked_seq,
3311 					    cumack,
3312 					    tp1->rec.data.tsn,
3313 					    0,
3314 					    0,
3315 					    SCTP_LOG_TSN_REVOKED);
3316 				}
3317 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3318 				/* it has been re-acked in this SACK */
3319 				tp1->sent = SCTP_DATAGRAM_ACKED;
3320 			}
3321 		}
3322 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3323 			break;
3324 	}
3325 }
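
/*
 * Editor's sketch of the revocation rule implemented above, with
 * hypothetical states standing in for the real chunk flags: a chunk
 * beyond the cum-ack that is still ACKED was gap-acked by an earlier
 * SACK but not by this one, so it is treated as revoked and returned to
 * SENT (and put back in flight); a chunk left MARKED was covered again
 * by this SACK and is promoted back to ACKED.
 */
enum example_chunk_state { EX_SENT, EX_MARKED, EX_ACKED };

static inline enum example_chunk_state
example_revoke_transition(enum example_chunk_state cur)
{
	switch (cur) {
	case EX_ACKED:
		return (EX_SENT);	/* previously acked, missing now */
	case EX_MARKED:
		return (EX_ACKED);	/* re-acked by the current SACK */
	default:
		return (cur);
	}
}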
3326 
3327 
3328 static void
3329 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3330     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3331 {
3332 	struct sctp_tmit_chunk *tp1;
3333 	int strike_flag = 0;
3334 	struct timeval now;
3335 	int tot_retrans = 0;
3336 	uint32_t sending_seq;
3337 	struct sctp_nets *net;
3338 	int num_dests_sacked = 0;
3339 
3340 	/*
3341 	 * Select the sending_seq; this is either the next thing ready to be
3342 	 * sent but not transmitted, OR the next seq we will assign.
3343 	 */
3344 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3345 	if (tp1 == NULL) {
3346 		sending_seq = asoc->sending_seq;
3347 	} else {
3348 		sending_seq = tp1->rec.data.tsn;
3349 	}
3350 
3351 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3352 	if ((asoc->sctp_cmt_on_off > 0) &&
3353 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3354 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3355 			if (net->saw_newack)
3356 				num_dests_sacked++;
3357 		}
3358 	}
3359 	if (stcb->asoc.prsctp_supported) {
3360 		(void)SCTP_GETTIME_TIMEVAL(&now);
3361 	}
3362 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3363 		strike_flag = 0;
3364 		if (tp1->no_fr_allowed) {
3365 			/* this one had a timeout or something */
3366 			continue;
3367 		}
3368 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3369 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3370 				sctp_log_fr(biggest_tsn_newly_acked,
3371 				    tp1->rec.data.tsn,
3372 				    tp1->sent,
3373 				    SCTP_FR_LOG_CHECK_STRIKE);
3374 		}
3375 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3376 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3377 			/* done */
3378 			break;
3379 		}
3380 		if (stcb->asoc.prsctp_supported) {
3381 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3382 				/* Is it expired? */
3383 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3384 					/* Yes so drop it */
3385 					if (tp1->data != NULL) {
3386 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3387 						    SCTP_SO_NOT_LOCKED);
3388 					}
3389 					continue;
3390 				}
3391 			}
3392 
3393 		}
3394 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3395 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3396 			/* we are beyond the tsn in the sack  */
3397 			break;
3398 		}
3399 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3400 			/* either a RESEND, ACKED, or MARKED */
3401 			/* skip */
3402 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3403 				/* Continue striking FWD-TSN chunks */
3404 				tp1->rec.data.fwd_tsn_cnt++;
3405 			}
3406 			continue;
3407 		}
3408 		/*
3409 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3410 		 */
3411 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3412 			/*
3413 			 * No new acks were received for data sent to this
3414 			 * dest. Therefore, according to the SFR algo for
3415 			 * CMT, no data sent to this dest can be marked for
3416 			 * FR using this SACK.
3417 			 */
3418 			continue;
3419 		} else if (tp1->whoTo &&
3420 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3421 			    tp1->whoTo->this_sack_highest_newack) &&
3422 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3423 			/*
3424 			 * CMT: New acks were received for data sent to
3425 			 * this dest. But no new acks were seen for data
3426 			 * sent after tp1. Therefore, according to the SFR
3427 			 * algo for CMT, tp1 cannot be marked for FR using
3428 			 * this SACK. This step covers part of the DAC algo
3429 			 * and the HTNA algo as well.
3430 			 */
3431 			continue;
3432 		}
3433 		/*
3434 		 * Here we check to see if we have already done a FR
3435 		 * and if so we see if the biggest TSN we saw in the sack is
3436 		 * smaller than the recovery point. If so we don't strike
3437 		 * the tsn... otherwise we CAN strike the TSN.
3438 		 */
3439 		/*
3440 		 * @@@ JRI: Check for CMT if (accum_moved &&
3441 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3442 		 * 0)) {
3443 		 */
3444 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3445 			/*
3446 			 * Strike the TSN if in fast-recovery and cum-ack
3447 			 * moved.
3448 			 */
3449 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3450 				sctp_log_fr(biggest_tsn_newly_acked,
3451 				    tp1->rec.data.tsn,
3452 				    tp1->sent,
3453 				    SCTP_FR_LOG_STRIKE_CHUNK);
3454 			}
3455 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3456 				tp1->sent++;
3457 			}
3458 			if ((asoc->sctp_cmt_on_off > 0) &&
3459 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3460 				/*
3461 				 * CMT DAC algorithm: If SACK flag is set to
3462 				 * 0, then lowest_newack test will not pass
3463 				 * because it would have been set to the
3464 				 * cumack earlier. If the chunk is not already
3465 				 * marked for rtx, the SACK is not a mixed SACK,
3466 				 * and tp1 is not between two sacked TSNs, then
3467 				 * mark it by one more. NOTE that we are marking by one
3468 				 * additional time since the SACK DAC flag
3469 				 * indicates that two packets have been
3470 				 * received after this missing TSN.
3471 				 */
3472 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3473 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3474 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3475 						sctp_log_fr(16 + num_dests_sacked,
3476 						    tp1->rec.data.tsn,
3477 						    tp1->sent,
3478 						    SCTP_FR_LOG_STRIKE_CHUNK);
3479 					}
3480 					tp1->sent++;
3481 				}
3482 			}
3483 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3484 		    (asoc->sctp_cmt_on_off == 0)) {
3485 			/*
3486 			 * For those that have done a FR we must take
3487 			 * special consideration if we strike. I.e the
3488 			 * biggest_newly_acked must be higher than the
3489 			 * sending_seq at the time we did the FR.
3490 			 */
3491 			if (
3492 #ifdef SCTP_FR_TO_ALTERNATE
3493 			/*
3494 			 * If FR's go to new networks, then we must only do
3495 			 * this for singly homed asoc's. However if the FR's
3496 			 * go to the same network (Armando's work) then it's
3497 			 * ok to FR multiple times.
3498 			 */
3499 			    (asoc->numnets < 2)
3500 #else
3501 			    (1)
3502 #endif
3503 			    ) {
3504 
3505 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3506 				    tp1->rec.data.fast_retran_tsn)) {
3507 					/*
3508 					 * Strike the TSN, since this ack is
3509 					 * beyond where things were when we
3510 					 * did a FR.
3511 					 */
3512 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3513 						sctp_log_fr(biggest_tsn_newly_acked,
3514 						    tp1->rec.data.tsn,
3515 						    tp1->sent,
3516 						    SCTP_FR_LOG_STRIKE_CHUNK);
3517 					}
3518 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3519 						tp1->sent++;
3520 					}
3521 					strike_flag = 1;
3522 					if ((asoc->sctp_cmt_on_off > 0) &&
3523 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3524 						/*
3525 						 * CMT DAC algorithm: If
3526 						 * SACK flag is set to 0,
3527 						 * then lowest_newack test
3528 						 * will not pass because it
3529 						 * would have been set to
3530 						 * the cumack earlier. If the
3531 						 * chunk is not already marked
3532 						 * for rtx, the SACK is not a
3533 						 * mixed SACK, and tp1 is not
3534 						 * between two sacked TSNs,
3535 						 * then mark it by one more. NOTE that we
3536 						 * are marking by one
3537 						 * additional time since the
3538 						 * SACK DAC flag indicates
3539 						 * that two packets have
3540 						 * been received after this
3541 						 * missing TSN.
3542 						 */
3543 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3544 						    (num_dests_sacked == 1) &&
3545 						    SCTP_TSN_GT(this_sack_lowest_newack,
3546 						    tp1->rec.data.tsn)) {
3547 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3548 								sctp_log_fr(32 + num_dests_sacked,
3549 								    tp1->rec.data.tsn,
3550 								    tp1->sent,
3551 								    SCTP_FR_LOG_STRIKE_CHUNK);
3552 							}
3553 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3554 								tp1->sent++;
3555 							}
3556 						}
3557 					}
3558 				}
3559 			}
3560 			/*
3561 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3562 			 * algo covers HTNA.
3563 			 */
3564 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3565 		    biggest_tsn_newly_acked)) {
3566 			/*
3567 			 * We don't strike these: This is the HTNA
3568 			 * algorithm, i.e. we don't strike if our TSN is
3569 			 * larger than the Highest TSN Newly Acked.
3570 			 */
3571 			;
3572 		} else {
3573 			/* Strike the TSN */
3574 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3575 				sctp_log_fr(biggest_tsn_newly_acked,
3576 				    tp1->rec.data.tsn,
3577 				    tp1->sent,
3578 				    SCTP_FR_LOG_STRIKE_CHUNK);
3579 			}
3580 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3581 				tp1->sent++;
3582 			}
3583 			if ((asoc->sctp_cmt_on_off > 0) &&
3584 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3585 				/*
3586 				 * CMT DAC algorithm: If SACK flag is set to
3587 				 * 0, then lowest_newack test will not pass
3588 				 * because it would have been set to the
3589 				 * cumack earlier. If the chunk is not already
3590 				 * marked for rtx, the SACK is not a mixed SACK,
3591 				 * and tp1 is not between two sacked TSNs, then
3592 				 * mark it by one more. NOTE that we are marking by one
3593 				 * additional time since the SACK DAC flag
3594 				 * indicates that two packets have been
3595 				 * received after this missing TSN.
3596 				 */
3597 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3598 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3599 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3600 						sctp_log_fr(48 + num_dests_sacked,
3601 						    tp1->rec.data.tsn,
3602 						    tp1->sent,
3603 						    SCTP_FR_LOG_STRIKE_CHUNK);
3604 					}
3605 					tp1->sent++;
3606 				}
3607 			}
3608 		}
3609 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3610 			struct sctp_nets *alt;
3611 
3612 			/* fix counts and things */
3613 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3614 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3615 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3616 				    tp1->book_size,
3617 				    (uint32_t)(uintptr_t)tp1->whoTo,
3618 				    tp1->rec.data.tsn);
3619 			}
3620 			if (tp1->whoTo) {
3621 				tp1->whoTo->net_ack++;
3622 				sctp_flight_size_decrease(tp1);
3623 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3624 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3625 					    tp1);
3626 				}
3627 			}
3628 
3629 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3630 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3631 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3632 			}
3633 			/* add back to the rwnd */
3634 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3635 
3636 			/* remove from the total flight */
3637 			sctp_total_flight_decrease(stcb, tp1);
3638 
3639 			if ((stcb->asoc.prsctp_supported) &&
3640 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3641 				/*
3642 				 * Has it been retransmitted tv_sec times? -
3643 				 * we store the retran count there.
3644 				 */
3645 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3646 					/* Yes, so drop it */
3647 					if (tp1->data != NULL) {
3648 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3649 						    SCTP_SO_NOT_LOCKED);
3650 					}
3651 					/* Make sure to flag we had a FR */
3652 					if (tp1->whoTo != NULL) {
3653 						tp1->whoTo->net_ack++;
3654 					}
3655 					continue;
3656 				}
3657 			}
3658 			/*
3659 			 * SCTP_PRINTF("OK, we are now ready to FR this
3660 			 * guy\n");
3661 			 */
3662 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3663 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3664 				    0, SCTP_FR_MARKED);
3665 			}
3666 			if (strike_flag) {
3667 				/* This is a subsequent FR */
3668 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3669 			}
3670 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3671 			if (asoc->sctp_cmt_on_off > 0) {
3672 				/*
3673 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3674 				 * If CMT is being used, then pick dest with
3675 				 * largest ssthresh for any retransmission.
3676 				 */
3677 				tp1->no_fr_allowed = 1;
3678 				alt = tp1->whoTo;
3679 				/* sa_ignore NO_NULL_CHK */
3680 				if (asoc->sctp_cmt_pf > 0) {
3681 					/*
3682 					 * JRS 5/18/07 - If CMT PF is on,
3683 					 * use the PF version of
3684 					 * find_alt_net()
3685 					 */
3686 					alt = sctp_find_alternate_net(stcb, alt, 2);
3687 				} else {
3688 					/*
3689 					 * JRS 5/18/07 - If only CMT is on,
3690 					 * use the CMT version of
3691 					 * find_alt_net()
3692 					 */
3693 					/* sa_ignore NO_NULL_CHK */
3694 					alt = sctp_find_alternate_net(stcb, alt, 1);
3695 				}
3696 				if (alt == NULL) {
3697 					alt = tp1->whoTo;
3698 				}
3699 				/*
3700 				 * CUCv2: If a different dest is picked for
3701 				 * the retransmission, then new
3702 				 * (rtx-)pseudo_cumack needs to be tracked
3703 				 * for orig dest. Let CUCv2 track new (rtx-)
3704 				 * pseudo-cumack always.
3705 				 */
3706 				if (tp1->whoTo) {
3707 					tp1->whoTo->find_pseudo_cumack = 1;
3708 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3709 				}
3710 
3711 			} else {	/* CMT is OFF */
3712 
3713 #ifdef SCTP_FR_TO_ALTERNATE
3714 				/* Can we find an alternate? */
3715 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3716 #else
3717 				/*
3718 				 * default behavior is to NOT retransmit
3719 				 * FR's to an alternate. Armando Caro's
3720 				 * paper details why.
3721 				 */
3722 				alt = tp1->whoTo;
3723 #endif
3724 			}
3725 
3726 			tp1->rec.data.doing_fast_retransmit = 1;
3727 			tot_retrans++;
3728 			/* mark the sending seq for possible subsequent FR's */
3729 			/*
3730 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3731 			 * (uint32_t)tpi->rec.data.tsn);
3732 			 */
3733 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3734 				/*
3735 				 * If the send queue is empty then it's
3736 				 * the next sequence number that will be
3737 				 * assigned so we subtract one from this to
3738 				 * get the one we last sent.
3739 				 */
3740 				tp1->rec.data.fast_retran_tsn = sending_seq;
3741 			} else {
3742 				/*
3743 				 * If there are chunks on the send queue
3744 				 * (unsent data that has made it from the
3745 				 * stream queues but not out the door), we
3746 				 * take the first one (which will have the
3747 				 * lowest TSN) and subtract one to get the
3748 				 * one we last sent.
3749 				 */
3750 				struct sctp_tmit_chunk *ttt;
3751 
3752 				ttt = TAILQ_FIRST(&asoc->send_queue);
3753 				tp1->rec.data.fast_retran_tsn =
3754 				    ttt->rec.data.tsn;
3755 			}
3756 
3757 			if (tp1->do_rtt) {
3758 				/*
3759 				 * this guy had an RTO calculation pending on
3760 				 * it, cancel it
3761 				 */
3762 				if ((tp1->whoTo != NULL) &&
3763 				    (tp1->whoTo->rto_needed == 0)) {
3764 					tp1->whoTo->rto_needed = 1;
3765 				}
3766 				tp1->do_rtt = 0;
3767 			}
3768 			if (alt != tp1->whoTo) {
3769 				/* yes, there is an alternate. */
3770 				sctp_free_remote_addr(tp1->whoTo);
3771 				/* sa_ignore FREED_MEMORY */
3772 				tp1->whoTo = alt;
3773 				atomic_add_int(&alt->ref_count, 1);
3774 			}
3775 		}
3776 	}
3777 }
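
/*
 * Editor's sketch of the strike bookkeeping above: below
 * SCTP_DATAGRAM_RESEND the "sent" field acts as a miss counter, each
 * qualifying SACK adds one strike, and CMT DAC may add a second strike
 * because its flag implies two packets arrived past the hole.  Reaching
 * the resend threshold marks the chunk for fast retransmission.  The
 * threshold value here is a hypothetical stand-in.
 */
#define	EX_RESEND_THRESHOLD	3	/* stand-in for SCTP_DATAGRAM_RESEND */

static inline int
example_strike(int strikes, int dac_second_strike)
{
	if (strikes < EX_RESEND_THRESHOLD)
		strikes++;	/* one strike for this SACK */
	if (dac_second_strike && strikes < EX_RESEND_THRESHOLD)
		strikes++;	/* CMT DAC: two packets seen past the hole */
	return (strikes);
}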
3778 
3779 struct sctp_tmit_chunk *
3780 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3781     struct sctp_association *asoc)
3782 {
3783 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3784 	struct timeval now;
3785 	int now_filled = 0;
3786 
3787 	if (asoc->prsctp_supported == 0) {
3788 		return (NULL);
3789 	}
3790 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3791 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3792 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3793 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3794 			/* no chance to advance, out of here */
3795 			break;
3796 		}
3797 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3798 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3799 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3800 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3801 				    asoc->advanced_peer_ack_point,
3802 				    tp1->rec.data.tsn, 0, 0);
3803 			}
3804 		}
3805 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3806 			/*
3807 			 * We can't fwd-tsn past any that are reliable, i.e.
3808 			 * retransmitted until the asoc fails.
3809 			 */
3810 			break;
3811 		}
3812 		if (!now_filled) {
3813 			(void)SCTP_GETTIME_TIMEVAL(&now);
3814 			now_filled = 1;
3815 		}
3816 		/*
3817 		 * Now we have a chunk which is marked for another
3818 		 * retransmission to a PR-stream but has maybe run out of
3819 		 * its chances already OR has been marked to skip now. Can
3820 		 * we skip it if it's a resend?
3821 		 */
3822 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3823 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3824 			/*
3825 			 * Now is this one marked for resend and its time is
3826 			 * now up?
3827 			 */
3828 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3829 				/* Yes so drop it */
3830 				if (tp1->data) {
3831 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3832 					    1, SCTP_SO_NOT_LOCKED);
3833 				}
3834 			} else {
3835 				/*
3836 				 * No, we are done when we hit one marked for
3837 				 * resend whose time has not expired.
3838 				 */
3839 				break;
3840 			}
3841 		}
3842 		/*
3843 		 * Ok now if this chunk is marked to drop it we can clean up
3844 		 * the chunk, advance our peer ack point and we can check
3845 		 * the next chunk.
3846 		 */
3847 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3848 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3849 			/* the advanced peer-ack point moves forward */
3850 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3851 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3852 				a_adv = tp1;
3853 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3854 				/* No update but we do save the chk */
3855 				a_adv = tp1;
3856 			}
3857 		} else {
3858 			/*
3859 			 * If it is still in RESEND we can advance no
3860 			 * further
3861 			 */
3862 			break;
3863 		}
3864 	}
3865 	return (a_adv);
3866 }
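
/*
 * Editor's sketch (hypothetical, simplified) of the PR-SCTP rule from
 * RFC 3758 that sctp_try_advance_peer_ack_point() applies: the
 * Advanced.Peer.Ack.Point may slide forward over consecutive abandoned
 * TSNs, but must stop at the first chunk that the peer may still
 * receive reliably.
 */
static inline uint32_t
example_advance_ack_point(uint32_t adv_point, const int *abandoned, int n)
{
	int i;

	/* abandoned[i] describes TSN adv_point + 1 + i on the sent queue */
	for (i = 0; i < n; i++) {
		if (!abandoned[i])
			break;		/* first reliable chunk: stop */
		adv_point += 1;
	}
	return (adv_point);
}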
3867 
3868 static int
3869 sctp_fs_audit(struct sctp_association *asoc)
3870 {
3871 	struct sctp_tmit_chunk *chk;
3872 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3873 	int ret;
3874 #ifndef INVARIANTS
3875 	int entry_flight, entry_cnt;
3876 #endif
3877 
3878 	ret = 0;
3879 #ifndef INVARIANTS
3880 	entry_flight = asoc->total_flight;
3881 	entry_cnt = asoc->total_flight_count;
3882 #endif
3883 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3884 		return (0);
3885 
3886 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3887 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3888 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3889 			    chk->rec.data.tsn,
3890 			    chk->send_size,
3891 			    chk->snd_count);
3892 			inflight++;
3893 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3894 			resend++;
3895 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3896 			inbetween++;
3897 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3898 			above++;
3899 		} else {
3900 			acked++;
3901 		}
3902 	}
3903 
3904 	if ((inflight > 0) || (inbetween > 0)) {
3905 #ifdef INVARIANTS
3906 		panic("Flight size-express incorrect? \n");
3907 #else
3908 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3909 		    entry_flight, entry_cnt);
3910 
3911 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3912 		    inflight, inbetween, resend, above, acked);
3913 		ret = 1;
3914 #endif
3915 	}
3916 	return (ret);
3917 }
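
/*
 * Editor's note, as a reduced sketch: when sctp_fs_audit() reports an
 * inconsistency, the callers below rebuild the flight accounting from
 * scratch by walking the sent queue and re-deriving the totals from the
 * per-chunk state.  A hypothetical version of that re-derivation:
 */
static inline uint32_t
example_rebuild_total_flight(const uint32_t *book_size, const int *in_flight,
    int n)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* in_flight[i] mirrors "sent < SCTP_DATAGRAM_RESEND" */
		if (in_flight[i])
			total += book_size[i];
	}
	return (total);
}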
3918 
3919 
3920 static void
3921 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3922     struct sctp_association *asoc,
3923     struct sctp_tmit_chunk *tp1)
3924 {
3925 	tp1->window_probe = 0;
3926 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3927 		/* TSNs skipped; we do NOT move them back. */
3928 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3929 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3930 		    tp1->book_size,
3931 		    (uint32_t)(uintptr_t)tp1->whoTo,
3932 		    tp1->rec.data.tsn);
3933 		return;
3934 	}
3935 	/* First setup this by shrinking flight */
3936 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3937 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3938 		    tp1);
3939 	}
3940 	sctp_flight_size_decrease(tp1);
3941 	sctp_total_flight_decrease(stcb, tp1);
3942 	/* Now mark for resend */
3943 	tp1->sent = SCTP_DATAGRAM_RESEND;
3944 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3945 
3946 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3947 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3948 		    tp1->whoTo->flight_size,
3949 		    tp1->book_size,
3950 		    (uint32_t)(uintptr_t)tp1->whoTo,
3951 		    tp1->rec.data.tsn);
3952 	}
3953 }
3954 
3955 void
3956 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3957     uint32_t rwnd, int *abort_now, int ecne_seen)
3958 {
3959 	struct sctp_nets *net;
3960 	struct sctp_association *asoc;
3961 	struct sctp_tmit_chunk *tp1, *tp2;
3962 	uint32_t old_rwnd;
3963 	int win_probe_recovery = 0;
3964 	int win_probe_recovered = 0;
3965 	int j, done_once = 0;
3966 	int rto_ok = 1;
3967 	uint32_t send_s;
3968 
3969 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3970 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3971 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3972 	}
3973 	SCTP_TCB_LOCK_ASSERT(stcb);
3974 #ifdef SCTP_ASOCLOG_OF_TSNS
3975 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3976 	stcb->asoc.cumack_log_at++;
3977 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3978 		stcb->asoc.cumack_log_at = 0;
3979 	}
3980 #endif
3981 	asoc = &stcb->asoc;
3982 	old_rwnd = asoc->peers_rwnd;
3983 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3984 		/* old ack */
3985 		return;
3986 	} else if (asoc->last_acked_seq == cumack) {
3987 		/* Window update sack */
3988 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3989 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3990 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3991 			/* SWS sender side engages */
3992 			asoc->peers_rwnd = 0;
3993 		}
3994 		if (asoc->peers_rwnd > old_rwnd) {
3995 			goto again;
3996 		}
3997 		return;
3998 	}
3999 
4000 	/* First setup for CC stuff */
4001 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4002 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4003 			/* Drag along the window_tsn for cwr's */
4004 			net->cwr_window_tsn = cumack;
4005 		}
4006 		net->prev_cwnd = net->cwnd;
4007 		net->net_ack = 0;
4008 		net->net_ack2 = 0;
4009 
4010 		/*
4011 		 * CMT: Reset CUC and Fast recovery algo variables before
4012 		 * SACK processing
4013 		 */
4014 		net->new_pseudo_cumack = 0;
4015 		net->will_exit_fast_recovery = 0;
4016 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4017 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4018 		}
4019 	}
4020 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4021 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4022 		    sctpchunk_listhead);
4023 		send_s = tp1->rec.data.tsn + 1;
4024 	} else {
4025 		send_s = asoc->sending_seq;
4026 	}
4027 	if (SCTP_TSN_GE(cumack, send_s)) {
4028 		struct mbuf *op_err;
4029 		char msg[SCTP_DIAG_INFO_LEN];
4030 
4031 		*abort_now = 1;
4032 		/* XXX */
4033 		SCTP_SNPRINTF(msg, sizeof(msg),
4034 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4035 		    cumack, send_s);
4036 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4037 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4038 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4039 		return;
4040 	}
4041 	asoc->this_sack_highest_gap = cumack;
4042 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4043 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4044 		    stcb->asoc.overall_error_count,
4045 		    0,
4046 		    SCTP_FROM_SCTP_INDATA,
4047 		    __LINE__);
4048 	}
4049 	stcb->asoc.overall_error_count = 0;
4050 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4051 		/* process the new consecutive TSN first */
4052 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4053 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4054 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4055 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4056 				}
4057 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4058 					/*
4059 					 * If it is less than ACKED, it is
4060 					 * now no longer in flight. Higher
4061 					 * values may occur during marking.
4062 					 */
4063 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4064 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4065 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4066 							    tp1->whoTo->flight_size,
4067 							    tp1->book_size,
4068 							    (uint32_t)(uintptr_t)tp1->whoTo,
4069 							    tp1->rec.data.tsn);
4070 						}
4071 						sctp_flight_size_decrease(tp1);
4072 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4073 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4074 							    tp1);
4075 						}
4076 						/* sa_ignore NO_NULL_CHK */
4077 						sctp_total_flight_decrease(stcb, tp1);
4078 					}
4079 					tp1->whoTo->net_ack += tp1->send_size;
4080 					if (tp1->snd_count < 2) {
4081 						/*
4082 						 * True non-retransmitted
4083 						 * chunk
4084 						 */
4085 						tp1->whoTo->net_ack2 +=
4086 						    tp1->send_size;
4087 
4088 						/* update RTO too? */
4089 						if (tp1->do_rtt) {
4090 							if (rto_ok &&
4091 							    sctp_calculate_rto(stcb,
4092 							    &stcb->asoc,
4093 							    tp1->whoTo,
4094 							    &tp1->sent_rcv_time,
4095 							    SCTP_RTT_FROM_DATA)) {
4096 								rto_ok = 0;
4097 							}
4098 							if (tp1->whoTo->rto_needed == 0) {
4099 								tp1->whoTo->rto_needed = 1;
4100 							}
4101 							tp1->do_rtt = 0;
4102 						}
4103 					}
4104 					/*
4105 					 * CMT: CUCv2 algorithm. From the
4106 					 * cumack'd TSNs, for each TSN being
4107 					 * acked for the first time, set the
4108 					 * following variables for the
4109 					 * corresp destination.
4110 					 * new_pseudo_cumack will trigger a
4111 					 * cwnd update.
4112 					 * find_(rtx_)pseudo_cumack will
4113 					 * trigger search for the next
4114 					 * expected (rtx-)pseudo-cumack.
4115 					 */
4116 					tp1->whoTo->new_pseudo_cumack = 1;
4117 					tp1->whoTo->find_pseudo_cumack = 1;
4118 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4119 
4120 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4121 						/* sa_ignore NO_NULL_CHK */
4122 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4123 					}
4124 				}
4125 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4126 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4127 				}
4128 				if (tp1->rec.data.chunk_was_revoked) {
4129 					/* deflate the cwnd */
4130 					tp1->whoTo->cwnd -= tp1->book_size;
4131 					tp1->rec.data.chunk_was_revoked = 0;
4132 				}
4133 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4134 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4135 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4136 #ifdef INVARIANTS
4137 					} else {
4138 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4139 #endif
4140 					}
4141 				}
4142 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4143 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4144 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4145 					asoc->trigger_reset = 1;
4146 				}
4147 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4148 				if (tp1->data) {
4149 					/* sa_ignore NO_NULL_CHK */
4150 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4151 					sctp_m_freem(tp1->data);
4152 					tp1->data = NULL;
4153 				}
4154 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4155 					sctp_log_sack(asoc->last_acked_seq,
4156 					    cumack,
4157 					    tp1->rec.data.tsn,
4158 					    0,
4159 					    0,
4160 					    SCTP_LOG_FREE_SENT);
4161 				}
4162 				asoc->sent_queue_cnt--;
4163 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4164 			} else {
4165 				break;
4166 			}
4167 		}
4168 
4169 	}
4170 	/* sa_ignore NO_NULL_CHK */
4171 	if (stcb->sctp_socket) {
4172 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4173 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4174 			/* sa_ignore NO_NULL_CHK */
4175 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4176 		}
4177 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4178 	} else {
4179 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4180 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4181 		}
4182 	}
4183 
4184 	/* JRS - Use the congestion control given in the CC module */
4185 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4186 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4187 			if (net->net_ack2 > 0) {
4188 				/*
4189 				 * Karn's rule applies to clearing error
4190 				 * count; this is optional.
4191 				 */
4192 				net->error_count = 0;
4193 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4194 					/* addr came good */
4195 					net->dest_state |= SCTP_ADDR_REACHABLE;
4196 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4197 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4198 				}
4199 				if (net == stcb->asoc.primary_destination) {
4200 					if (stcb->asoc.alternate) {
4201 						/*
4202 						 * release the alternate,
4203 						 * primary is good
4204 						 */
4205 						sctp_free_remote_addr(stcb->asoc.alternate);
4206 						stcb->asoc.alternate = NULL;
4207 					}
4208 				}
4209 				if (net->dest_state & SCTP_ADDR_PF) {
4210 					net->dest_state &= ~SCTP_ADDR_PF;
4211 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4212 					    stcb->sctp_ep, stcb, net,
4213 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4214 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4215 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4216 					/* Done with this net */
4217 					net->net_ack = 0;
4218 				}
4219 				/* restore any doubled timers */
4220 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4221 				if (net->RTO < stcb->asoc.minrto) {
4222 					net->RTO = stcb->asoc.minrto;
4223 				}
4224 				if (net->RTO > stcb->asoc.maxrto) {
4225 					net->RTO = stcb->asoc.maxrto;
4226 				}
4227 			}
4228 		}
4229 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4230 	}
4231 	asoc->last_acked_seq = cumack;
4232 
4233 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4234 		/* nothing left in-flight */
4235 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4236 			net->flight_size = 0;
4237 			net->partial_bytes_acked = 0;
4238 		}
4239 		asoc->total_flight = 0;
4240 		asoc->total_flight_count = 0;
4241 	}
4242 
4243 	/* RWND update */
4244 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4245 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4246 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4247 		/* SWS sender side engages */
4248 		asoc->peers_rwnd = 0;
4249 	}
4250 	if (asoc->peers_rwnd > old_rwnd) {
4251 		win_probe_recovery = 1;
4252 	}
4253 	/* Now assure a timer where data is queued at */
4254 again:
4255 	j = 0;
4256 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4257 		if (win_probe_recovery && (net->window_probe)) {
4258 			win_probe_recovered = 1;
4259 			/*
4260 			 * Find the first chunk that was used with a window
4261 			 * probe and clear its sent mark
4262 			 */
4263 			/* sa_ignore FREED_MEMORY */
4264 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4265 				if (tp1->window_probe) {
4266 					/* move back to data send queue */
4267 					sctp_window_probe_recovery(stcb, asoc, tp1);
4268 					break;
4269 				}
4270 			}
4271 		}
4272 		if (net->flight_size) {
4273 			j++;
4274 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4275 			if (net->window_probe) {
4276 				net->window_probe = 0;
4277 			}
4278 		} else {
4279 			if (net->window_probe) {
4280 				/*
4281 				 * In window probes we must assure a timer
4282 				 * is still running there
4283 				 */
4284 				net->window_probe = 0;
4285 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4286 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4287 				}
4288 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4289 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4290 				    stcb, net,
4291 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4292 			}
4293 		}
4294 	}
4295 	if ((j == 0) &&
4296 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4297 	    (asoc->sent_queue_retran_cnt == 0) &&
4298 	    (win_probe_recovered == 0) &&
4299 	    (done_once == 0)) {
4300 		/*
4301 		 * huh, this should not happen unless all packets are
4302 		 * PR-SCTP and marked to skip of course.
4303 		 */
4304 		if (sctp_fs_audit(asoc)) {
4305 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4306 				net->flight_size = 0;
4307 			}
4308 			asoc->total_flight = 0;
4309 			asoc->total_flight_count = 0;
4310 			asoc->sent_queue_retran_cnt = 0;
4311 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4312 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4313 					sctp_flight_size_increase(tp1);
4314 					sctp_total_flight_increase(stcb, tp1);
4315 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4316 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4317 				}
4318 			}
4319 		}
4320 		done_once = 1;
4321 		goto again;
4322 	}
4323 	/**********************************/
4324 	/* Now what about shutdown issues */
4325 	/**********************************/
4326 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4327 		/* nothing left on sendqueue.. consider done */
4328 		/* clean up */
4329 		if ((asoc->stream_queue_cnt == 1) &&
4330 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4331 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4332 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4333 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4334 		}
4335 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4336 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4337 		    (asoc->stream_queue_cnt == 1) &&
4338 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4339 			struct mbuf *op_err;
4340 
4341 			*abort_now = 1;
4342 			/* XXX */
4343 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4344 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4345 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4346 			return;
4347 		}
4348 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4349 		    (asoc->stream_queue_cnt == 0)) {
4350 			struct sctp_nets *netp;
4351 
4352 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4353 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4354 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4355 			}
4356 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4357 			sctp_stop_timers_for_shutdown(stcb);
4358 			if (asoc->alternate) {
4359 				netp = asoc->alternate;
4360 			} else {
4361 				netp = asoc->primary_destination;
4362 			}
4363 			sctp_send_shutdown(stcb, netp);
4364 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4365 			    stcb->sctp_ep, stcb, netp);
4366 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4367 			    stcb->sctp_ep, stcb, NULL);
4368 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4369 		    (asoc->stream_queue_cnt == 0)) {
4370 			struct sctp_nets *netp;
4371 
4372 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4373 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4374 			sctp_stop_timers_for_shutdown(stcb);
4375 			if (asoc->alternate) {
4376 				netp = asoc->alternate;
4377 			} else {
4378 				netp = asoc->primary_destination;
4379 			}
4380 			sctp_send_shutdown_ack(stcb, netp);
4381 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4382 			    stcb->sctp_ep, stcb, netp);
4383 		}
4384 	}
4385 	/*********************************************/
4386 	/* Here we perform PR-SCTP procedures        */
4387 	/* (section 4.2)                             */
4388 	/*********************************************/
4389 	/* C1. update advancedPeerAckPoint */
4390 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4391 		asoc->advanced_peer_ack_point = cumack;
4392 	}
4393 	/* PR-Sctp issues need to be addressed too */
4394 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4395 		struct sctp_tmit_chunk *lchk;
4396 		uint32_t old_adv_peer_ack_point;
4397 
4398 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4399 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4400 		/* C3. See if we need to send a Fwd-TSN */
4401 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4402 			/*
4403 			 * ISSUE with ECN, see FWD-TSN processing.
4404 			 */
4405 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4406 				send_forward_tsn(stcb, asoc);
4407 			} else if (lchk) {
4408 				/* try to FR fwd-tsn's that get lost too */
4409 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4410 					send_forward_tsn(stcb, asoc);
4411 				}
4412 			}
4413 		}
4414 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4415 			if (lchk->whoTo != NULL) {
4416 				break;
4417 			}
4418 		}
4419 		if (lchk != NULL) {
4420 			/* Assure a timer is up */
4421 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4422 			    stcb->sctp_ep, stcb, lchk->whoTo);
4423 		}
4424 	}
4425 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4426 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4427 		    rwnd,
4428 		    stcb->asoc.peers_rwnd,
4429 		    stcb->asoc.total_flight,
4430 		    stcb->asoc.total_output_queue_size);
4431 	}
4432 }
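
/*
 * Editor's sketch of the peer rwnd computation both SACK handlers use:
 * the window advertised in the SACK is reduced by the bytes still in
 * flight plus a per-chunk overhead allowance (sctp_peer_chunk_oh), and
 * the result is clamped to zero once it falls below the sender-side
 * silly-window threshold.  Parameter names are hypothetical stand-ins
 * for the real fields.
 */
static inline uint32_t
example_peers_rwnd(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t flight_count, uint32_t chunk_oh, uint32_t sws_threshold)
{
	uint32_t in_flight = total_flight + flight_count * chunk_oh;

	if (a_rwnd <= in_flight)
		return (0);		/* window fully consumed */
	if (a_rwnd - in_flight < sws_threshold)
		return (0);		/* SWS avoidance engages */
	return (a_rwnd - in_flight);
}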
4433 
4434 void
4435 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4436     struct sctp_tcb *stcb,
4437     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4438     int *abort_now, uint8_t flags,
4439     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4440 {
4441 	struct sctp_association *asoc;
4442 	struct sctp_tmit_chunk *tp1, *tp2;
4443 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4444 	uint16_t wake_him = 0;
4445 	uint32_t send_s = 0;
4446 	long j;
4447 	int accum_moved = 0;
4448 	int will_exit_fast_recovery = 0;
4449 	uint32_t a_rwnd, old_rwnd;
4450 	int win_probe_recovery = 0;
4451 	int win_probe_recovered = 0;
4452 	struct sctp_nets *net = NULL;
4453 	int done_once;
4454 	int rto_ok = 1;
4455 	uint8_t reneged_all = 0;
4456 	uint8_t cmt_dac_flag;
4457 
4458 	/*
4459 	 * we take any chance we can to service our queues since we cannot
4460 	 * get awoken when the socket is read from :<
4461 	 */
4462 	/*
4463 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4464 	 * old sack, if so discard. 2) If there is nothing left in the send
4465 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4466 	 * too, update any rwnd change and verify no timers are running,
4467 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4468 	 * moved; process these first and note that it moved. 4) Process any
4469 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4470 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4471 	 * sync up flightsizes and things, stop all timers and also check
4472 	 * for shutdown_pending state. If so then go ahead and send off the
4473 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4474 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4475 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4476 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4477 	 * if in shutdown_recv state.
4478 	 */
4479 	SCTP_TCB_LOCK_ASSERT(stcb);
4480 	/* CMT DAC algo */
4481 	this_sack_lowest_newack = 0;
4482 	SCTP_STAT_INCR(sctps_slowpath_sack);
4483 	last_tsn = cum_ack;
4484 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4485 #ifdef SCTP_ASOCLOG_OF_TSNS
4486 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4487 	stcb->asoc.cumack_log_at++;
4488 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4489 		stcb->asoc.cumack_log_at = 0;
4490 	}
4491 #endif
4492 	a_rwnd = rwnd;
4493 
4494 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4495 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4496 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4497 	}
4498 
4499 	old_rwnd = stcb->asoc.peers_rwnd;
4500 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4501 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4502 		    stcb->asoc.overall_error_count,
4503 		    0,
4504 		    SCTP_FROM_SCTP_INDATA,
4505 		    __LINE__);
4506 	}
4507 	stcb->asoc.overall_error_count = 0;
4508 	asoc = &stcb->asoc;
4509 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4510 		sctp_log_sack(asoc->last_acked_seq,
4511 		    cum_ack,
4512 		    0,
4513 		    num_seg,
4514 		    num_dup,
4515 		    SCTP_LOG_NEW_SACK);
4516 	}
4517 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4518 		uint16_t i;
4519 		uint32_t *dupdata, dblock;
4520 
4521 		for (i = 0; i < num_dup; i++) {
4522 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4523 			    sizeof(uint32_t), (uint8_t *)&dblock);
4524 			if (dupdata == NULL) {
4525 				break;
4526 			}
4527 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4528 		}
4529 	}
4530 	/* reality check */
4531 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4532 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4533 		    sctpchunk_listhead);
4534 		send_s = tp1->rec.data.tsn + 1;
4535 	} else {
4536 		tp1 = NULL;
4537 		send_s = asoc->sending_seq;
4538 	}
4539 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4540 		struct mbuf *op_err;
4541 		char msg[SCTP_DIAG_INFO_LEN];
4542 
4543 		/*
4544 		 * no way, we have not even sent this TSN out yet. Peer is
4545 		 * hopelessly messed up with us.
4546 		 */
4547 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4548 		    cum_ack, send_s);
4549 		if (tp1) {
4550 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4551 			    tp1->rec.data.tsn, (void *)tp1);
4552 		}
4553 hopeless_peer:
4554 		*abort_now = 1;
4555 		/* XXX */
4556 		SCTP_SNPRINTF(msg, sizeof(msg),
4557 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4558 		    cum_ack, send_s);
4559 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4560 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4561 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4562 		return;
4563 	}
4564 	/**********************/
4565 	/* 1) check the range */
4566 	/**********************/
4567 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4568 		/* acking something behind */
4569 		return;
4570 	}
4571 
4572 	/* update the Rwnd of the peer */
4573 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4574 	    TAILQ_EMPTY(&asoc->send_queue) &&
4575 	    (asoc->stream_queue_cnt == 0)) {
4576 		/* nothing left on send/sent and strmq */
4577 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4578 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4579 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4580 		}
4581 		asoc->peers_rwnd = a_rwnd;
4582 		if (asoc->sent_queue_retran_cnt) {
4583 			asoc->sent_queue_retran_cnt = 0;
4584 		}
4585 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4586 			/* SWS sender side engages */
4587 			asoc->peers_rwnd = 0;
4588 		}
4589 		/* stop any timers */
4590 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4591 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4592 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4593 			net->partial_bytes_acked = 0;
4594 			net->flight_size = 0;
4595 		}
4596 		asoc->total_flight = 0;
4597 		asoc->total_flight_count = 0;
4598 		return;
4599 	}
4600 	/*
4601 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4602 	 * things. The total byte count acked is tracked in net_ack AND
4603 	 * net_ack2 is used to track the total bytes acked that are
4604 	 * unambiguous and were never retransmitted. We track these on a per
4605 	 * destination address basis.
4606 	 */
4607 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4608 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4609 			/* Drag along the window_tsn for cwr's */
4610 			net->cwr_window_tsn = cum_ack;
4611 		}
4612 		net->prev_cwnd = net->cwnd;
4613 		net->net_ack = 0;
4614 		net->net_ack2 = 0;
4615 
4616 		/*
4617 		 * CMT: Reset CUC and Fast recovery algo variables before
4618 		 * SACK processing
4619 		 */
4620 		net->new_pseudo_cumack = 0;
4621 		net->will_exit_fast_recovery = 0;
4622 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4623 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4624 		}
4625 
4626 		/*
4627 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4628 		 * to be greater than the cumack. Also reset saw_newack to 0
4629 		 * for all dests.
4630 		 */
4631 		net->saw_newack = 0;
4632 		net->this_sack_highest_newack = last_tsn;
4633 	}
4634 	/* process the new consecutive TSN first */
4635 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4636 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4637 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4638 				accum_moved = 1;
4639 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4640 					/*
4641 					 * If it is less than ACKED, it is
4642 					 * now no longer in flight. Higher
4643 					 * values may occur during marking.
4644 					 */
4645 					if ((tp1->whoTo->dest_state &
4646 					    SCTP_ADDR_UNCONFIRMED) &&
4647 					    (tp1->snd_count < 2)) {
4648 						/*
4649 						 * If there was no retran,
4650 						 * the address is
4651 						 * unconfirmed, and what we
4652 						 * sent there is now
4653 						 * sacked, it's confirmed;
4654 						 * mark it so.
4655 						 */
4656 						tp1->whoTo->dest_state &=
4657 						    ~SCTP_ADDR_UNCONFIRMED;
4658 					}
4659 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4660 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4661 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4662 							    tp1->whoTo->flight_size,
4663 							    tp1->book_size,
4664 							    (uint32_t)(uintptr_t)tp1->whoTo,
4665 							    tp1->rec.data.tsn);
4666 						}
4667 						sctp_flight_size_decrease(tp1);
4668 						sctp_total_flight_decrease(stcb, tp1);
4669 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4670 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4671 							    tp1);
4672 						}
4673 					}
4674 					tp1->whoTo->net_ack += tp1->send_size;
4675 
4676 					/* CMT SFR and DAC algos */
4677 					this_sack_lowest_newack = tp1->rec.data.tsn;
4678 					tp1->whoTo->saw_newack = 1;
4679 
4680 					if (tp1->snd_count < 2) {
4681 						/*
4682 						 * True non-retransmitted
4683 						 * chunk
4684 						 */
4685 						tp1->whoTo->net_ack2 +=
4686 						    tp1->send_size;
4687 
4688 						/* update RTO too? */
4689 						if (tp1->do_rtt) {
4690 							if (rto_ok &&
4691 							    sctp_calculate_rto(stcb,
4692 							    &stcb->asoc,
4693 							    tp1->whoTo,
4694 							    &tp1->sent_rcv_time,
4695 							    SCTP_RTT_FROM_DATA)) {
4696 								rto_ok = 0;
4697 							}
4698 							if (tp1->whoTo->rto_needed == 0) {
4699 								tp1->whoTo->rto_needed = 1;
4700 							}
4701 							tp1->do_rtt = 0;
4702 						}
4703 					}
4704 					/*
4705 					 * CMT: CUCv2 algorithm. From the
4706 					 * cumack'd TSNs, for each TSN being
4707 					 * acked for the first time, set the
4708 					 * following variables for the
4709 					 * corresp destination.
4710 					 * new_pseudo_cumack will trigger a
4711 					 * cwnd update.
4712 					 * find_(rtx_)pseudo_cumack will
4713 					 * trigger search for the next
4714 					 * expected (rtx-)pseudo-cumack.
4715 					 */
4716 					tp1->whoTo->new_pseudo_cumack = 1;
4717 					tp1->whoTo->find_pseudo_cumack = 1;
4718 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4719 
4720 
4721 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4722 						sctp_log_sack(asoc->last_acked_seq,
4723 						    cum_ack,
4724 						    tp1->rec.data.tsn,
4725 						    0,
4726 						    0,
4727 						    SCTP_LOG_TSN_ACKED);
4728 					}
4729 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4730 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4731 					}
4732 				}
4733 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4734 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4735 #ifdef SCTP_AUDITING_ENABLED
4736 					sctp_audit_log(0xB3,
4737 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4738 #endif
4739 				}
4740 				if (tp1->rec.data.chunk_was_revoked) {
4741 					/* deflate the cwnd */
4742 					tp1->whoTo->cwnd -= tp1->book_size;
4743 					tp1->rec.data.chunk_was_revoked = 0;
4744 				}
4745 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4746 					tp1->sent = SCTP_DATAGRAM_ACKED;
4747 				}
4748 			}
4749 		} else {
4750 			break;
4751 		}
4752 	}
4753 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4754 	/* always set this up to cum-ack */
4755 	asoc->this_sack_highest_gap = last_tsn;
4756 
4757 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4758 
4759 		/*
4760 		 * thisSackHighestGap will increase while handling NEW
4761 		 * segments. this_sack_highest_newack will increase while
4762 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4763 		 * used for CMT DAC algo. saw_newack will also change.
4764 		 */
4765 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4766 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4767 		    num_seg, num_nr_seg, &rto_ok)) {
4768 			wake_him++;
4769 		}
4770 		/*
4771 		 * validate the biggest_tsn_acked in the gap acks if strict
4772 		 * adherence is wanted.
4773 		 */
4774 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4775 			/*
4776 			 * peer is either confused or we are under attack.
4777 			 * We must abort.
4778 			 */
4779 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4780 			    biggest_tsn_acked, send_s);
4781 			goto hopeless_peer;
4782 		}
4783 	}
4784 	/********************************************/
4785 	/* cancel ALL T3-send timers if accum moved */
4786 	/********************************************/
4787 	if (asoc->sctp_cmt_on_off > 0) {
4788 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4789 			if (net->new_pseudo_cumack)
4790 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4791 				    stcb, net,
4792 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4793 
4794 		}
4795 	} else {
4796 		if (accum_moved) {
4797 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4798 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4799 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4800 			}
4801 		}
4802 	}
4803 	/*********************************************/
4804 	/* drop the acked chunks from the sent queue */
4805 	/*********************************************/
4806 	asoc->last_acked_seq = cum_ack;
4807 
4808 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4809 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4810 			break;
4811 		}
4812 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4813 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4814 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4815 #ifdef INVARIANTS
4816 			} else {
4817 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4818 #endif
4819 			}
4820 		}
4821 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4822 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4823 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4824 			asoc->trigger_reset = 1;
4825 		}
4826 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4827 		if (PR_SCTP_ENABLED(tp1->flags)) {
4828 			if (asoc->pr_sctp_cnt != 0)
4829 				asoc->pr_sctp_cnt--;
4830 		}
4831 		asoc->sent_queue_cnt--;
4832 		if (tp1->data) {
4833 			/* sa_ignore NO_NULL_CHK */
4834 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4835 			sctp_m_freem(tp1->data);
4836 			tp1->data = NULL;
4837 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4838 				asoc->sent_queue_cnt_removeable--;
4839 			}
4840 		}
4841 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4842 			sctp_log_sack(asoc->last_acked_seq,
4843 			    cum_ack,
4844 			    tp1->rec.data.tsn,
4845 			    0,
4846 			    0,
4847 			    SCTP_LOG_FREE_SENT);
4848 		}
4849 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4850 		wake_him++;
4851 	}
4852 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4853 #ifdef INVARIANTS
4854 		panic("Flight size is positive but should be 0");
4855 #else
4856 		SCTP_PRINTF("Warning: flight size is %d but should be 0\n",
4857 		    asoc->total_flight);
4858 #endif
4859 		asoc->total_flight = 0;
4860 	}
4861 
4862 	/* sa_ignore NO_NULL_CHK */
4863 	if ((wake_him) && (stcb->sctp_socket)) {
4864 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4865 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4866 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4867 		}
4868 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4869 	} else {
4870 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4871 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4872 		}
4873 	}
4874 
4875 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4876 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4877 			/* Setup so we will exit RFC2582 fast recovery */
4878 			will_exit_fast_recovery = 1;
4879 		}
4880 	}
4881 	/*
4882 	 * Check for revoked fragments:
4883 	 *
4884 	 * If the previous SACK had no frags, then nothing can have been
4885 	 * revoked. If it had frags and we now have frags (num_seg > 0),
4886 	 * call sctp_check_for_revoked() to tell if the peer revoked some
4887 	 * of them. If it had frags and we now have NONE, the peer
4888 	 * revoked all ACKED fragments, since we had some before.
4889 	 */
4890 
4891 	if (num_seg) {
4892 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4893 		asoc->saw_sack_with_frags = 1;
4894 	} else if (asoc->saw_sack_with_frags) {
4895 		int cnt_revoked = 0;
4896 
4897 		/* Peer revoked all datagrams marked or acked */
4898 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4899 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4900 				tp1->sent = SCTP_DATAGRAM_SENT;
4901 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4902 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4903 					    tp1->whoTo->flight_size,
4904 					    tp1->book_size,
4905 					    (uint32_t)(uintptr_t)tp1->whoTo,
4906 					    tp1->rec.data.tsn);
4907 				}
4908 				sctp_flight_size_increase(tp1);
4909 				sctp_total_flight_increase(stcb, tp1);
4910 				tp1->rec.data.chunk_was_revoked = 1;
4911 				/*
4912 				 * To ensure that this increase in
4913 				 * flightsize, which is artificial, does not
4914 				 * throttle the sender, we also increase the
4915 				 * cwnd artificially.
4916 				 */
4917 				tp1->whoTo->cwnd += tp1->book_size;
4918 				cnt_revoked++;
4919 			}
4920 		}
4921 		if (cnt_revoked) {
4922 			reneged_all = 1;
4923 		}
4924 		asoc->saw_sack_with_frags = 0;
4925 	}
4926 	if (num_nr_seg > 0)
4927 		asoc->saw_sack_with_nr_frags = 1;
4928 	else
4929 		asoc->saw_sack_with_nr_frags = 0;
4930 
4931 	/* JRS - Use the congestion control given in the CC module */
4932 	if (ecne_seen == 0) {
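		/*
		 * net_ack2 counts newly acked bytes that were never
		 * retransmitted; a positive value is solid evidence the
		 * path works, so clear the error count, mark the address
		 * reachable, leave the PF state and undo any RTO backoff.
		 */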
4933 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4934 			if (net->net_ack2 > 0) {
4935 				/*
4936 				 * Karn's rule applies to clearing the
4937 				 * error count; this is optional.
4938 				 */
4939 				net->error_count = 0;
4940 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4941 					/* addr came good */
4942 					net->dest_state |= SCTP_ADDR_REACHABLE;
4943 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4944 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4945 				}
4946 
4947 				if (net == stcb->asoc.primary_destination) {
4948 					if (stcb->asoc.alternate) {
4949 						/*
4950 						 * release the alternate;
4951 						 * the primary is good
4952 						 */
4953 						sctp_free_remote_addr(stcb->asoc.alternate);
4954 						stcb->asoc.alternate = NULL;
4955 					}
4956 				}
4957 
4958 				if (net->dest_state & SCTP_ADDR_PF) {
4959 					net->dest_state &= ~SCTP_ADDR_PF;
4960 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4961 					    stcb->sctp_ep, stcb, net,
4962 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4963 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4964 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4965 					/* Done with this net */
4966 					net->net_ack = 0;
4967 				}
4968 				/* restore any doubled timers */
4969 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4970 				if (net->RTO < stcb->asoc.minrto) {
4971 					net->RTO = stcb->asoc.minrto;
4972 				}
4973 				if (net->RTO > stcb->asoc.maxrto) {
4974 					net->RTO = stcb->asoc.maxrto;
4975 				}
4976 			}
4977 		}
4978 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4979 	}
4980 
4981 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4982 		/* nothing left in-flight */
4983 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 			/* stop all timers */
4985 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986 			    stcb, net,
4987 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4988 			net->flight_size = 0;
4989 			net->partial_bytes_acked = 0;
4990 		}
4991 		asoc->total_flight = 0;
4992 		asoc->total_flight_count = 0;
4993 	}
4994 
4995 	/**********************************/
4996 	/* Now what about shutdown issues */
4997 	/**********************************/
4998 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4999 		/* nothing left on the send queue; consider done */
5000 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5001 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5002 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5003 		}
5004 		asoc->peers_rwnd = a_rwnd;
5005 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5006 			/* SWS sender side engages */
5007 			asoc->peers_rwnd = 0;
5008 		}
5009 		/* clean up */
5010 		if ((asoc->stream_queue_cnt == 1) &&
5011 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5012 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5013 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5014 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5015 		}
5016 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5017 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5018 		    (asoc->stream_queue_cnt == 1) &&
5019 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5020 			struct mbuf *op_err;
5021 
5022 			*abort_now = 1;
5023 			/* XXX */
5024 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5025 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5026 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5027 			return;
5028 		}
5029 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5030 		    (asoc->stream_queue_cnt == 0)) {
5031 			struct sctp_nets *netp;
5032 
5033 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5034 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5035 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5036 			}
5037 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5038 			sctp_stop_timers_for_shutdown(stcb);
5039 			if (asoc->alternate) {
5040 				netp = asoc->alternate;
5041 			} else {
5042 				netp = asoc->primary_destination;
5043 			}
5044 			sctp_send_shutdown(stcb, netp);
5045 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5046 			    stcb->sctp_ep, stcb, netp);
5047 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5048 			    stcb->sctp_ep, stcb, NULL);
5049 			return;
5050 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5051 		    (asoc->stream_queue_cnt == 0)) {
5052 			struct sctp_nets *netp;
5053 
5054 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5055 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5056 			sctp_stop_timers_for_shutdown(stcb);
5057 			if (asoc->alternate) {
5058 				netp = asoc->alternate;
5059 			} else {
5060 				netp = asoc->primary_destination;
5061 			}
5062 			sctp_send_shutdown_ack(stcb, netp);
5063 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5064 			    stcb->sctp_ep, stcb, netp);
5065 			return;
5066 		}
5067 	}
5068 	/*
5069 	 * Now here we are going to recycle net_ack for a different use...
5070 	 * HEADS UP.
5071 	 */
5072 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5073 		net->net_ack = 0;
5074 	}
5075 
5076 	/*
5077 	 * CMT DAC algorithm: if the SACK's DAC flag was 0, no extra
5078 	 * marking is to be done. Setting this_sack_lowest_newack to the
5079 	 * cum_ack automatically ensures that.
5080 	 */
5081 	if ((asoc->sctp_cmt_on_off > 0) &&
5082 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5083 	    (cmt_dac_flag == 0)) {
5084 		this_sack_lowest_newack = cum_ack;
5085 	}
5086 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5087 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5088 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5089 	}
5090 	/* JRS - Use the congestion control given in the CC module */
5091 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5092 
5093 	/* Now are we exiting loss recovery? */
5094 	if (will_exit_fast_recovery) {
5095 		/* Ok, we must exit fast recovery */
5096 		asoc->fast_retran_loss_recovery = 0;
5097 	}
5098 	if ((asoc->sat_t3_loss_recovery) &&
5099 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5100 		/* end satellite t3 loss recovery */
5101 		asoc->sat_t3_loss_recovery = 0;
5102 	}
5103 	/*
5104 	 * CMT Fast recovery
5105 	 */
5106 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5107 		if (net->will_exit_fast_recovery) {
5108 			/* Ok, we must exit fast recovery */
5109 			net->fast_retran_loss_recovery = 0;
5110 		}
5111 	}
5112 
5113 	/* Adjust and set the new rwnd value */
5114 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5115 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5116 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5117 	}
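	/*
	 * The usable peer window is the advertised a_rwnd minus what is
	 * already in flight, charging sctp_peer_chunk_oh bytes of
	 * bookkeeping overhead per chunk in flight; sctp_sbspace_sub()
	 * clamps the result at zero. For example, a_rwnd = 64000 with
	 * 12000 bytes outstanding in 10 chunks and an overhead of 256
	 * leaves 64000 - (12000 + 10 * 256) = 49440.
	 */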
5118 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5119 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5120 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5121 		/* SWS sender side engages */
5122 		asoc->peers_rwnd = 0;
5123 	}
5124 	if (asoc->peers_rwnd > old_rwnd) {
5125 		win_probe_recovery = 1;
5126 	}
5127 
5128 	/*
5129 	 * Now we must set things up so a timer is running for anyone
5130 	 * with outstanding data.
5131 	 */
5132 	done_once = 0;
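	/*
	 * j counts destinations that still have data in flight. If no
	 * destination does while the sent queue is non-empty, the
	 * flight-size accounting is audited and rebuilt once (see the
	 * done_once check below).
	 */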
5133 again:
5134 	j = 0;
5135 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5136 		if (win_probe_recovery && (net->window_probe)) {
5137 			win_probe_recovered = 1;
5138 			/*-
5139 			 * Find the first chunk that was used with a
5140 			 * window probe and clear the event. Put it
5141 			 * back into the send queue as if it has
5142 			 * not been sent.
5143 			 */
5144 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5145 				if (tp1->window_probe) {
5146 					sctp_window_probe_recovery(stcb, asoc, tp1);
5147 					break;
5148 				}
5149 			}
5150 		}
5151 		if (net->flight_size) {
5152 			j++;
5153 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5154 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5155 				    stcb->sctp_ep, stcb, net);
5156 			}
5157 			if (net->window_probe) {
5158 				net->window_probe = 0;
5159 			}
5160 		} else {
5161 			if (net->window_probe) {
5162 				/*
5163 				 * For window probes we must ensure that a
5164 				 * timer is still running there.
5165 				 */
5166 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5167 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5168 					    stcb->sctp_ep, stcb, net);
5169 
5170 				}
5171 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5172 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5173 				    stcb, net,
5174 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5175 			}
5176 		}
5177 	}
5178 	if ((j == 0) &&
5179 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5180 	    (asoc->sent_queue_retran_cnt == 0) &&
5181 	    (win_probe_recovered == 0) &&
5182 	    (done_once == 0)) {
5183 		/*
5184 		 * This should not happen unless all packets are
5185 		 * PR-SCTP and marked to be skipped, of course.
5186 		 */
5187 		if (sctp_fs_audit(asoc)) {
5188 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5189 				net->flight_size = 0;
5190 			}
5191 			asoc->total_flight = 0;
5192 			asoc->total_flight_count = 0;
5193 			asoc->sent_queue_retran_cnt = 0;
5194 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5195 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5196 					sctp_flight_size_increase(tp1);
5197 					sctp_total_flight_increase(stcb, tp1);
5198 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5199 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5200 				}
5201 			}
5202 		}
5203 		done_once = 1;
5204 		goto again;
5205 	}
5206 	/*********************************************/
5207 	/* Here we perform PR-SCTP procedures        */
5208 	/* (section 4.2)                             */
5209 	/*********************************************/
5210 	/* C1. update advancedPeerAckPoint */
5211 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5212 		asoc->advanced_peer_ack_point = cum_ack;
5213 	}
5214 	/* C2. try to further move advancedPeerAckPoint ahead */
5215 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5216 		struct sctp_tmit_chunk *lchk;
5217 		uint32_t old_adv_peer_ack_point;
5218 
5219 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5220 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5221 		/* C3. See if we need to send a Fwd-TSN */
5222 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5223 			/*
5224 			 * ISSUE with ECN, see FWD-TSN processing.
5225 			 */
5226 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5227 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5228 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5229 				    old_adv_peer_ack_point);
5230 			}
5231 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5232 				send_forward_tsn(stcb, asoc);
5233 			} else if (lchk) {
5234 				/* try to fast retransmit FWD-TSNs that get lost too */
5235 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5236 					send_forward_tsn(stcb, asoc);
5237 				}
5238 			}
5239 		}
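		/*
		 * Find the first chunk past the new ack point that still
		 * has a destination, so that a send (T3) timer can be
		 * started for it below.
		 */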
5240 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5241 			if (lchk->whoTo != NULL) {
5242 				break;
5243 			}
5244 		}
5245 		if (lchk != NULL) {
5246 			/* Ensure a timer is running */
5247 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5248 			    stcb->sctp_ep, stcb, lchk->whoTo);
5249 		}
5250 	}
5251 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5252 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5253 		    a_rwnd,
5254 		    stcb->asoc.peers_rwnd,
5255 		    stcb->asoc.total_flight,
5256 		    stcb->asoc.total_output_queue_size);
5257 	}
5258 }
5259 
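/*
 * A SHUTDOWN chunk carries a cumulative TSN ack, so treat it like a
 * SACK with no gap reports that leaves the peer's rwnd unchanged.
 */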
5260 void
5261 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5262 {
5263 	/* Copy cum-ack */
5264 	uint32_t cum_ack, a_rwnd;
5265 
5266 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5267 	/* Arrange so a_rwnd does NOT change */
5268 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5269 
5270 	/* Now call the express sack handling */
5271 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5272 }
5273 
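/*
 * After a FWD-TSN has moved last_mid_delivered forward on a stream,
 * deliver anything on the stream's reorder queues that has become
 * deliverable.
 */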
5274 static void
5275 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5276     struct sctp_stream_in *strmin)
5277 {
5278 	struct sctp_queued_to_read *control, *ncontrol;
5279 	struct sctp_association *asoc;
5280 	uint32_t mid;
5281 	int need_reasm_check = 0;
5282 
5283 	asoc = &stcb->asoc;
5284 	mid = strmin->last_mid_delivered;
5285 	/*
5286 	 * First deliver anything prior to and including the stream
5287 	 * sequence number that came in.
5288 	 */
5289 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5290 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5291 			/* this is deliverable now */
5292 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5293 				if (control->on_strm_q) {
5294 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5295 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5296 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5297 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5298 #ifdef INVARIANTS
5299 					} else {
5300 						panic("strmin: %p ctl: %p unknown %d",
5301 						    strmin, control, control->on_strm_q);
5302 #endif
5303 					}
5304 					control->on_strm_q = 0;
5305 				}
5306 				/* subtract pending on streams */
5307 				if (asoc->size_on_all_streams >= control->length) {
5308 					asoc->size_on_all_streams -= control->length;
5309 				} else {
5310 #ifdef INVARIANTS
5311 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5312 #else
5313 					asoc->size_on_all_streams = 0;
5314 #endif
5315 				}
5316 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5317 				/* deliver it to at least the delivery-q */
5318 				if (stcb->sctp_socket) {
5319 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5320 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5321 					    control,
5322 					    &stcb->sctp_socket->so_rcv,
5323 					    1, SCTP_READ_LOCK_HELD,
5324 					    SCTP_SO_NOT_LOCKED);
5325 				}
5326 			} else {
5327 				/* It's a fragmented message */
5328 				if (control->first_frag_seen) {
5329 					/*
5330 					 * Make it so this is next to
5331 					 * deliver; we restore it later.
5332 					 */
5333 					strmin->last_mid_delivered = control->mid - 1;
5334 					need_reasm_check = 1;
5335 					break;
5336 				}
5337 			}
5338 		} else {
5339 			/* no more delivery now. */
5340 			break;
5341 		}
5342 	}
5343 	if (need_reasm_check) {
5344 		int ret;
5345 
5346 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5347 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5348 			/* Restore the next to deliver unless we are ahead */
5349 			strmin->last_mid_delivered = mid;
5350 		}
5351 		if (ret == 0) {
5352 			/* Left the front partial message on the queue */
5353 			return;
5354 		}
5355 		need_reasm_check = 0;
5356 	}
5357 	/*
5358 	 * Now we must deliver things in the queue the normal way, if any
5359 	 * are now ready.
5360 	 */
5361 	mid = strmin->last_mid_delivered + 1;
5362 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5363 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5364 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5365 				/* this is deliverable now */
5366 				if (control->on_strm_q) {
5367 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5368 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5369 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5370 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5371 #ifdef INVARIANTS
5372 					} else {
5373 						panic("strmin: %p ctl: %p unknown %d",
5374 						    strmin, control, control->on_strm_q);
5375 #endif
5376 					}
5377 					control->on_strm_q = 0;
5378 				}
5379 				/* subtract pending on streams */
5380 				if (asoc->size_on_all_streams >= control->length) {
5381 					asoc->size_on_all_streams -= control->length;
5382 				} else {
5383 #ifdef INVARIANTS
5384 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5385 #else
5386 					asoc->size_on_all_streams = 0;
5387 #endif
5388 				}
5389 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5390 				/* deliver it to at least the delivery-q */
5391 				strmin->last_mid_delivered = control->mid;
5392 				if (stcb->sctp_socket) {
5393 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5394 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5395 					    control,
5396 					    &stcb->sctp_socket->so_rcv, 1,
5397 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5398 
5399 				}
5400 				mid = strmin->last_mid_delivered + 1;
5401 			} else {
5402 				/* It's a fragmented message */
5403 				if (control->first_frag_seen) {
5404 					/*
5405 					 * Make it so this is next to
5406 					 * deliver
5407 					 */
5408 					strmin->last_mid_delivered = control->mid - 1;
5409 					need_reasm_check = 1;
5410 					break;
5411 				}
5412 			}
5413 		} else {
5414 			break;
5415 		}
5416 	}
5417 	if (need_reasm_check) {
5418 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5419 	}
5420 }
5421 
5424 static void
5425 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5426     struct sctp_association *asoc, struct sctp_stream_in *strm,
5427     struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5428 {
5429 	struct sctp_tmit_chunk *chk, *nchk;
5430 	int cnt_removed = 0;
5431 
5432 	/*
5433 	 * For now, large messages held on the stream reassembly queue that
5434 	 * are complete will be tossed too. We could in theory do more work
5435 	 * and stop after dumping one message, i.e. on seeing the start of
5436 	 * a new message at the head, and call the delivery function to see
5437 	 * if it can be delivered. But for now we just dump everything on
5438 	 * the queue.
5439 	 */
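	/*
	 * Without I-DATA support, unordered fragments are keyed by TSN
	 * only; if the data included so far extends past the new cum-tsn,
	 * this message is not being skipped, so leave it alone.
	 */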
5440 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5441 		return;
5442 	}
5443 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5444 		/* Purge hanging chunks */
5445 		if (!asoc->idata_supported && (ordered == 0)) {
5446 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5447 				break;
5448 			}
5449 		}
5450 		cnt_removed++;
5451 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5452 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5453 			asoc->size_on_reasm_queue -= chk->send_size;
5454 		} else {
5455 #ifdef INVARIANTS
5456 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5457 #else
5458 			asoc->size_on_reasm_queue = 0;
5459 #endif
5460 		}
5461 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5462 		if (chk->data) {
5463 			sctp_m_freem(chk->data);
5464 			chk->data = NULL;
5465 		}
5466 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5467 	}
5468 	if (!TAILQ_EMPTY(&control->reasm)) {
5469 		/* This has to be old data, unordered */
5470 		if (control->data) {
5471 			sctp_m_freem(control->data);
5472 			control->data = NULL;
5473 		}
5474 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5475 		chk = TAILQ_FIRST(&control->reasm);
5476 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5477 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5478 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5479 			    chk, SCTP_READ_LOCK_HELD);
5480 		}
5481 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5482 		return;
5483 	}
5484 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5485 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5486 		if (asoc->size_on_all_streams >= control->length) {
5487 			asoc->size_on_all_streams -= control->length;
5488 		} else {
5489 #ifdef INVARIANTS
5490 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5491 #else
5492 			asoc->size_on_all_streams = 0;
5493 #endif
5494 		}
5495 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5496 		control->on_strm_q = 0;
5497 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5498 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5499 		control->on_strm_q = 0;
5500 #ifdef INVARIANTS
5501 	} else if (control->on_strm_q) {
5502 		panic("strm: %p ctl: %p unknown %d",
5503 		    strm, control, control->on_strm_q);
5504 #endif
5505 	}
5506 	control->on_strm_q = 0;
5507 	if (control->on_read_q == 0) {
5508 		sctp_free_remote_addr(control->whoFrom);
5509 		if (control->data) {
5510 			sctp_m_freem(control->data);
5511 			control->data = NULL;
5512 		}
5513 		sctp_free_a_readq(stcb, control);
5514 	}
5515 }
5516 
5517 void
5518 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5519     struct sctp_forward_tsn_chunk *fwd,
5520     int *abort_flag, struct mbuf *m, int offset)
5521 {
5522 	/* The pr-sctp fwd tsn */
5523 	/*
5524 	 * Here we will perform all the data receiver side steps for
5525 	 * processing FwdTSN, as required by the pr-sctp draft.
5526 	 * Assume we get FwdTSN(x):
5527 	 *
5528 	 * 1) update local cumTSN to x
5529 	 * 2) try to further advance cumTSN to x + others we have
5530 	 * 3) examine and update re-ordering queue on pr-in-streams
5531 	 * 4) clean up re-assembly queue
5532 	 * 5) send a SACK to report where we are.
5533 	 */
5534 	struct sctp_association *asoc;
5535 	uint32_t new_cum_tsn, gap;
5536 	unsigned int i, fwd_sz, m_size;
5537 	uint32_t str_seq;
5538 	struct sctp_stream_in *strm;
5539 	struct sctp_queued_to_read *control, *sv;
5540 
5541 	asoc = &stcb->asoc;
5542 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5543 		SCTPDBG(SCTP_DEBUG_INDATA1,
5544 		    "Bad size too small/big fwd-tsn\n");
5545 		return;
5546 	}
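	/* The mapping array size in bits, i.e. the number of TSNs it can cover. */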
5547 	m_size = (stcb->asoc.mapping_array_size << 3);
5548 	/*************************************************************/
5549 	/* 1. Here we update local cumTSN and shift the bitmap array */
5550 	/*************************************************************/
5551 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5552 
5553 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5554 		/* Already got there ... */
5555 		return;
5556 	}
5557 	/*
5558 	 * Now that we know the new TSN is more advanced, let's find the
5559 	 * actual gap.
5560 	 */
5561 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5562 	asoc->cumulative_tsn = new_cum_tsn;
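	/*
	 * The gap is the serial-number distance from the base of the
	 * mapping array to the new cum-ack; e.g. a base of 0xfffffff0
	 * and a new cum-ack of 0x00000010 give a gap of 0x20. If the
	 * gap reaches past the mapping array, the whole map is stale:
	 * flush both maps and restart them just past new_cum_tsn.
	 * Otherwise just mark every TSN up to the gap as received.
	 */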
5563 	if (gap >= m_size) {
5564 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5565 			struct mbuf *op_err;
5566 			char msg[SCTP_DIAG_INFO_LEN];
5567 
5568 			/*
5569 			 * out of range (of single byte chunks in the rwnd I
5570 			 * give out). This must be an attacker.
5571 			 */
5572 			*abort_flag = 1;
5573 			SCTP_SNPRINTF(msg, sizeof(msg),
5574 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5575 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5576 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5577 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5578 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5579 			return;
5580 		}
5581 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5582 
5583 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5584 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5585 		asoc->highest_tsn_inside_map = new_cum_tsn;
5586 
5587 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5588 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5589 
5590 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5591 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5592 		}
5593 	} else {
5594 		SCTP_TCB_LOCK_ASSERT(stcb);
5595 		for (i = 0; i <= gap; i++) {
5596 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5597 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5598 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5599 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5600 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5601 				}
5602 			}
5603 		}
5604 	}
5605 	/*************************************************************/
5606 	/* 2. Clean up re-assembly queue                             */
5607 	/*************************************************************/
5608 
5609 	/* This is now done as part of clearing up the stream/seq */
5610 	if (asoc->idata_supported == 0) {
5611 		uint16_t sid;
5612 
5613 		/* Flush all the un-ordered data based on cum-tsn */
5614 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5615 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5616 			strm = &asoc->strmin[sid];
5617 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5618 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5619 			}
5620 		}
5621 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5622 	}
5623 	/*******************************************************/
5624 	/* 3. Update the PR-stream re-ordering queues and fix  */
5625 	/*    delivery issues as needed.                       */
5626 	/*******************************************************/
5627 	fwd_sz -= sizeof(*fwd);
5628 	if (m && fwd_sz) {
5629 		/* New method. */
5630 		unsigned int num_str;
5631 		uint32_t mid;
5632 		uint16_t sid;
5633 		uint16_t ordered, flags;
5634 		struct sctp_strseq *stseq, strseqbuf;
5635 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5636 
5637 		offset += sizeof(*fwd);
5638 
5639 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5640 		if (asoc->idata_supported) {
5641 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5642 		} else {
5643 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5644 		}
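		/* Walk each per-stream entry reported in the FWD-TSN chunk. */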
5645 		for (i = 0; i < num_str; i++) {
5646 			if (asoc->idata_supported) {
5647 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5648 				    sizeof(struct sctp_strseq_mid),
5649 				    (uint8_t *)&strseqbuf_m);
5650 				offset += sizeof(struct sctp_strseq_mid);
5651 				if (stseq_m == NULL) {
5652 					break;
5653 				}
5654 				sid = ntohs(stseq_m->sid);
5655 				mid = ntohl(stseq_m->mid);
5656 				flags = ntohs(stseq_m->flags);
5657 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5658 					ordered = 0;
5659 				} else {
5660 					ordered = 1;
5661 				}
5662 			} else {
5663 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5664 				    sizeof(struct sctp_strseq),
5665 				    (uint8_t *)&strseqbuf);
5666 				offset += sizeof(struct sctp_strseq);
5667 				if (stseq == NULL) {
5668 					break;
5669 				}
5670 				sid = ntohs(stseq->sid);
5671 				mid = (uint32_t)ntohs(stseq->ssn);
5672 				ordered = 1;
5673 			}
5674 			/* Now process this converted entry. */
5675 
5678 			/*
5679 			 * OK, we now look for the stream/seq on the read
5680 			 * queue where it is not all delivered. If we find
5681 			 * it, we transmute the read entry into a PDI_ABORTED.
5682 			 */
5683 			if (sid >= asoc->streamincnt) {
5684 				/* screwed up streams, stop! */
5685 				break;
5686 			}
5687 			if ((asoc->str_of_pdapi == sid) &&
5688 			    (asoc->ssn_of_pdapi == mid)) {
5689 				/*
5690 				 * If this is the one we were partially
5691 				 * delivering now, then we no longer are.
5692 				 * Note this will change with the reassembly
5693 				 * re-write.
5694 				 */
5695 				asoc->fragmented_delivery_inprogress = 0;
5696 			}
5697 			strm = &asoc->strmin[sid];
5698 			if (ordered) {
5699 				TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
5700 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5701 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5702 					}
5703 				}
5704 			} else {
5705 				if (asoc->idata_supported) {
5706 					TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
5707 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5708 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5709 						}
5710 					}
5711 				} else {
5712 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5713 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5714 					}
5715 				}
5716 			}
5717 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5718 				if ((control->sinfo_stream == sid) &&
5719 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5720 					str_seq = (sid << 16) | (0x0000ffff & mid);
5721 					control->pdapi_aborted = 1;
5722 					sv = stcb->asoc.control_pdapi;
5723 					control->end_added = 1;
5724 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5725 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5726 						if (asoc->size_on_all_streams >= control->length) {
5727 							asoc->size_on_all_streams -= control->length;
5728 						} else {
5729 #ifdef INVARIANTS
5730 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5731 #else
5732 							asoc->size_on_all_streams = 0;
5733 #endif
5734 						}
5735 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5736 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5737 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5738 #ifdef INVARIANTS
5739 					} else if (control->on_strm_q) {
5740 						panic("strm: %p ctl: %p unknown %d",
5741 						    strm, control, control->on_strm_q);
5742 #endif
5743 					}
5744 					control->on_strm_q = 0;
5745 					stcb->asoc.control_pdapi = control;
5746 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5747 					    stcb,
5748 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5749 					    (void *)&str_seq,
5750 					    SCTP_SO_NOT_LOCKED);
5751 					stcb->asoc.control_pdapi = sv;
5752 					break;
5753 				} else if ((control->sinfo_stream == sid) &&
5754 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5755 					/* We are past our victim SSN */
5756 					break;
5757 				}
5758 			}
5759 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5760 				/* Update the sequence number */
5761 				strm->last_mid_delivered = mid;
5762 			}
5763 			/* now kick the stream the new way */
5764 			/* sa_ignore NO_NULL_CHK */
5765 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5766 		}
5767 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5768 	}
5769 	/*
5770 	 * Now slide things forward.
5771 	 */
5772 	sctp_slide_mapping_arrays(stcb);
5773 }
5774