xref: /freebsd/sys/netinet/sctp_indata.c (revision d93a896ef95946b0bf1219866fcb324b78543444)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, so the SACK can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		KASSERT(asoc->cnt_on_reasm_queue == 0, ("cnt_on_reasm_queue is %u", asoc->cnt_on_reasm_queue));
		KASSERT(asoc->cnt_on_all_streams == 0, ("cnt_on_all_streams is %u", asoc->cnt_on_all_streams));
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* account for the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it would otherwise be 0, so SWS (silly window
	 * syndrome) avoidance stays engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
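
/*
 * Worked example for the rwnd calculation above (illustrative numbers
 * only, not taken from a real trace): assume SCTP_SB_LIMIT_RCV() is
 * 65536, sctp_sbspace() reports 60000, two chunks totalling 3000 bytes
 * sit on the reassembly queue, one 1000-byte chunk sits on a stream
 * queue, and MSIZE is 256. Then
 *
 *	calc = 60000
 *	calc -= 3000 + 2 * 256	-> 56488
 *	calc -= 1000 + 1 * 256	-> 55232
 *	calc -= my_rwnd_control_len
 *
 * and only if the result drops below my_rwnd_control_len is it clamped
 * to 1 for SWS avoidance.
 */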


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}
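
/*
 * Note on the (flags << 8) above: the DATA chunk flags (e.g.
 * SCTP_DATA_UNORDERED, SCTP_DATA_FIRST_FRAG) are kept in the upper byte
 * of sinfo_flags, so code such as sctp_place_control_in_stream() below
 * recovers them with (control->sinfo_flags >> 8). For example, an
 * unordered chunk with flags 0x04 yields sinfo_flags 0x0400.
 */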

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
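
/*
 * The mbuf returned above carries a sequence of cmsg records laid out
 * back to back, each padded out to CMSG_SPACE() so the next header is
 * aligned. With all three features enabled the layout is:
 *
 *	cmsghdr (SCTP_RCVINFO) | struct sctp_rcvinfo | padding
 *	cmsghdr (SCTP_NXTINFO) | struct sctp_nxtinfo | padding
 *	cmsghdr (SCTP_SNDRCV or SCTP_EXTRCV) | sndrcv/extrcv info
 *
 * which is why the code advances cmh by CMSG_SPACE(...) but sets
 * cmsg_len to CMSG_LEN(...), the length without trailing padding.
 */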


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
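
/*
 * Sketch of what sctp_mark_non_revokable() does to the two bitmaps
 * (illustrative TSNs): with mapping_array_base_tsn = 100 and tsn = 103,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 3. The bit for gap 3 is cleared
 * in mapping_array (revokable) and set in nr_mapping_array
 * (non-revokable), so the TSN is treated as delivered and will not be
 * renege'd on (e.g. by sctp_drain()).
 */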

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control can be queued here in
				 * old style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert the new one before it.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, the peer sent a duplicate msg id
				 * number; return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
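
/*
 * The insertion above keeps each stream queue sorted by message id. A
 * small example (assumed MIDs): if the ordered queue holds MIDs 2, 3
 * and 7 and MID 5 arrives, the TAILQ_FOREACH walk stops at 7
 * (SCTP_MID_GT) and inserts 5 before it, giving 2, 3, 5, 7; an arriving
 * duplicate of MID 3 instead returns -1 and the caller aborts the
 * association.
 */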

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer, if it is the next
 * one to go, OR put it in the correct place in the delivery queue. If we
 * do append to the so_buf, keep doing so until we are out of order, as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we can always hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}


static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has taken any
			 * needed SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If we return 1, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the peer is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new entry and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure,
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of a new message
				 * beyond the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals case is a TSN failure,
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSN's higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. If, in so placing, we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
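	/*
	 * Example of the pd_point computation above (illustrative values,
	 * assuming SCTP_PARTIAL_DELIVERY_SHIFT is 8): with a 65536-byte
	 * receive buffer limit the first operand is 65536 >> 8 = 256; if
	 * the endpoint's partial_delivery_point is 4096, pd_point becomes
	 * 256, i.e. the smaller of the two governs when partial delivery
	 * may start.
	 */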
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the entry at the top was being partially delivered
		 * and has completed, so we remove it. Note that the pd_api
		 * flag was taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered entry
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}


uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
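
/*
 * Merge example for sctp_add_chk_to_control() (assumed FSNs): a message
 * fragmented as FSN 0 (B bit), 1, 2 and 3 (E bit) arrives with FSN 0
 * landing first. FSN 0 seeds control->data and fsn_included = 0; each
 * subsequent call appends the chunk's mbufs via the tail pointer and
 * advances fsn_included, and the call that carries the E bit marks
 * end_added/last_frag_seen and pulls the control off its stream queue.
 */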

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok, we created this control; now let's validate
			 * that it is legal, i.e. there is a B bit set. If
			 * not, and we have received up to the cum-ack,
			 * then it is invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok, we must queue the chunk into the reassembly portion: o if
	 * it's the first, it goes to the control mbuf. o if it's not first
	 * but the next in sequence, it goes to the control, and each
	 * succeeding one in order also goes. o if it's not in order, we
	 * place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on the sender's part: they either sent us
			 * two data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the
				 * TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the
				 * TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen the
			 * last one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, the peer sent a duplicate str seq
				 * number
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok, let's see if we can pull any chunks up into the control
	 * structure that are in sequence, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				asoc->size_on_all_streams += lenadded;
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
1626 
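/*
 * Find the partially-reassembled message for a given MID (or SSN) on a
 * stream. Ordered messages are looked up by MID on the in-order queue.
 * Unordered messages are looked up by MID on the unordered queue when
 * I-DATA is in use; with plain DATA there is no MID to match against,
 * so effectively at most one unordered reassembly can be in progress
 * per stream and the head of the queue (if any) is returned.
 */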
1627 static struct sctp_queued_to_read *
1628 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1629 {
1630 	struct sctp_queued_to_read *control;
1631 
1632 	if (ordered) {
1633 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1634 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1635 				break;
1636 			}
1637 		}
1638 	} else {
1639 		if (idata_supported) {
1640 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1641 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1642 					break;
1643 				}
1644 			}
1645 		} else {
1646 			control = TAILQ_FIRST(&strm->uno_inqueue);
1647 		}
1648 	}
1649 	return (control);
1650 }
1651 
1652 static int
1653 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1654     struct mbuf **m, int offset, int chk_length,
1655     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1656     int *break_flag, int last_chunk, uint8_t chk_type)
1657 {
1658 	/* Process a data chunk */
1660 	struct sctp_tmit_chunk *chk;
1661 	uint32_t tsn, fsn, gap, mid;
1662 	struct mbuf *dmbuf;
1663 	int the_len;
1664 	int need_reasm_check = 0;
1665 	uint16_t sid;
1666 	struct mbuf *op_err;
1667 	char msg[SCTP_DIAG_INFO_LEN];
1668 	struct sctp_queued_to_read *control, *ncontrol;
1669 	uint32_t ppid;
1670 	uint8_t chk_flags;
1671 	struct sctp_stream_reset_list *liste;
1672 	int ordered;
1673 	size_t clen;
1674 	int created_control = 0;
1675 
1676 	if (chk_type == SCTP_IDATA) {
1677 		struct sctp_idata_chunk *chunk, chunk_buf;
1678 
1679 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1680 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1681 		chk_flags = chunk->ch.chunk_flags;
1682 		clen = sizeof(struct sctp_idata_chunk);
1683 		tsn = ntohl(chunk->dp.tsn);
1684 		sid = ntohs(chunk->dp.sid);
1685 		mid = ntohl(chunk->dp.mid);
1686 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1687 			fsn = 0;
1688 			ppid = chunk->dp.ppid_fsn.ppid;
1689 		} else {
1690 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1691 			ppid = 0xffffffff;	/* Use as an invalid value. */
1692 		}
1693 	} else {
1694 		struct sctp_data_chunk *chunk, chunk_buf;
1695 
1696 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1697 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1698 		chk_flags = chunk->ch.chunk_flags;
1699 		clen = sizeof(struct sctp_data_chunk);
1700 		tsn = ntohl(chunk->dp.tsn);
1701 		sid = ntohs(chunk->dp.sid);
1702 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1703 		fsn = tsn;
1704 		ppid = chunk->dp.ppid;
1705 	}
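	/*
	 * To summarize the two header layouts parsed above: a DATA chunk
	 * carries TSN, SID, a 16-bit SSN and a PPID, and every fragment's
	 * FSN is implicitly its TSN. An I-DATA chunk carries TSN, SID and
	 * a 32-bit MID, plus a shared field that carries the PPID in the
	 * first fragment and an explicit FSN in all later fragments (the
	 * first fragment's FSN is implicitly 0).
	 */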
1706 	if ((size_t)chk_length == clen) {
1707 		/*
1708 	 * Need to send an abort since we had an empty data chunk.
1709 		 */
1710 		op_err = sctp_generate_no_user_data_cause(tsn);
1711 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1712 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1713 		*abort_flag = 1;
1714 		return (0);
1715 	}
1716 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1717 		asoc->send_sack = 1;
1718 	}
1719 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1720 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1721 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1722 	}
1723 	if (stcb == NULL) {
1724 		return (0);
1725 	}
1726 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1727 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1728 		/* It is a duplicate */
1729 		SCTP_STAT_INCR(sctps_recvdupdata);
1730 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1731 			/* Record a dup for the next outbound sack */
1732 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1733 			asoc->numduptsns++;
1734 		}
1735 		asoc->send_sack = 1;
1736 		return (0);
1737 	}
1738 	/* Calculate the number of TSN's between the base and this TSN */
1739 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
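	/*
	 * The gap is, in effect, the serial-arithmetic distance
	 * (tsn - mapping_array_base_tsn) mod 2^32. For example, with a
	 * base of 1000, TSN 1005 yields gap 5; the modular arithmetic
	 * keeps this correct across TSN wrap-around.
	 */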
1740 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1741 		/* Can't hold the bit in the mapping at max array, toss it */
1742 		return (0);
1743 	}
1744 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1745 		SCTP_TCB_LOCK_ASSERT(stcb);
1746 		if (sctp_expand_mapping_array(asoc, gap)) {
1747 			/* Can't expand, drop it */
1748 			return (0);
1749 		}
1750 	}
1751 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1752 		*high_tsn = tsn;
1753 	}
1754 	/* See if we have received this one already */
1755 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1756 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1757 		SCTP_STAT_INCR(sctps_recvdupdata);
1758 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1759 			/* Record a dup for the next outbound sack */
1760 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1761 			asoc->numduptsns++;
1762 		}
1763 		asoc->send_sack = 1;
1764 		return (0);
1765 	}
1766 	/*
1767 	 * Check the GONE flag; duplicates would already have caused a SACK
1768 	 * to be sent above.
1769 	 */
1770 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1771 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1772 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1773 		/*
1774 		 * Wait a minute, this guy is gone: there is no longer a
1775 		 * receiver. Send the peer an ABORT!
1776 		 */
1777 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1778 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1779 		*abort_flag = 1;
1780 		return (0);
1781 	}
1782 	/*
1783 	 * Now before going further we see if there is room. If NOT then we
1784 	 * MAY let one through only IF this TSN is the one we are waiting
1785 	 * for on a partial delivery API.
1786 	 */
1787 
1788 	/* Is the stream valid? */
1789 	if (sid >= asoc->streamincnt) {
1790 		struct sctp_error_invalid_stream *cause;
1791 
1792 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1793 		    0, M_NOWAIT, 1, MT_DATA);
1794 		if (op_err != NULL) {
1795 			/* add some space up front so prepend will work well */
1796 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1797 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1798 			/*
1799 			 * Error causes are just parameters, and this one has two
1800 			 * back-to-back parameter headers: one with the error type
1801 			 * and size, the other with the stream ID and a reserved field.
1802 			 */
1803 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1804 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1805 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1806 			cause->stream_id = htons(sid);
1807 			cause->reserved = htons(0);
1808 			sctp_queue_op_err(stcb, op_err);
1809 		}
1810 		SCTP_STAT_INCR(sctps_badsid);
1811 		SCTP_TCB_LOCK_ASSERT(stcb);
1812 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1813 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1814 			asoc->highest_tsn_inside_nr_map = tsn;
1815 		}
1816 		if (tsn == (asoc->cumulative_tsn + 1)) {
1817 			/* Update cum-ack */
1818 			asoc->cumulative_tsn = tsn;
1819 		}
1820 		return (0);
1821 	}
1822 	/*
1823 	 * If it's a fragmented message, let's see if we can find the control
1824 	 * on the reassembly queues.
1825 	 */
1826 	if ((chk_type == SCTP_IDATA) &&
1827 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1828 	    (fsn == 0)) {
1829 		/*
1830 		 * The first *must* be fsn 0, and other (middle/end) pieces
1831 		 * can *not* be fsn 0. XXX: This can happen in case of a
1832 		 * wrap around. Ignore it for now.
1833 		 */
1834 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1835 		    mid, chk_flags);
1836 		goto err_out;
1837 	}
1838 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1839 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1840 	    chk_flags, control);
1841 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1842 		/* See if we can find the re-assembly entity */
1843 		if (control != NULL) {
1844 			/* We found something, does it belong? */
1845 			if (ordered && (mid != control->mid)) {
1846 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1847 		err_out:
1848 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1849 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1850 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1851 				*abort_flag = 1;
1852 				return (0);
1853 			}
1854 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1855 				/*
1856 				 * We can't have a switched order with an
1857 				 * unordered chunk
1858 				 */
1859 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1860 				    tsn);
1861 				goto err_out;
1862 			}
1863 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1864 				/*
1865 				 * We can't have a switched unordered with an
1866 				 * ordered chunk
1867 				 */
1868 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1869 				    tsn);
1870 				goto err_out;
1871 			}
1872 		}
1873 	} else {
1874 		/*
1875 		 * It's a complete segment. Let's validate that we don't have
1876 		 * a re-assembly going on with the same Stream/Seq (for
1877 		 * ordered) or in the same Stream for unordered.
1878 		 */
1879 		if (control != NULL) {
1880 			if (ordered || asoc->idata_supported) {
1881 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1882 				    chk_flags, mid);
1883 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1884 				goto err_out;
1885 			} else {
1886 				if ((tsn == control->fsn_included + 1) &&
1887 				    (control->end_added == 0)) {
1888 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1889 					goto err_out;
1890 				} else {
1891 					control = NULL;
1892 				}
1893 			}
1894 		}
1895 	}
1896 	/* now do the tests */
1897 	if (((asoc->cnt_on_all_streams +
1898 	    asoc->cnt_on_reasm_queue +
1899 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1900 	    (((int)asoc->my_rwnd) <= 0)) {
1901 		/*
1902 		 * When we have NO room in the rwnd we check to make sure
1903 		 * the reader is doing its job...
1904 		 */
1905 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1906 			/* some to read, wake-up */
1907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1908 			struct socket *so;
1909 
1910 			so = SCTP_INP_SO(stcb->sctp_ep);
1911 			atomic_add_int(&stcb->asoc.refcnt, 1);
1912 			SCTP_TCB_UNLOCK(stcb);
1913 			SCTP_SOCKET_LOCK(so, 1);
1914 			SCTP_TCB_LOCK(stcb);
1915 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1916 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1917 				/* assoc was freed while we were unlocked */
1918 				SCTP_SOCKET_UNLOCK(so, 1);
1919 				return (0);
1920 			}
1921 #endif
1922 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1924 			SCTP_SOCKET_UNLOCK(so, 1);
1925 #endif
1926 		}
1927 		/* now is it in the mapping array of what we have accepted? */
1928 		if (chk_type == SCTP_DATA) {
1929 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1930 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1931 				/* Nope, not in the valid range, dump it */
1932 		dump_packet:
1933 				sctp_set_rwnd(stcb, asoc);
1934 				if ((asoc->cnt_on_all_streams +
1935 				    asoc->cnt_on_reasm_queue +
1936 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1937 					SCTP_STAT_INCR(sctps_datadropchklmt);
1938 				} else {
1939 					SCTP_STAT_INCR(sctps_datadroprwnd);
1940 				}
1941 				*break_flag = 1;
1942 				return (0);
1943 			}
1944 		} else {
1945 			if (control == NULL) {
1946 				goto dump_packet;
1947 			}
1948 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1949 				goto dump_packet;
1950 			}
1951 		}
1952 	}
1953 #ifdef SCTP_ASOCLOG_OF_TSNS
1954 	SCTP_TCB_LOCK_ASSERT(stcb);
1955 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1956 		asoc->tsn_in_at = 0;
1957 		asoc->tsn_in_wrapped = 1;
1958 	}
1959 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1960 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1961 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1962 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1963 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
1964 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1965 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1966 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1967 	asoc->tsn_in_at++;
1968 #endif
1969 	/*
1970 	 * Before we continue lets validate that we are not being fooled by
1971 	 * an evil attacker. We can only have Nk chunks based on our TSN
1972 	 * spread allowed by the mapping array N * 8 bits, so there is no
1973 	 * way our stream sequence numbers could have wrapped. We of course
1974 	 * only validate the FIRST fragment so the bit must be set.
1975 	 */
1976 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1977 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1978 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1979 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1980 		/* The incoming sseq is behind where we last delivered? */
1981 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1982 		    mid, asoc->strmin[sid].last_mid_delivered);
1983 
1984 		if (asoc->idata_supported) {
1985 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1986 			    asoc->strmin[sid].last_mid_delivered,
1987 			    tsn,
1988 			    sid,
1989 			    mid);
1990 		} else {
1991 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1992 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
1993 			    tsn,
1994 			    sid,
1995 			    (uint16_t)mid);
1996 		}
1997 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1998 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1999 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2000 		*abort_flag = 1;
2001 		return (0);
2002 	}
2003 	if (chk_type == SCTP_IDATA) {
2004 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2005 	} else {
2006 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2007 	}
2008 	if (last_chunk == 0) {
2009 		if (chk_type == SCTP_IDATA) {
2010 			dmbuf = SCTP_M_COPYM(*m,
2011 			    (offset + sizeof(struct sctp_idata_chunk)),
2012 			    the_len, M_NOWAIT);
2013 		} else {
2014 			dmbuf = SCTP_M_COPYM(*m,
2015 			    (offset + sizeof(struct sctp_data_chunk)),
2016 			    the_len, M_NOWAIT);
2017 		}
2018 #ifdef SCTP_MBUF_LOGGING
2019 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2020 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2021 		}
2022 #endif
2023 	} else {
2024 		/* We can steal the last chunk */
2025 		int l_len;
2026 
2027 		dmbuf = *m;
2028 		/* lop off the top part */
2029 		if (chk_type == SCTP_IDATA) {
2030 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2031 		} else {
2032 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2033 		}
2034 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2035 			l_len = SCTP_BUF_LEN(dmbuf);
2036 		} else {
2037 			/*
2038 			 * need to count up the size; hopefully we do not
2039 			 * hit this too often :-0
2040 			 */
2041 			struct mbuf *lat;
2042 
2043 			l_len = 0;
2044 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2045 				l_len += SCTP_BUF_LEN(lat);
2046 			}
2047 		}
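		/*
		 * m_adj() with a negative length trims bytes from the tail
		 * of the chain. For example, if the chain holds l_len = 120
		 * bytes but the chunk payload is the_len = 100 bytes,
		 * m_adj(dmbuf, -20) removes the 20 trailing padding bytes.
		 */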
2048 		if (l_len > the_len) {
2049 			/* Trim the extra bytes off the end too */
2050 			m_adj(dmbuf, -(l_len - the_len));
2051 		}
2052 	}
2053 	if (dmbuf == NULL) {
2054 		SCTP_STAT_INCR(sctps_nomem);
2055 		return (0);
2056 	}
2057 	/*
2058 	 * Now, no matter what, we need a control; get one if we don't have
2059 	 * one (we may have gotten it above when we found the message was
2060 	 * fragmented).
2061 	 */
2062 	if (control == NULL) {
2063 		sctp_alloc_a_readq(stcb, control);
2064 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2065 		    ppid,
2066 		    sid,
2067 		    chk_flags,
2068 		    NULL, fsn, mid);
2069 		if (control == NULL) {
2070 			SCTP_STAT_INCR(sctps_nomem);
2071 			return (0);
2072 		}
2073 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2074 			struct mbuf *mm;
2075 
2076 			control->data = dmbuf;
2077 			for (mm = control->data; mm; mm = mm->m_next) {
2078 				control->length += SCTP_BUF_LEN(mm);
2079 			}
2080 			control->tail_mbuf = NULL;
2081 			control->end_added = 1;
2082 			control->last_frag_seen = 1;
2083 			control->first_frag_seen = 1;
2084 			control->fsn_included = fsn;
2085 			control->top_fsn = fsn;
2086 		}
2087 		created_control = 1;
2088 	}
2089 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2090 	    chk_flags, ordered, mid, control);
2091 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2092 	    TAILQ_EMPTY(&asoc->resetHead) &&
2093 	    ((ordered == 0) ||
2094 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2095 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2096 		/* Candidate for express delivery */
2097 		/*
2098 		 * It's not fragmented, no PD-API is up, nothing is in the
2099 		 * delivery queue, it's unordered OR ordered and the next to
2100 		 * deliver AND nothing else is stuck on the stream queue,
2101 		 * and there is room for it in the socket buffer. Let's just
2102 		 * stuff it up the buffer....
2103 		 */
2104 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2105 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2106 			asoc->highest_tsn_inside_nr_map = tsn;
2107 		}
2108 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2109 		    control, mid);
2110 
2111 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2112 		    control, &stcb->sctp_socket->so_rcv,
2113 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2114 
2115 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2116 			/* for ordered, bump what we delivered */
2117 			asoc->strmin[sid].last_mid_delivered++;
2118 		}
2119 		SCTP_STAT_INCR(sctps_recvexpress);
2120 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2121 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2122 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2123 		}
2124 		control = NULL;
2125 		goto finish_express_del;
2126 	}
2127 	/* Now will we need a chunk too? */
2128 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2129 		sctp_alloc_a_chunk(stcb, chk);
2130 		if (chk == NULL) {
2131 			/* No memory so we drop the chunk */
2132 			SCTP_STAT_INCR(sctps_nomem);
2133 			if (last_chunk == 0) {
2134 				/* we copied it, free the copy */
2135 				sctp_m_freem(dmbuf);
2136 			}
2137 			return (0);
2138 		}
2139 		chk->rec.data.tsn = tsn;
2140 		chk->no_fr_allowed = 0;
2141 		chk->rec.data.fsn = fsn;
2142 		chk->rec.data.mid = mid;
2143 		chk->rec.data.sid = sid;
2144 		chk->rec.data.ppid = ppid;
2145 		chk->rec.data.context = stcb->asoc.context;
2146 		chk->rec.data.doing_fast_retransmit = 0;
2147 		chk->rec.data.rcv_flags = chk_flags;
2148 		chk->asoc = asoc;
2149 		chk->send_size = the_len;
2150 		chk->whoTo = net;
2151 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2152 		    chk,
2153 		    control, mid);
2154 		atomic_add_int(&net->ref_count, 1);
2155 		chk->data = dmbuf;
2156 	}
2157 	/* Set the appropriate TSN mark */
2158 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2159 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2160 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2161 			asoc->highest_tsn_inside_nr_map = tsn;
2162 		}
2163 	} else {
2164 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2165 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2166 			asoc->highest_tsn_inside_map = tsn;
2167 		}
2168 	}
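	/*
	 * Two maps are maintained: nr_mapping_array tracks TSNs whose
	 * delivery is non-renegable (they will be reported in NR gap-ack
	 * blocks), while mapping_array tracks renegable TSNs. With
	 * sctp_do_drain disabled every TSN is marked non-renegable here;
	 * with draining enabled the TSN stays renegable so it can later be
	 * reneged on if the receiver must reclaim buffer space.
	 */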
2169 	/* Now is it complete (i.e. not fragmented)? */
2170 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2171 		/*
2172 		 * Special check for when streams are resetting. We could be
2173 		 * smarter about this and check the actual stream to see if
2174 		 * it is not being reset; that way we would not create
2175 		 * head-of-line blocking between streams being reset and
2176 		 * those that are not.
2178 		 */
2179 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2180 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2181 			/*
2182 			 * yep, it's past where we need to reset... go ahead
2183 			 * and queue it.
2184 			 */
2185 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2186 				/* first one on */
2187 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2188 			} else {
2189 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2190 				unsigned char inserted = 0;
2191 
2192 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2193 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2195 						continue;
2196 					} else {
2197 						/* found it */
2198 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2199 						inserted = 1;
2200 						break;
2201 					}
2202 				}
2203 				if (inserted == 0) {
2204 					/*
2205 					 * Not inserted before any existing
2206 					 * entry, so it goes at the end of
2207 					 * the queue.
2208 					 */
2209 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2210 				}
2211 			}
2212 			goto finish_express_del;
2213 		}
2214 		if (chk_flags & SCTP_DATA_UNORDERED) {
2215 			/* queue directly into socket buffer */
2216 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2217 			    control, mid);
2218 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2219 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2220 			    control,
2221 			    &stcb->sctp_socket->so_rcv, 1,
2222 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2223 
2224 		} else {
2225 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2226 			    mid);
2227 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2228 			if (*abort_flag) {
2229 				if (last_chunk) {
2230 					*m = NULL;
2231 				}
2232 				return (0);
2233 			}
2234 		}
2235 		goto finish_express_del;
2236 	}
2237 	/* If we reach here its a reassembly */
2238 	need_reasm_check = 1;
2239 	SCTPDBG(SCTP_DEBUG_XXX,
2240 	    "Queue data to stream for reasm control: %p MID: %u\n",
2241 	    control, mid);
2242 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2243 	if (*abort_flag) {
2244 		/*
2245 		 * the assoc is now gone and chk was put onto the reasm
2246 		 * queue, which has all been freed.
2247 		 */
2248 		if (last_chunk) {
2249 			*m = NULL;
2250 		}
2251 		return (0);
2252 	}
2253 finish_express_del:
2254 	/* Here we tidy up things */
2255 	if (tsn == (asoc->cumulative_tsn + 1)) {
2256 		/* Update cum-ack */
2257 		asoc->cumulative_tsn = tsn;
2258 	}
2259 	if (last_chunk) {
2260 		*m = NULL;
2261 	}
2262 	if (ordered) {
2263 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2264 	} else {
2265 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2266 	}
2267 	SCTP_STAT_INCR(sctps_recvdata);
2268 	/* Set it present please */
2269 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2270 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2271 	}
2272 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2273 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2274 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2275 	}
2276 	if (need_reasm_check) {
2277 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2278 		need_reasm_check = 0;
2279 	}
2280 	/* check the special flag for stream resets */
2281 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2282 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2283 		/*
2284 		 * We have finished working through the backlogged TSNs; now
2285 		 * it is time to reset streams: 1) call the reset function,
2286 		 * 2) free the pending_reply space, 3) distribute any chunks
2287 		 * in the pending_reply_queue.
2288 		 */
2289 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2290 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2291 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2292 		SCTP_FREE(liste, SCTP_M_STRESET);
2293 		/* sa_ignore FREED_MEMORY */
2294 		liste = TAILQ_FIRST(&asoc->resetHead);
2295 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2296 			/* All can be removed */
2297 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2298 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2299 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2300 				if (*abort_flag) {
2301 					return (0);
2302 				}
2303 				if (need_reasm_check) {
2304 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2305 					need_reasm_check = 0;
2306 				}
2307 			}
2308 		} else {
2309 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2310 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2311 					break;
2312 				}
2313 				/*
2314 				 * If control->sinfo_tsn is <= liste->tsn we
2315 				 * can process it, i.e. the negation of the
2316 				 * break condition above.
2317 				 */
2318 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2319 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2320 				if (*abort_flag) {
2321 					return (0);
2322 				}
2323 				if (need_reasm_check) {
2324 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2325 					need_reasm_check = 0;
2326 				}
2327 			}
2328 		}
2329 	}
2330 	return (1);
2331 }
2332 
2333 static const int8_t sctp_map_lookup_tab[256] = {
2334 	0, 1, 0, 2, 0, 1, 0, 3,
2335 	0, 1, 0, 2, 0, 1, 0, 4,
2336 	0, 1, 0, 2, 0, 1, 0, 3,
2337 	0, 1, 0, 2, 0, 1, 0, 5,
2338 	0, 1, 0, 2, 0, 1, 0, 3,
2339 	0, 1, 0, 2, 0, 1, 0, 4,
2340 	0, 1, 0, 2, 0, 1, 0, 3,
2341 	0, 1, 0, 2, 0, 1, 0, 6,
2342 	0, 1, 0, 2, 0, 1, 0, 3,
2343 	0, 1, 0, 2, 0, 1, 0, 4,
2344 	0, 1, 0, 2, 0, 1, 0, 3,
2345 	0, 1, 0, 2, 0, 1, 0, 5,
2346 	0, 1, 0, 2, 0, 1, 0, 3,
2347 	0, 1, 0, 2, 0, 1, 0, 4,
2348 	0, 1, 0, 2, 0, 1, 0, 3,
2349 	0, 1, 0, 2, 0, 1, 0, 7,
2350 	0, 1, 0, 2, 0, 1, 0, 3,
2351 	0, 1, 0, 2, 0, 1, 0, 4,
2352 	0, 1, 0, 2, 0, 1, 0, 3,
2353 	0, 1, 0, 2, 0, 1, 0, 5,
2354 	0, 1, 0, 2, 0, 1, 0, 3,
2355 	0, 1, 0, 2, 0, 1, 0, 4,
2356 	0, 1, 0, 2, 0, 1, 0, 3,
2357 	0, 1, 0, 2, 0, 1, 0, 6,
2358 	0, 1, 0, 2, 0, 1, 0, 3,
2359 	0, 1, 0, 2, 0, 1, 0, 4,
2360 	0, 1, 0, 2, 0, 1, 0, 3,
2361 	0, 1, 0, 2, 0, 1, 0, 5,
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 4,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 8
2366 };
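/*
 * sctp_map_lookup_tab[v] gives the number of consecutive 1-bits in v
 * counting from the least significant bit, i.e. how many in-sequence
 * TSNs a partially-filled map byte contributes. For example,
 * sctp_map_lookup_tab[0x07] is 3 and sctp_map_lookup_tab[0x5f]
 * (binary 01011111) is 5; a full byte (0xff) contributes all 8.
 */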
2367 
2368 
2369 void
2370 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2371 {
2372 	/*
2373 	 * Now we also need to check the mapping array in a couple of ways.
2374 	 * 1) Did we move the cum-ack point?
2375 	 *
2376 	 * When you first glance at this you might think that all entries
2377 	 * that make up the position of the cum-ack would be in the
2378 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2379 	 * deliverable. That's true with one exception: for a fragmented
2380 	 * message we may not deliver the data until some threshold (or all
2381 	 * of it) is in place. So we must OR the nr_mapping_array and
2382 	 * mapping_array to get a true picture of the cum-ack.
2383 	 */
2384 	struct sctp_association *asoc;
2385 	int at;
2386 	uint8_t val;
2387 	int slide_from, slide_end, lgap, distance;
2388 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2389 
2390 	asoc = &stcb->asoc;
2391 
2392 	old_cumack = asoc->cumulative_tsn;
2393 	old_base = asoc->mapping_array_base_tsn;
2394 	old_highest = asoc->highest_tsn_inside_map;
2395 	/*
2396 	 * We could probably improve this a small bit by calculating the
2397 	 * offset of the current cum-ack as the starting point.
2398 	 */
2399 	at = 0;
2400 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2401 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2402 		if (val == 0xff) {
2403 			at += 8;
2404 		} else {
2405 			/* there is a 0 bit */
2406 			at += sctp_map_lookup_tab[val];
2407 			break;
2408 		}
2409 	}
2410 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
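	/*
	 * Worked example: with mapping_array_base_tsn = 100 and combined
	 * map bytes 0xff, 0xff, 0x07, ... the loop counts 8 + 8 bits, then
	 * adds sctp_map_lookup_tab[0x07] = 3, so at = 19 and the
	 * cumulative TSN becomes 100 + 18 = 118.
	 */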
2411 
2412 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2413 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2414 #ifdef INVARIANTS
2415 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2416 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2417 #else
2418 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2419 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2420 		sctp_print_mapping_array(asoc);
2421 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2422 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2423 		}
2424 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2425 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2426 #endif
2427 	}
2428 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2429 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2430 	} else {
2431 		highest_tsn = asoc->highest_tsn_inside_map;
2432 	}
2433 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2434 		/* The complete array was completed by a single FR */
2435 		/* highest becomes the cum-ack */
2436 		int clr;
2437 #ifdef INVARIANTS
2438 		unsigned int i;
2439 #endif
2440 
2441 		/* clear the array */
2442 		clr = ((at + 7) >> 3);
2443 		if (clr > asoc->mapping_array_size) {
2444 			clr = asoc->mapping_array_size;
2445 		}
2446 		memset(asoc->mapping_array, 0, clr);
2447 		memset(asoc->nr_mapping_array, 0, clr);
2448 #ifdef INVARIANTS
2449 		for (i = 0; i < asoc->mapping_array_size; i++) {
2450 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2451 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2452 				sctp_print_mapping_array(asoc);
2453 			}
2454 		}
2455 #endif
2456 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2457 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2458 	} else if (at >= 8) {
2459 		/* we can slide the mapping array down */
2460 		/* slide_from holds where we hit the first NON 0xff byte */
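		/*
		 * Worked example: if slide_from = 2 (bytes 0 and 1 are
		 * 0xff) and the highest TSN maps to lgap = 30, then
		 * slide_end = 3, distance = 2, bytes 2..3 are copied down
		 * to 0..1 and mapping_array_base_tsn advances by
		 * slide_from * 8 = 16.
		 */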
2461 
2462 		/*
2463 		 * now calculate the ceiling of the move using our highest
2464 		 * TSN value
2465 		 */
2466 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2467 		slide_end = (lgap >> 3);
2468 		if (slide_end < slide_from) {
2469 			sctp_print_mapping_array(asoc);
2470 #ifdef INVARIANTS
2471 			panic("impossible slide");
2472 #else
2473 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2474 			    lgap, slide_end, slide_from, at);
2475 			return;
2476 #endif
2477 		}
2478 		if (slide_end > asoc->mapping_array_size) {
2479 #ifdef INVARIANTS
2480 			panic("would overrun buffer");
2481 #else
2482 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2483 			    asoc->mapping_array_size, slide_end);
2484 			slide_end = asoc->mapping_array_size;
2485 #endif
2486 		}
2487 		distance = (slide_end - slide_from) + 1;
2488 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2489 			sctp_log_map(old_base, old_cumack, old_highest,
2490 			    SCTP_MAP_PREPARE_SLIDE);
2491 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2492 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2493 		}
2494 		if (distance + slide_from > asoc->mapping_array_size ||
2495 		    distance < 0) {
2496 			/*
2497 			 * Here we do NOT slide forward the array so that
2498 			 * hopefully when more data comes in to fill it up
2499 			 * we will be able to slide it forward. Really I
2500 			 * don't think this should happen :-0
2501 			 */
2502 
2503 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2504 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2505 				    (uint32_t)asoc->mapping_array_size,
2506 				    SCTP_MAP_SLIDE_NONE);
2507 			}
2508 		} else {
2509 			int ii;
2510 
2511 			for (ii = 0; ii < distance; ii++) {
2512 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2513 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2514 
2515 			}
2516 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2517 				asoc->mapping_array[ii] = 0;
2518 				asoc->nr_mapping_array[ii] = 0;
2519 			}
2520 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2521 				asoc->highest_tsn_inside_map += (slide_from << 3);
2522 			}
2523 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2524 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2525 			}
2526 			asoc->mapping_array_base_tsn += (slide_from << 3);
2527 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2528 				sctp_log_map(asoc->mapping_array_base_tsn,
2529 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2530 				    SCTP_MAP_SLIDE_RESULT);
2531 			}
2532 		}
2533 	}
2534 }
2535 
2536 void
2537 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2538 {
2539 	struct sctp_association *asoc;
2540 	uint32_t highest_tsn;
2541 	int is_a_gap;
2542 
2543 	sctp_slide_mapping_arrays(stcb);
2544 	asoc = &stcb->asoc;
2545 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2546 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2547 	} else {
2548 		highest_tsn = asoc->highest_tsn_inside_map;
2549 	}
2550 	/* Is there a gap now? */
2551 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2552 
2553 	/*
2554 	 * Now we need to see if we need to queue a sack or just start the
2555 	 * timer (if allowed).
2556 	 */
2557 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2558 		/*
2559 		 * Special case: in the SHUTDOWN-SENT state we make sure the
2560 		 * SACK timer is off and instead send a SHUTDOWN and a
2561 		 * SACK
2562 		 */
2563 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2564 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2565 			    stcb->sctp_ep, stcb, NULL,
2566 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2567 		}
2568 		sctp_send_shutdown(stcb,
2569 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2570 		if (is_a_gap) {
2571 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2572 		}
2573 	} else {
2574 		/*
2575 		 * CMT DAC algorithm: increase number of packets received
2576 		 * since last ack
2577 		 */
2578 		stcb->asoc.cmt_dac_pkts_rcvd++;
2579 
2580 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2581 							 * SACK */
2582 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2583 							 * longer is one */
2584 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2585 		    (is_a_gap) ||	/* is still a gap */
2586 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2587 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2588 		    ) {
2589 
2590 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2591 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2592 			    (stcb->asoc.send_sack == 0) &&
2593 			    (stcb->asoc.numduptsns == 0) &&
2594 			    (stcb->asoc.delayed_ack) &&
2595 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2596 
2597 				/*
2598 				 * CMT DAC algorithm: With CMT, delay acks
2599 				 * even in the face of reordering. Therefore,
2600 				 * acks that do not have to be sent because
2601 				 * of the above reasons will be delayed. That
2602 				 * is, acks that would have been sent due to
2603 				 * gap reports will be delayed with DAC.
2604 				 * Start the delayed ack timer.
2607 				 */
2608 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2609 				    stcb->sctp_ep, stcb, NULL);
2610 			} else {
2611 				/*
2612 				 * Ok we must build a SACK since the timer
2613 				 * is pending, we got our first packet OR
2614 				 * there are gaps or duplicates.
2615 				 */
2616 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2617 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2618 			}
2619 		} else {
2620 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2621 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2622 				    stcb->sctp_ep, stcb, NULL);
2623 			}
2624 		}
2625 	}
2626 }
2627 
2628 int
2629 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2630     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2631     struct sctp_nets *net, uint32_t *high_tsn)
2632 {
2633 	struct sctp_chunkhdr *ch, chunk_buf;
2634 	struct sctp_association *asoc;
2635 	int num_chunks = 0;	/* number of control chunks processed */
2636 	int stop_proc = 0;
2637 	int chk_length, break_flag, last_chunk;
2638 	int abort_flag = 0, was_a_gap;
2639 	struct mbuf *m;
2640 	uint32_t highest_tsn;
2641 
2642 	/* set the rwnd */
2643 	sctp_set_rwnd(stcb, &stcb->asoc);
2644 
2645 	m = *mm;
2646 	SCTP_TCB_LOCK_ASSERT(stcb);
2647 	asoc = &stcb->asoc;
2648 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2649 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2650 	} else {
2651 		highest_tsn = asoc->highest_tsn_inside_map;
2652 	}
2653 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2654 	/*
2655 	 * Set up where we got the last DATA packet from for any SACK that
2656 	 * may need to go out. Don't bump the net. This is done ONLY when a
2657 	 * chunk is assigned.
2658 	 */
2659 	asoc->last_data_chunk_from = net;
2660 
2661 	/*-
2662 	 * Now before we proceed we must figure out if this is a wasted
2663 	 * cluster... i.e. it is a small packet sent in and yet the driver
2664 	 * underneath allocated a full cluster for it. If so we must copy it
2665 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2666 	 * with cluster starvation. Note for __Panda__ we don't do this
2667 	 * since it has clusters all the way down to 64 bytes.
2668 	 */
2669 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2670 		/* we only handle mbufs that are singletons.. not chains */
2671 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2672 		if (m) {
2673 			/* ok, let's see if we can copy the data up */
2674 			caddr_t *from, *to;
2675 
2676 			/* get the pointers and copy */
2677 			to = mtod(m, caddr_t *);
2678 			from = mtod((*mm), caddr_t *);
2679 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2680 			/* copy the length and free up the old */
2681 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2682 			sctp_m_freem(*mm);
2683 			/* success, back copy */
2684 			*mm = m;
2685 		} else {
2686 			/* We are in trouble in the mbuf world .. yikes */
2687 			m = *mm;
2688 		}
2689 	}
2690 	/* get pointer to the first chunk header */
2691 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2692 	    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2693 	if (ch == NULL) {
2694 		return (1);
2695 	}
2696 	/*
2697 	 * process all DATA chunks...
2698 	 */
2699 	*high_tsn = asoc->cumulative_tsn;
2700 	break_flag = 0;
2701 	asoc->data_pkts_seen++;
2702 	while (stop_proc == 0) {
2703 		/* validate chunk length */
2704 		chk_length = ntohs(ch->chunk_length);
2705 		if (length - *offset < chk_length) {
2706 			/* all done, mutilated chunk */
2707 			stop_proc = 1;
2708 			continue;
2709 		}
2710 		if ((asoc->idata_supported == 1) &&
2711 		    (ch->chunk_type == SCTP_DATA)) {
2712 			struct mbuf *op_err;
2713 			char msg[SCTP_DIAG_INFO_LEN];
2714 
2715 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2716 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2717 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2718 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2719 			return (2);
2720 		}
2721 		if ((asoc->idata_supported == 0) &&
2722 		    (ch->chunk_type == SCTP_IDATA)) {
2723 			struct mbuf *op_err;
2724 			char msg[SCTP_DIAG_INFO_LEN];
2725 
2726 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2727 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2728 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2729 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2730 			return (2);
2731 		}
2732 		if ((ch->chunk_type == SCTP_DATA) ||
2733 		    (ch->chunk_type == SCTP_IDATA)) {
2734 			int clen;
2735 
2736 			if (ch->chunk_type == SCTP_DATA) {
2737 				clen = sizeof(struct sctp_data_chunk);
2738 			} else {
2739 				clen = sizeof(struct sctp_idata_chunk);
2740 			}
2741 			if (chk_length < clen) {
2742 				/*
2743 				 * Need to send an abort since we had an
2744 				 * invalid data chunk.
2745 				 */
2746 				struct mbuf *op_err;
2747 				char msg[SCTP_DIAG_INFO_LEN];
2748 
2749 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2750 				    chk_length);
2751 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2752 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2753 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2754 				return (2);
2755 			}
2756 #ifdef SCTP_AUDITING_ENABLED
2757 			sctp_audit_log(0xB1, 0);
2758 #endif
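			/*
			 * SCTP_SIZE32() rounds the chunk length up to a
			 * multiple of 4, since chunks are padded on the
			 * wire; for example, a chunk_length of 37 occupies
			 * 40 bytes. A chunk whose padded length reaches the
			 * end of the packet is therefore the last one.
			 */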
2759 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2760 				last_chunk = 1;
2761 			} else {
2762 				last_chunk = 0;
2763 			}
2764 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2765 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2766 			    last_chunk, ch->chunk_type)) {
2767 				num_chunks++;
2768 			}
2769 			if (abort_flag)
2770 				return (2);
2771 
2772 			if (break_flag) {
2773 				/*
2774 				 * Set because we ran out of rwnd space and
2775 				 * have no drop-report space left.
2776 				 */
2777 				stop_proc = 1;
2778 				continue;
2779 			}
2780 		} else {
2781 			/* not a data chunk in the data region */
2782 			switch (ch->chunk_type) {
2783 			case SCTP_INITIATION:
2784 			case SCTP_INITIATION_ACK:
2785 			case SCTP_SELECTIVE_ACK:
2786 			case SCTP_NR_SELECTIVE_ACK:
2787 			case SCTP_HEARTBEAT_REQUEST:
2788 			case SCTP_HEARTBEAT_ACK:
2789 			case SCTP_ABORT_ASSOCIATION:
2790 			case SCTP_SHUTDOWN:
2791 			case SCTP_SHUTDOWN_ACK:
2792 			case SCTP_OPERATION_ERROR:
2793 			case SCTP_COOKIE_ECHO:
2794 			case SCTP_COOKIE_ACK:
2795 			case SCTP_ECN_ECHO:
2796 			case SCTP_ECN_CWR:
2797 			case SCTP_SHUTDOWN_COMPLETE:
2798 			case SCTP_AUTHENTICATION:
2799 			case SCTP_ASCONF_ACK:
2800 			case SCTP_PACKET_DROPPED:
2801 			case SCTP_STREAM_RESET:
2802 			case SCTP_FORWARD_CUM_TSN:
2803 			case SCTP_ASCONF:
2804 				{
2805 					/*
2806 					 * Now, what do we do with KNOWN
2807 					 * chunks that are NOT in the right
2808 					 * place?
2809 					 *
2810 					 * We treat them as a protocol
2811 					 * violation and abort the
2812 					 * association. We may later want a
2813 					 * sysctl to switch to other
2814 					 * behaviour, such as processing them.
2815 					 */
2816 					struct mbuf *op_err;
2817 					char msg[SCTP_DIAG_INFO_LEN];
2818 
2819 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2820 					    ch->chunk_type);
2821 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2822 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2823 					return (2);
2824 				}
2825 			default:
2826 				/* unknown chunk type, use bit rules */
2827 				if (ch->chunk_type & 0x40) {
2828 					/* Add an error report to the queue */
2829 					struct mbuf *op_err;
2830 					struct sctp_gen_error_cause *cause;
2831 
2832 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2833 					    0, M_NOWAIT, 1, MT_DATA);
2834 					if (op_err != NULL) {
2835 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2836 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2837 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2838 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2839 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2840 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2841 							sctp_queue_op_err(stcb, op_err);
2842 						} else {
2843 							sctp_m_freem(op_err);
2844 						}
2845 					}
2846 				}
2847 				if ((ch->chunk_type & 0x80) == 0) {
2848 					/* discard the rest of this packet */
2849 					stop_proc = 1;
2850 				}	/* else skip this bad chunk and
2851 					 * continue... */
2852 				break;
2853 			}	/* switch of chunk type */
2854 		}
2855 		*offset += SCTP_SIZE32(chk_length);
2856 		if ((*offset >= length) || stop_proc) {
2857 			/* no more data left in the mbuf chain */
2858 			stop_proc = 1;
2859 			continue;
2860 		}
2861 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2862 		    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2863 		if (ch == NULL) {
2864 			*offset = length;
2865 			stop_proc = 1;
2866 			continue;
2867 		}
2868 	}
2869 	if (break_flag) {
2870 		/*
2871 		 * we need to report rwnd overrun drops.
2872 		 */
2873 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2874 	}
2875 	if (num_chunks) {
2876 		/*
2877 		 * We got data; update the time for auto-close and give the
2878 		 * peer credit for being alive.
2879 		 */
2880 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2881 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2882 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2883 			    stcb->asoc.overall_error_count,
2884 			    0,
2885 			    SCTP_FROM_SCTP_INDATA,
2886 			    __LINE__);
2887 		}
2888 		stcb->asoc.overall_error_count = 0;
2889 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2890 	}
2891 	/* now service all of the reassm queue if needed */
2892 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2893 		/* Assure that we ack right away */
2894 		stcb->asoc.send_sack = 1;
2895 	}
2896 	/* Start a sack timer or QUEUE a SACK for sending */
2897 	sctp_sack_check(stcb, was_a_gap);
2898 	return (0);
2899 }
2900 
2901 static int
2902 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2903     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2904     int *num_frs,
2905     uint32_t *biggest_newly_acked_tsn,
2906     uint32_t *this_sack_lowest_newack,
2907     int *rto_ok)
2908 {
2909 	struct sctp_tmit_chunk *tp1;
2910 	unsigned int theTSN;
2911 	int j, wake_him = 0, circled = 0;
2912 
2913 	/* Recover the tp1 we last saw */
2914 	tp1 = *p_tp1;
2915 	if (tp1 == NULL) {
2916 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2917 	}
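	/*
	 * Gap-ack block offsets are relative to the cumulative TSN ack.
	 * For example, with last_tsn = 1000 a block with start = 2 and
	 * end = 4 acknowledges TSNs 1002 through 1004, which is exactly
	 * what the j + last_tsn computation below walks through.
	 */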
2918 	for (j = frag_strt; j <= frag_end; j++) {
2919 		theTSN = j + last_tsn;
2920 		while (tp1) {
2921 			if (tp1->rec.data.doing_fast_retransmit)
2922 				(*num_frs) += 1;
2923 
2924 			/*-
2925 			 * CMT: CUCv2 algorithm. For each TSN being
2926 			 * processed from the sent queue, track the
2927 			 * next expected pseudo-cumack, or
2928 			 * rtx_pseudo_cumack, if required. Separate
2929 			 * cumack trackers for first transmissions,
2930 			 * and retransmissions.
2931 			 */
2932 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2933 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2934 			    (tp1->snd_count == 1)) {
2935 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2936 				tp1->whoTo->find_pseudo_cumack = 0;
2937 			}
2938 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2939 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2940 			    (tp1->snd_count > 1)) {
2941 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2942 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2943 			}
2944 			if (tp1->rec.data.tsn == theTSN) {
2945 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2946 					/*-
2947 					 * must be held until
2948 					 * cum-ack passes
2949 					 */
2950 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2951 						/*-
2952 						 * If it is less than RESEND, it is
2953 						 * now no-longer in flight.
2954 						 * Higher values may already be set
2955 						 * via previous Gap Ack Blocks...
2956 						 * i.e. ACKED or RESEND.
2957 						 */
2958 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2959 						    *biggest_newly_acked_tsn)) {
2960 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2961 						}
2962 						/*-
2963 						 * CMT: SFR algo (and HTNA) - set
2964 						 * saw_newack to 1 for dest being
2965 						 * newly acked. update
2966 						 * this_sack_highest_newack if
2967 						 * appropriate.
2968 						 */
2969 						if (tp1->rec.data.chunk_was_revoked == 0)
2970 							tp1->whoTo->saw_newack = 1;
2971 
2972 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2973 						    tp1->whoTo->this_sack_highest_newack)) {
2974 							tp1->whoTo->this_sack_highest_newack =
2975 							    tp1->rec.data.tsn;
2976 						}
2977 						/*-
2978 						 * CMT DAC algo: also update
2979 						 * this_sack_lowest_newack
2980 						 */
2981 						if (*this_sack_lowest_newack == 0) {
2982 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2983 								sctp_log_sack(*this_sack_lowest_newack,
2984 								    last_tsn,
2985 								    tp1->rec.data.tsn,
2986 								    0,
2987 								    0,
2988 								    SCTP_LOG_TSN_ACKED);
2989 							}
2990 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2991 						}
2992 						/*-
2993 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2994 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2995 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2996 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2997 						 * Separate pseudo_cumack trackers for first transmissions and
2998 						 * retransmissions.
2999 						 */
3000 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3001 							if (tp1->rec.data.chunk_was_revoked == 0) {
3002 								tp1->whoTo->new_pseudo_cumack = 1;
3003 							}
3004 							tp1->whoTo->find_pseudo_cumack = 1;
3005 						}
3006 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3007 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3008 						}
3009 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3010 							if (tp1->rec.data.chunk_was_revoked == 0) {
3011 								tp1->whoTo->new_pseudo_cumack = 1;
3012 							}
3013 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3014 						}
3015 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3016 							sctp_log_sack(*biggest_newly_acked_tsn,
3017 							    last_tsn,
3018 							    tp1->rec.data.tsn,
3019 							    frag_strt,
3020 							    frag_end,
3021 							    SCTP_LOG_TSN_ACKED);
3022 						}
3023 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3024 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3025 							    tp1->whoTo->flight_size,
3026 							    tp1->book_size,
3027 							    (uint32_t)(uintptr_t)tp1->whoTo,
3028 							    tp1->rec.data.tsn);
3029 						}
3030 						sctp_flight_size_decrease(tp1);
3031 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3032 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3033 							    tp1);
3034 						}
3035 						sctp_total_flight_decrease(stcb, tp1);
3036 
3037 						tp1->whoTo->net_ack += tp1->send_size;
3038 						if (tp1->snd_count < 2) {
3039 							/*-
3040 							 * True non-retransmitted chunk
3041 							 */
3042 							tp1->whoTo->net_ack2 += tp1->send_size;
3043 
3044 							/*-
3045 							 * update RTO too?
3046 							 */
3047 							if (tp1->do_rtt) {
3048 								if (*rto_ok) {
3049 									tp1->whoTo->RTO =
3050 									    sctp_calculate_rto(stcb,
3051 									    &stcb->asoc,
3052 									    tp1->whoTo,
3053 									    &tp1->sent_rcv_time,
3054 									    sctp_align_safe_nocopy,
3055 									    SCTP_RTT_FROM_DATA);
3056 									*rto_ok = 0;
3057 								}
3058 								if (tp1->whoTo->rto_needed == 0) {
3059 									tp1->whoTo->rto_needed = 1;
3060 								}
3061 								tp1->do_rtt = 0;
3062 							}
3063 						}
3064 					}
3065 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3066 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3067 						    stcb->asoc.this_sack_highest_gap)) {
3068 							stcb->asoc.this_sack_highest_gap =
3069 							    tp1->rec.data.tsn;
3070 						}
3071 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3072 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3073 #ifdef SCTP_AUDITING_ENABLED
3074 							sctp_audit_log(0xB2,
3075 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3076 #endif
3077 						}
3078 					}
3079 					/*-
3080 					 * All chunks NOT UNSENT fall through here and are marked
3081 					 * (leave PR-SCTP ones that are to skip alone though)
3082 					 */
3083 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3084 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3085 						tp1->sent = SCTP_DATAGRAM_MARKED;
3086 					}
3087 					if (tp1->rec.data.chunk_was_revoked) {
3088 						/* deflate the cwnd */
3089 						tp1->whoTo->cwnd -= tp1->book_size;
3090 						tp1->rec.data.chunk_was_revoked = 0;
3091 					}
3092 					/* NR Sack code here */
3093 					if (nr_sacking &&
3094 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3095 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3096 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3097 #ifdef INVARIANTS
3098 						} else {
3099 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3100 #endif
3101 						}
3102 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3103 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3104 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3105 							stcb->asoc.trigger_reset = 1;
3106 						}
3107 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3108 						if (tp1->data) {
3109 							/*
3110 							 * sa_ignore
3111 							 * NO_NULL_CHK
3112 							 */
3113 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3114 							sctp_m_freem(tp1->data);
3115 							tp1->data = NULL;
3116 						}
3117 						wake_him++;
3118 					}
3119 				}
3120 				break;
3121 			}	/* if (tp1->tsn == theTSN) */
3122 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3123 				break;
3124 			}
3125 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3126 			if ((tp1 == NULL) && (circled == 0)) {
3127 				circled++;
3128 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3129 			}
3130 		}		/* end while (tp1) */
3131 		if (tp1 == NULL) {
3132 			circled = 0;
3133 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3134 		}
3135 		/* In case the fragments were not in order we must reset */
3136 	}			/* end for (j = fragStart */
3137 	*p_tp1 = tp1;
3138 	return (wake_him);	/* Return value only used for nr-sack */
3139 }
3140 
3141 
3142 static int
3143 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3144     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3145     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3146     int num_seg, int num_nr_seg, int *rto_ok)
3147 {
3148 	struct sctp_gap_ack_block *frag, block;
3149 	struct sctp_tmit_chunk *tp1;
3150 	int i;
3151 	int num_frs = 0;
3152 	int chunk_freed;
3153 	int non_revocable;
3154 	uint16_t frag_strt, frag_end, prev_frag_end;
3155 
3156 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3157 	prev_frag_end = 0;
3158 	chunk_freed = 0;
3159 
3160 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3161 		if (i == num_seg) {
3162 			prev_frag_end = 0;
3163 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3164 		}
3165 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3166 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3167 		*offset += sizeof(block);
3168 		if (frag == NULL) {
3169 			return (chunk_freed);
3170 		}
3171 		frag_strt = ntohs(frag->start);
3172 		frag_end = ntohs(frag->end);
3173 
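		/*
		 * Per RFC 4960, Section 3.3.4, gap ack block boundaries are
		 * offsets relative to the cumulative TSN ack. For example,
		 * with last_tsn == 1000, a block with start == 2 and
		 * end == 4 reports TSNs 1002 through 1004 as received.
		 */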
3174 		if (frag_strt > frag_end) {
3175 			/* This gap report is malformed, skip it. */
3176 			continue;
3177 		}
3178 		if (frag_strt <= prev_frag_end) {
3179 			/* This gap report is not in order, so restart. */
3180 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3181 		}
3182 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3183 			*biggest_tsn_acked = last_tsn + frag_end;
3184 		}
3185 		if (i < num_seg) {
3186 			non_revocable = 0;
3187 		} else {
3188 			non_revocable = 1;
3189 		}
3190 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3191 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3192 		    this_sack_lowest_newack, rto_ok)) {
3193 			chunk_freed = 1;
3194 		}
3195 		prev_frag_end = frag_end;
3196 	}
3197 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3198 		if (num_frs)
3199 			sctp_log_fr(*biggest_tsn_acked,
3200 			    *biggest_newly_acked_tsn,
3201 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3202 	}
3203 	return (chunk_freed);
3204 }
3205 
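/*
 * A TSN above the cumulative ack that an earlier SACK reported as received,
 * but that this SACK no longer covers, has been "revoked" by the peer
 * (reneging). Such chunks are moved from ACKED back to SENT and counted as
 * in flight again, so timers and retransmission logic pick them up.
 */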
3206 static void
3207 sctp_check_for_revoked(struct sctp_tcb *stcb,
3208     struct sctp_association *asoc, uint32_t cumack,
3209     uint32_t biggest_tsn_acked)
3210 {
3211 	struct sctp_tmit_chunk *tp1;
3212 
3213 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3214 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3215 			/*
3216 			 * OK, this chunk is either ACKED or MARKED. If it is
3217 			 * ACKED it was previously acked but not by this SACK,
3218 			 * i.e. it has been revoked. If it is MARKED it was
3219 			 * ACK'ed again.
3220 			 */
3221 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3222 				break;
3223 			}
3224 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3225 				/* it has been revoked */
3226 				tp1->sent = SCTP_DATAGRAM_SENT;
3227 				tp1->rec.data.chunk_was_revoked = 1;
3228 				/*
3229 				 * We must add this stuff back in to assure
3230 				 * timers and such get started.
3231 				 */
3232 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3233 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3234 					    tp1->whoTo->flight_size,
3235 					    tp1->book_size,
3236 					    (uint32_t)(uintptr_t)tp1->whoTo,
3237 					    tp1->rec.data.tsn);
3238 				}
3239 				sctp_flight_size_increase(tp1);
3240 				sctp_total_flight_increase(stcb, tp1);
3241 				/*
3242 				 * We inflate the cwnd to compensate for our
3243 				 * artificial inflation of the flight_size.
3244 				 */
3245 				tp1->whoTo->cwnd += tp1->book_size;
3246 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3247 					sctp_log_sack(asoc->last_acked_seq,
3248 					    cumack,
3249 					    tp1->rec.data.tsn,
3250 					    0,
3251 					    0,
3252 					    SCTP_LOG_TSN_REVOKED);
3253 				}
3254 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3255 				/* it has been re-acked in this SACK */
3256 				tp1->sent = SCTP_DATAGRAM_ACKED;
3257 			}
3258 		}
3259 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3260 			break;
3261 	}
3262 }
3263 
3264 
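/*
 * Strike (increment the miss indication of) each chunk that this SACK shows
 * as still missing. Per RFC 4960, Section 7.2.4, a chunk that has been
 * reported missing enough times reaches SCTP_DATAGRAM_RESEND (three strikes)
 * and is marked for fast retransmission; the CMT SFR/DAC/HTNA checks below
 * restrict which chunks this particular SACK is allowed to strike.
 */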
3265 static void
3266 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3267     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3268 {
3269 	struct sctp_tmit_chunk *tp1;
3270 	int strike_flag = 0;
3271 	struct timeval now;
3272 	int tot_retrans = 0;
3273 	uint32_t sending_seq;
3274 	struct sctp_nets *net;
3275 	int num_dests_sacked = 0;
3276 
3277 	/*
3278 	 * Select the sending_seq: this is either the next thing ready to be
3279 	 * sent but not yet transmitted, OR the next seq we will assign.
3280 	 */
3281 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3282 	if (tp1 == NULL) {
3283 		sending_seq = asoc->sending_seq;
3284 	} else {
3285 		sending_seq = tp1->rec.data.tsn;
3286 	}
3287 
3288 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3289 	if ((asoc->sctp_cmt_on_off > 0) &&
3290 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3291 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3292 			if (net->saw_newack)
3293 				num_dests_sacked++;
3294 		}
3295 	}
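	/*
	 * A "mixed" SACK is one acking new data on more than one
	 * destination; the DAC checks below only apply when exactly one
	 * destination saw a new ack (num_dests_sacked == 1).
	 */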
3296 	if (stcb->asoc.prsctp_supported) {
3297 		(void)SCTP_GETTIME_TIMEVAL(&now);
3298 	}
3299 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3300 		strike_flag = 0;
3301 		if (tp1->no_fr_allowed) {
3302 			/* this one had a timeout or something */
3303 			continue;
3304 		}
3305 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3306 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3307 				sctp_log_fr(biggest_tsn_newly_acked,
3308 				    tp1->rec.data.tsn,
3309 				    tp1->sent,
3310 				    SCTP_FR_LOG_CHECK_STRIKE);
3311 		}
3312 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3313 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3314 			/* done */
3315 			break;
3316 		}
3317 		if (stcb->asoc.prsctp_supported) {
3318 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3319 				/* Is it expired? */
3320 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3321 					/* Yes so drop it */
3322 					if (tp1->data != NULL) {
3323 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3324 						    SCTP_SO_NOT_LOCKED);
3325 					}
3326 					continue;
3327 				}
3328 			}
3329 		}
3330 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3331 			/* we are beyond the tsn in the sack  */
3332 			break;
3333 		}
3334 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3335 			/* either a RESEND, ACKED, or MARKED */
3336 			/* skip */
3337 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3338 							/* Continue striking FWD-TSN chunks */
3339 				tp1->rec.data.fwd_tsn_cnt++;
3340 			}
3341 			continue;
3342 		}
3343 		/*
3344 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3345 		 */
3346 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3347 			/*
3348 			 * No new acks were received for data sent to this
3349 			 * dest. Therefore, according to the SFR algo for
3350 			 * CMT, no data sent to this dest can be marked for
3351 			 * FR using this SACK.
3352 			 */
3353 			continue;
3354 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3355 		    tp1->whoTo->this_sack_highest_newack)) {
3356 			/*
3357 			 * CMT: New acks were received for data sent to
3358 			 * this dest. But no new acks were seen for data
3359 			 * sent after tp1. Therefore, according to the SFR
3360 			 * algo for CMT, tp1 cannot be marked for FR using
3361 			 * this SACK. This step covers part of the DAC algo
3362 			 * and the HTNA algo as well.
3363 			 */
3364 			continue;
3365 		}
3366 		/*
3367 		 * Here we check to see if we have already done a FR
3368 		 * and if so we see if the biggest TSN we saw in the sack is
3369 		 * smaller than the recovery point. If so we don't strike
3370 		 * the tsn... otherwise we CAN strike the TSN.
3371 		 */
3372 		/*
3373 		 * @@@ JRI: Check for CMT if (accum_moved &&
3374 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3375 		 * 0)) {
3376 		 */
3377 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3378 			/*
3379 			 * Strike the TSN if in fast-recovery and cum-ack
3380 			 * moved.
3381 			 */
3382 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3383 				sctp_log_fr(biggest_tsn_newly_acked,
3384 				    tp1->rec.data.tsn,
3385 				    tp1->sent,
3386 				    SCTP_FR_LOG_STRIKE_CHUNK);
3387 			}
3388 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3389 				tp1->sent++;
3390 			}
3391 			if ((asoc->sctp_cmt_on_off > 0) &&
3392 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3393 				/*
3394 				 * CMT DAC algorithm: If the SACK flag is set
3395 				 * to 0, the lowest_newack test will not pass
3396 				 * because it would have been set to the
3397 				 * cumack earlier. If tp1 is not already
3398 				 * marked for rtx, the SACK is not a mixed
3399 				 * SACK, and tp1 is not between two sacked
3400 				 * TSNs, then strike it one more time. NOTE:
3401 				 * we strike one additional time since the
3402 				 * SACK DAC flag indicates that two packets
3403 				 * have been received after this missing TSN.
3404 				 */
3405 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3406 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3407 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3408 						sctp_log_fr(16 + num_dests_sacked,
3409 						    tp1->rec.data.tsn,
3410 						    tp1->sent,
3411 						    SCTP_FR_LOG_STRIKE_CHUNK);
3412 					}
3413 					tp1->sent++;
3414 				}
3415 			}
3416 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3417 		    (asoc->sctp_cmt_on_off == 0)) {
3418 			/*
3419 			 * For those that have done a FR we must take
3420 			 * special consideration if we strike. That is, the
3421 			 * biggest_newly_acked must be higher than the
3422 			 * sending_seq at the time we did the FR.
3423 			 */
3424 			if (
3425 #ifdef SCTP_FR_TO_ALTERNATE
3426 			/*
3427 			 * If FR's go to new networks, then we must only do
3428 			 * this for singly homed asoc's. However if the FR's
3429 			 * go to the same network (Armando's work) then it's
3430 			 * OK to FR multiple times.
3431 			 */
3432 			    (asoc->numnets < 2)
3433 #else
3434 			    (1)
3435 #endif
3436 			    ) {
3437 
3438 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3439 				    tp1->rec.data.fast_retran_tsn)) {
3440 					/*
3441 					 * Strike the TSN, since this ack is
3442 					 * beyond where things were when we
3443 					 * did a FR.
3444 					 */
3445 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3446 						sctp_log_fr(biggest_tsn_newly_acked,
3447 						    tp1->rec.data.tsn,
3448 						    tp1->sent,
3449 						    SCTP_FR_LOG_STRIKE_CHUNK);
3450 					}
3451 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3452 						tp1->sent++;
3453 					}
3454 					strike_flag = 1;
3455 					if ((asoc->sctp_cmt_on_off > 0) &&
3456 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3457 						/*
3458 						 * CMT DAC algorithm: If
3459 						 * the SACK flag is 0, the
3460 						 * lowest_newack test will
3461 						 * not pass because it
3462 						 * would have been set to
3463 						 * the cumack earlier. If
3464 						 * tp1 is not already
3465 						 * marked for rtx, the SACK
3466 						 * is not a mixed SACK, and
3467 						 * tp1 is not between two
3468 						 * sacked TSNs, strike it
3469 						 * one more time. NOTE: we
3470 						 * strike one extra time
3471 						 * since the SACK DAC flag
3472 						 * indicates that two
3473 						 * packets have arrived
3474 						 * after this missing TSN.
3475 						 */
3476 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3477 						    (num_dests_sacked == 1) &&
3478 						    SCTP_TSN_GT(this_sack_lowest_newack,
3479 						    tp1->rec.data.tsn)) {
3480 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 								sctp_log_fr(32 + num_dests_sacked,
3482 								    tp1->rec.data.tsn,
3483 								    tp1->sent,
3484 								    SCTP_FR_LOG_STRIKE_CHUNK);
3485 							}
3486 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3487 								tp1->sent++;
3488 							}
3489 						}
3490 					}
3491 				}
3492 			}
3493 			/*
3494 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3495 			 * algo covers HTNA.
3496 			 */
3497 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3498 		    biggest_tsn_newly_acked)) {
3499 			/*
3500 			 * We don't strike these: this is the HTNA
3501 			 * algorithm, i.e. we don't strike if our TSN is
3502 			 * larger than the Highest TSN Newly Acked.
3503 			 */
3504 			;
3505 		} else {
3506 			/* Strike the TSN */
3507 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3508 				sctp_log_fr(biggest_tsn_newly_acked,
3509 				    tp1->rec.data.tsn,
3510 				    tp1->sent,
3511 				    SCTP_FR_LOG_STRIKE_CHUNK);
3512 			}
3513 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3514 				tp1->sent++;
3515 			}
3516 			if ((asoc->sctp_cmt_on_off > 0) &&
3517 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3518 				/*
3519 				 * CMT DAC algorithm: If the SACK flag is set
3520 				 * to 0, the lowest_newack test will not pass
3521 				 * because it would have been set to the
3522 				 * cumack earlier. If tp1 is not already
3523 				 * marked for rtx, the SACK is not a mixed
3524 				 * SACK, and tp1 is not between two sacked
3525 				 * TSNs, then strike it one more time. NOTE:
3526 				 * we strike one additional time since the
3527 				 * SACK DAC flag indicates that two packets
3528 				 * have been received after this missing TSN.
3529 				 */
3530 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3531 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3532 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3533 						sctp_log_fr(48 + num_dests_sacked,
3534 						    tp1->rec.data.tsn,
3535 						    tp1->sent,
3536 						    SCTP_FR_LOG_STRIKE_CHUNK);
3537 					}
3538 					tp1->sent++;
3539 				}
3540 			}
3541 		}
3542 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3543 			struct sctp_nets *alt;
3544 
3545 			/* fix counts and things */
3546 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3547 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3548 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3549 				    tp1->book_size,
3550 				    (uint32_t)(uintptr_t)tp1->whoTo,
3551 				    tp1->rec.data.tsn);
3552 			}
3553 			if (tp1->whoTo) {
3554 				tp1->whoTo->net_ack++;
3555 				sctp_flight_size_decrease(tp1);
3556 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3557 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3558 					    tp1);
3559 				}
3560 			}
3561 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3562 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3563 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3564 			}
3565 			/* add back to the rwnd */
3566 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3567 
3568 			/* remove from the total flight */
3569 			sctp_total_flight_decrease(stcb, tp1);
3570 
3571 			if ((stcb->asoc.prsctp_supported) &&
3572 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3573 				/*
3574 				 * Has it been retransmitted tv_sec times? -
3575 				 * we store the retran count there.
3576 				 */
3577 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3578 					/* Yes, so drop it */
3579 					if (tp1->data != NULL) {
3580 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3581 						    SCTP_SO_NOT_LOCKED);
3582 					}
3583 					/* Make sure to flag we had a FR */
3584 					tp1->whoTo->net_ack++;
3585 					continue;
3586 				}
3587 			}
3588 			/*
3589 			 * SCTP_PRINTF("OK, we are now ready to FR this
3590 			 * guy\n");
3591 			 */
3592 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3593 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3594 				    0, SCTP_FR_MARKED);
3595 			}
3596 			if (strike_flag) {
3597 				/* This is a subsequent FR */
3598 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3599 			}
3600 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3601 			if (asoc->sctp_cmt_on_off > 0) {
3602 				/*
3603 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3604 				 * If CMT is being used, then pick dest with
3605 				 * largest ssthresh for any retransmission.
3606 				 */
3607 				tp1->no_fr_allowed = 1;
3608 				alt = tp1->whoTo;
3609 				/* sa_ignore NO_NULL_CHK */
3610 				if (asoc->sctp_cmt_pf > 0) {
3611 					/*
3612 					 * JRS 5/18/07 - If CMT PF is on,
3613 					 * use the PF version of
3614 					 * find_alt_net()
3615 					 */
3616 					alt = sctp_find_alternate_net(stcb, alt, 2);
3617 				} else {
3618 					/*
3619 					 * JRS 5/18/07 - If only CMT is on,
3620 					 * use the CMT version of
3621 					 * find_alt_net()
3622 					 */
3623 					/* sa_ignore NO_NULL_CHK */
3624 					alt = sctp_find_alternate_net(stcb, alt, 1);
3625 				}
3626 				if (alt == NULL) {
3627 					alt = tp1->whoTo;
3628 				}
3629 				/*
3630 				 * CUCv2: If a different dest is picked for
3631 				 * the retransmission, then new
3632 				 * (rtx-)pseudo_cumack needs to be tracked
3633 				 * for orig dest. Let CUCv2 track new (rtx-)
3634 				 * pseudo-cumack always.
3635 				 */
3636 				if (tp1->whoTo) {
3637 					tp1->whoTo->find_pseudo_cumack = 1;
3638 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3639 				}
3640 			} else {/* CMT is OFF */
3641 
3642 #ifdef SCTP_FR_TO_ALTERNATE
3643 				/* Can we find an alternate? */
3644 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3645 #else
3646 				/*
3647 				 * default behavior is to NOT retransmit
3648 				 * FR's to an alternate. Armando Caro's
3649 				 * paper details why.
3650 				 */
3651 				alt = tp1->whoTo;
3652 #endif
3653 			}
3654 
3655 			tp1->rec.data.doing_fast_retransmit = 1;
3656 			tot_retrans++;
3657 			/* mark the sending seq for possible subsequent FR's */
3658 			/*
3659 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3660 			 * (uint32_t)tpi->rec.data.tsn);
3661 			 */
3662 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3663 				/*
3664 				 * If the send queue is empty then sending_seq
3665 				 * is the next sequence number that will be
3666 				 * assigned, i.e. one past the TSN we last
3667 				 * sent.
3668 				 */
3669 				tp1->rec.data.fast_retran_tsn = sending_seq;
3670 			} else {
3671 				/*
3672 				 * If there are chunks on the send queue
3673 				 * (unsent data that has made it from the
3674 				 * stream queues but not out the door), we
3675 				 * take the first one (which will have the
3676 				 * lowest TSN); it is one past the TSN we
3677 				 * last sent.
3678 				 */
3679 				struct sctp_tmit_chunk *ttt;
3680 
3681 				ttt = TAILQ_FIRST(&asoc->send_queue);
3682 				tp1->rec.data.fast_retran_tsn =
3683 				    ttt->rec.data.tsn;
3684 			}
3685 
3686 			if (tp1->do_rtt) {
3687 				/*
3688 				 * this guy had an RTO calculation pending on
3689 				 * it, cancel it
3690 				 */
3691 				if ((tp1->whoTo != NULL) &&
3692 				    (tp1->whoTo->rto_needed == 0)) {
3693 					tp1->whoTo->rto_needed = 1;
3694 				}
3695 				tp1->do_rtt = 0;
3696 			}
3697 			if (alt != tp1->whoTo) {
3698 				/* yes, there is an alternate. */
3699 				sctp_free_remote_addr(tp1->whoTo);
3700 				/* sa_ignore FREED_MEMORY */
3701 				tp1->whoTo = alt;
3702 				atomic_add_int(&alt->ref_count, 1);
3703 			}
3704 		}
3705 	}
3706 }
3707 
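/*
 * PR-SCTP (RFC 3758): walk the sent queue and advance the
 * "Advanced.Peer.Ack.Point" past chunks that have been abandoned
 * (SCTP_FORWARD_TSN_SKIP) or NR-acked. Returns the chunk at (or marking)
 * the current advanced ack point, which the caller uses when deciding
 * whether a FORWARD-TSN must be sent, or NULL if nothing could be skipped.
 */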
3708 struct sctp_tmit_chunk *
3709 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3710     struct sctp_association *asoc)
3711 {
3712 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3713 	struct timeval now;
3714 	int now_filled = 0;
3715 
3716 	if (asoc->prsctp_supported == 0) {
3717 		return (NULL);
3718 	}
3719 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3720 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3721 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3722 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3723 			/* no chance to advance, out of here */
3724 			break;
3725 		}
3726 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3727 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3728 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3729 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3730 				    asoc->advanced_peer_ack_point,
3731 				    tp1->rec.data.tsn, 0, 0);
3732 			}
3733 		}
3734 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3735 			/*
3736 			 * We can't fwd-tsn past any that are reliable, i.e.
3737 			 * ones retransmitted until the asoc fails.
3738 			 */
3739 			break;
3740 		}
3741 		if (!now_filled) {
3742 			(void)SCTP_GETTIME_TIMEVAL(&now);
3743 			now_filled = 1;
3744 		}
3745 		/*
3746 		 * Now we have a chunk which is marked for another
3747 		 * retransmission to a PR-stream but may have already run out
3748 		 * of its chances, OR has been marked to skip. Can we skip it
3749 		 * if it's a resend?
3750 		 */
3751 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3752 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3753 			/*
3754 			 * Now is this one marked for resend and its time is
3755 			 * now up?
3756 			 */
3757 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3758 				/* Yes so drop it */
3759 				if (tp1->data) {
3760 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3761 					    1, SCTP_SO_NOT_LOCKED);
3762 				}
3763 			} else {
3764 				/*
3765 				 * No, we are done when we hit one marked for
3766 				 * resend whose time has not expired.
3767 				 */
3768 				break;
3769 			}
3770 		}
3771 		/*
3772 		 * Ok now if this chunk is marked to drop it we can clean up
3773 		 * the chunk, advance our peer ack point and we can check
3774 		 * the next chunk.
3775 		 */
3776 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3778 			/* advance PeerAckPoint goes forward */
3779 			/* the advanced PeerAckPoint moves forward */
3780 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3781 				a_adv = tp1;
3782 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3783 				/* No update but we do save the chk */
3784 				a_adv = tp1;
3785 			}
3786 		} else {
3787 			/*
3788 			 * If it is still in RESEND we can advance no
3789 			 * further
3790 			 */
3791 			break;
3792 		}
3793 	}
3794 	return (a_adv);
3795 }
3796 
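/*
 * Consistency audit of the flight size accounting: classify every chunk on
 * the sent queue by state and report (or panic, under INVARIANTS) if
 * anything is still counted as in flight when the caller believes the
 * flight should be empty.
 */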
3797 static int
3798 sctp_fs_audit(struct sctp_association *asoc)
3799 {
3800 	struct sctp_tmit_chunk *chk;
3801 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3802 	int ret;
3803 #ifndef INVARIANTS
3804 	int entry_flight, entry_cnt;
3805 #endif
3806 
3807 	ret = 0;
3808 #ifndef INVARIANTS
3809 	entry_flight = asoc->total_flight;
3810 	entry_cnt = asoc->total_flight_count;
3811 #endif
3812 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3813 		return (0);
3814 
3815 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3816 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3817 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3818 			    chk->rec.data.tsn,
3819 			    chk->send_size,
3820 			    chk->snd_count);
3821 			inflight++;
3822 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3823 			resend++;
3824 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3825 			inbetween++;
3826 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3827 			above++;
3828 		} else {
3829 			acked++;
3830 		}
3831 	}
3832 
3833 	if ((inflight > 0) || (inbetween > 0)) {
3834 #ifdef INVARIANTS
3835 		panic("Flight size-express incorrect? \n");
3836 #else
3837 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3838 		    entry_flight, entry_cnt);
3839 
3840 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3841 		    inflight, inbetween, resend, above, acked);
3842 		ret = 1;
3843 #endif
3844 	}
3845 	return (ret);
3846 }
3847 
3848 
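/*
 * Undo the accounting for a window probe chunk once the peer's rwnd has
 * opened again: take it out of the flight counts and mark it for normal
 * retransmission, unless it was already acked or abandoned.
 */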
3849 static void
3850 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3851     struct sctp_association *asoc,
3852     struct sctp_tmit_chunk *tp1)
3853 {
3854 	tp1->window_probe = 0;
3855 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3856 		/* TSNs skipped; we do NOT move back. */
3857 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3858 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3859 		    tp1->book_size,
3860 		    (uint32_t)(uintptr_t)tp1->whoTo,
3861 		    tp1->rec.data.tsn);
3862 		return;
3863 	}
3864 	/* First setup this by shrinking flight */
3865 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3866 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3867 		    tp1);
3868 	}
3869 	sctp_flight_size_decrease(tp1);
3870 	sctp_total_flight_decrease(stcb, tp1);
3871 	/* Now mark for resend */
3872 	tp1->sent = SCTP_DATAGRAM_RESEND;
3873 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3874 
3875 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3876 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3877 		    tp1->whoTo->flight_size,
3878 		    tp1->book_size,
3879 		    (uint32_t)(uintptr_t)tp1->whoTo,
3880 		    tp1->rec.data.tsn);
3881 	}
3882 }
3883 
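/*
 * Express (fast path) SACK processing, used when only the cumulative ack
 * and the advertised rwnd need handling; a SACK carrying gap reports takes
 * the slow path through sctp_handle_sack() below.
 */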
3884 void
3885 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3886     uint32_t rwnd, int *abort_now, int ecne_seen)
3887 {
3888 	struct sctp_nets *net;
3889 	struct sctp_association *asoc;
3890 	struct sctp_tmit_chunk *tp1, *tp2;
3891 	uint32_t old_rwnd;
3892 	int win_probe_recovery = 0;
3893 	int win_probe_recovered = 0;
3894 	int j, done_once = 0;
3895 	int rto_ok = 1;
3896 	uint32_t send_s;
3897 
3898 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3899 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3900 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3901 	}
3902 	SCTP_TCB_LOCK_ASSERT(stcb);
3903 #ifdef SCTP_ASOCLOG_OF_TSNS
3904 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3905 	stcb->asoc.cumack_log_at++;
3906 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3907 		stcb->asoc.cumack_log_at = 0;
3908 	}
3909 #endif
3910 	asoc = &stcb->asoc;
3911 	old_rwnd = asoc->peers_rwnd;
3912 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3913 		/* old ack */
3914 		return;
3915 	} else if (asoc->last_acked_seq == cumack) {
3916 		/* Window update sack */
3917 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3918 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3919 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3920 			/* SWS sender side engages */
3921 			asoc->peers_rwnd = 0;
3922 		}
3923 		if (asoc->peers_rwnd > old_rwnd) {
3924 			goto again;
3925 		}
3926 		return;
3927 	}
3928 	/* First setup for CC stuff */
3929 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3930 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3931 			/* Drag along the window_tsn for cwr's */
3932 			net->cwr_window_tsn = cumack;
3933 		}
3934 		net->prev_cwnd = net->cwnd;
3935 		net->net_ack = 0;
3936 		net->net_ack2 = 0;
3937 
3938 		/*
3939 		 * CMT: Reset CUC and Fast recovery algo variables before
3940 		 * SACK processing
3941 		 */
3942 		net->new_pseudo_cumack = 0;
3943 		net->will_exit_fast_recovery = 0;
3944 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3945 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3946 		}
3947 	}
3948 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3949 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3950 		    sctpchunk_listhead);
3951 		send_s = tp1->rec.data.tsn + 1;
3952 	} else {
3953 		send_s = asoc->sending_seq;
3954 	}
3955 	if (SCTP_TSN_GE(cumack, send_s)) {
3956 		struct mbuf *op_err;
3957 		char msg[SCTP_DIAG_INFO_LEN];
3958 
3959 		*abort_now = 1;
3960 		/* XXX */
3961 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3962 		    cumack, send_s);
3963 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3964 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3965 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3966 		return;
3967 	}
3968 	asoc->this_sack_highest_gap = cumack;
3969 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3970 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3971 		    stcb->asoc.overall_error_count,
3972 		    0,
3973 		    SCTP_FROM_SCTP_INDATA,
3974 		    __LINE__);
3975 	}
3976 	stcb->asoc.overall_error_count = 0;
3977 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3978 		/* process the new consecutive TSN first */
3979 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3980 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3981 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3982 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3983 				}
3984 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3985 					/*
3986 					 * If it is less than ACKED, it is
3987 					 * now no longer in flight. Higher
3988 					 * values may occur during marking
3989 					 */
3990 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3991 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3992 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3993 							    tp1->whoTo->flight_size,
3994 							    tp1->book_size,
3995 							    (uint32_t)(uintptr_t)tp1->whoTo,
3996 							    tp1->rec.data.tsn);
3997 						}
3998 						sctp_flight_size_decrease(tp1);
3999 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4000 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4001 							    tp1);
4002 						}
4003 						/* sa_ignore NO_NULL_CHK */
4004 						sctp_total_flight_decrease(stcb, tp1);
4005 					}
4006 					tp1->whoTo->net_ack += tp1->send_size;
4007 					if (tp1->snd_count < 2) {
4008 						/*
4009 						 * True non-retransmitted
4010 						 * chunk
4011 						 */
4012 						tp1->whoTo->net_ack2 +=
4013 						    tp1->send_size;
4014 
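						/*
						 * Karn's algorithm: only a
						 * chunk that was never
						 * retransmitted (snd_count
						 * < 2) yields an
						 * unambiguous RTT sample,
						 * and rto_ok limits us to
						 * one RTO update per SACK.
						 */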
4015 						/* update RTO too? */
4016 						if (tp1->do_rtt) {
4017 							if (rto_ok) {
4018 								tp1->whoTo->RTO =
4019 								/*
4020 								 * sa_ignore
4021 								 * NO_NULL_CHK
4022 								 */
4023 								    sctp_calculate_rto(stcb,
4024 								    asoc, tp1->whoTo,
4025 								    &tp1->sent_rcv_time,
4026 								    sctp_align_safe_nocopy,
4027 								    SCTP_RTT_FROM_DATA);
4028 								rto_ok = 0;
4029 							}
4030 							if (tp1->whoTo->rto_needed == 0) {
4031 								tp1->whoTo->rto_needed = 1;
4032 							}
4033 							tp1->do_rtt = 0;
4034 						}
4035 					}
4036 					/*
4037 					 * CMT: CUCv2 algorithm. From the
4038 					 * cumack'd TSNs, for each TSN being
4039 					 * acked for the first time, set the
4040 					 * following variables for the
4041 					 * corresp destination.
4042 					 * new_pseudo_cumack will trigger a
4043 					 * cwnd update.
4044 					 * find_(rtx_)pseudo_cumack will
4045 					 * trigger search for the next
4046 					 * expected (rtx-)pseudo-cumack.
4047 					 */
4048 					tp1->whoTo->new_pseudo_cumack = 1;
4049 					tp1->whoTo->find_pseudo_cumack = 1;
4050 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4051 
4052 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4053 						/* sa_ignore NO_NULL_CHK */
4054 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4055 					}
4056 				}
4057 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4058 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4059 				}
4060 				if (tp1->rec.data.chunk_was_revoked) {
4061 					/* deflate the cwnd */
4062 					tp1->whoTo->cwnd -= tp1->book_size;
4063 					tp1->rec.data.chunk_was_revoked = 0;
4064 				}
4065 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4066 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4067 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4068 #ifdef INVARIANTS
4069 					} else {
4070 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4071 #endif
4072 					}
4073 				}
4074 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4075 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4076 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4077 					asoc->trigger_reset = 1;
4078 				}
4079 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4080 				if (tp1->data) {
4081 					/* sa_ignore NO_NULL_CHK */
4082 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4083 					sctp_m_freem(tp1->data);
4084 					tp1->data = NULL;
4085 				}
4086 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4087 					sctp_log_sack(asoc->last_acked_seq,
4088 					    cumack,
4089 					    tp1->rec.data.tsn,
4090 					    0,
4091 					    0,
4092 					    SCTP_LOG_FREE_SENT);
4093 				}
4094 				asoc->sent_queue_cnt--;
4095 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4096 			} else {
4097 				break;
4098 			}
4099 		}
4100 
4101 	}
4102 	/* sa_ignore NO_NULL_CHK */
4103 	if (stcb->sctp_socket) {
4104 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4105 		struct socket *so;
4106 
4107 #endif
4108 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4109 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4110 			/* sa_ignore NO_NULL_CHK */
4111 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4112 		}
4113 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4114 		so = SCTP_INP_SO(stcb->sctp_ep);
4115 		atomic_add_int(&stcb->asoc.refcnt, 1);
4116 		SCTP_TCB_UNLOCK(stcb);
4117 		SCTP_SOCKET_LOCK(so, 1);
4118 		SCTP_TCB_LOCK(stcb);
4119 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4120 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4121 			/* assoc was freed while we were unlocked */
4122 			SCTP_SOCKET_UNLOCK(so, 1);
4123 			return;
4124 		}
4125 #endif
4126 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4128 		SCTP_SOCKET_UNLOCK(so, 1);
4129 #endif
4130 	} else {
4131 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4132 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4133 		}
4134 	}
4135 
4136 	/* JRS - Use the congestion control given in the CC module */
4137 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4138 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4139 			if (net->net_ack2 > 0) {
4140 				/*
4141 				 * Karn's rule applies to clearing the error
4142 				 * count; this is optional.
4143 				 */
4144 				net->error_count = 0;
4145 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4146 					/* addr came good */
4147 					net->dest_state |= SCTP_ADDR_REACHABLE;
4148 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4149 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4150 				}
4151 				if (net == stcb->asoc.primary_destination) {
4152 					if (stcb->asoc.alternate) {
4153 						/*
4154 						 * release the alternate,
4155 						 * primary is good
4156 						 */
4157 						sctp_free_remote_addr(stcb->asoc.alternate);
4158 						stcb->asoc.alternate = NULL;
4159 					}
4160 				}
4161 				if (net->dest_state & SCTP_ADDR_PF) {
4162 					net->dest_state &= ~SCTP_ADDR_PF;
4163 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4164 					    stcb->sctp_ep, stcb, net,
4165 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4166 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4167 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4168 					/* Done with this net */
4169 					net->net_ack = 0;
4170 				}
4171 				/* restore any doubled timers */
4172 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4173 				if (net->RTO < stcb->asoc.minrto) {
4174 					net->RTO = stcb->asoc.minrto;
4175 				}
4176 				if (net->RTO > stcb->asoc.maxrto) {
4177 					net->RTO = stcb->asoc.maxrto;
4178 				}
4179 			}
4180 		}
4181 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4182 	}
4183 	asoc->last_acked_seq = cumack;
4184 
4185 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4186 		/* nothing left in-flight */
4187 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4188 			net->flight_size = 0;
4189 			net->partial_bytes_acked = 0;
4190 		}
4191 		asoc->total_flight = 0;
4192 		asoc->total_flight_count = 0;
4193 	}
4194 	/* RWND update */
4195 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4196 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4197 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4198 		/* SWS sender side engages */
4199 		asoc->peers_rwnd = 0;
4200 	}
4201 	if (asoc->peers_rwnd > old_rwnd) {
4202 		win_probe_recovery = 1;
4203 	}
4204 	/* Now assure a timer where data is queued at */
4205 again:
4206 	j = 0;
4207 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4208 		int to_ticks;
4209 
4210 		if (win_probe_recovery && (net->window_probe)) {
4211 			win_probe_recovered = 1;
4212 			/*
4213 			 * Find the first chunk that was used as a window
4214 			 * probe and move it back for retransmission
4215 			 */
4216 			/* sa_ignore FREED_MEMORY */
4217 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4218 				if (tp1->window_probe) {
4219 					/* move back to data send queue */
4220 					sctp_window_probe_recovery(stcb, asoc, tp1);
4221 					break;
4222 				}
4223 			}
4224 		}
4225 		if (net->RTO == 0) {
4226 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4227 		} else {
4228 			to_ticks = MSEC_TO_TICKS(net->RTO);
4229 		}
4230 		if (net->flight_size) {
4231 			j++;
4232 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4233 			    sctp_timeout_handler, &net->rxt_timer);
4234 			if (net->window_probe) {
4235 				net->window_probe = 0;
4236 			}
4237 		} else {
4238 			if (net->window_probe) {
4239 				/*
4240 				 * In window probes we must assure a timer
4241 				 * is still running there
4242 				 */
4243 				net->window_probe = 0;
4244 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4245 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4246 					    sctp_timeout_handler, &net->rxt_timer);
4247 				}
4248 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4249 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4250 				    stcb, net,
4251 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4252 			}
4253 		}
4254 	}
4255 	if ((j == 0) &&
4256 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4257 	    (asoc->sent_queue_retran_cnt == 0) &&
4258 	    (win_probe_recovered == 0) &&
4259 	    (done_once == 0)) {
4260 		/*
4261 		 * huh, this should not happen unless all packets are
4262 		 * PR-SCTP and marked to skip of course.
4263 		 */
4264 		if (sctp_fs_audit(asoc)) {
4265 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4266 				net->flight_size = 0;
4267 			}
4268 			asoc->total_flight = 0;
4269 			asoc->total_flight_count = 0;
4270 			asoc->sent_queue_retran_cnt = 0;
4271 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4272 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4273 					sctp_flight_size_increase(tp1);
4274 					sctp_total_flight_increase(stcb, tp1);
4275 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4276 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4277 				}
4278 			}
4279 		}
4280 		done_once = 1;
4281 		goto again;
4282 	}
4283 	/**********************************/
4284 	/* Now what about shutdown issues */
4285 	/**********************************/
4286 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4287 		/* nothing left on sendqueue.. consider done */
4288 		/* clean up */
4289 		if ((asoc->stream_queue_cnt == 1) &&
4290 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4291 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4292 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4293 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4294 		}
4295 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4296 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4297 		    (asoc->stream_queue_cnt == 1) &&
4298 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4299 			struct mbuf *op_err;
4300 
4301 			*abort_now = 1;
4302 			/* XXX */
4303 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4304 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4305 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4306 			return;
4307 		}
4308 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4309 		    (asoc->stream_queue_cnt == 0)) {
4310 			struct sctp_nets *netp;
4311 
4312 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4313 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4314 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4315 			}
4316 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4317 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4318 			sctp_stop_timers_for_shutdown(stcb);
4319 			if (asoc->alternate) {
4320 				netp = asoc->alternate;
4321 			} else {
4322 				netp = asoc->primary_destination;
4323 			}
4324 			sctp_send_shutdown(stcb, netp);
4325 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4326 			    stcb->sctp_ep, stcb, netp);
4327 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4328 			    stcb->sctp_ep, stcb, netp);
4329 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4330 		    (asoc->stream_queue_cnt == 0)) {
4331 			struct sctp_nets *netp;
4332 
4333 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4334 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4335 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4336 			sctp_stop_timers_for_shutdown(stcb);
4337 			if (asoc->alternate) {
4338 				netp = asoc->alternate;
4339 			} else {
4340 				netp = asoc->primary_destination;
4341 			}
4342 			sctp_send_shutdown_ack(stcb, netp);
4343 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4344 			    stcb->sctp_ep, stcb, netp);
4345 		}
4346 	}
4347 	/*********************************************/
4348 	/* Here we perform PR-SCTP procedures        */
4349 	/* (section 4.2)                             */
4350 	/*********************************************/
4351 	/* C1. update advancedPeerAckPoint */
4352 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4353 		asoc->advanced_peer_ack_point = cumack;
4354 	}
4355 	/* PR-SCTP issues need to be addressed too */
4356 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4357 		struct sctp_tmit_chunk *lchk;
4358 		uint32_t old_adv_peer_ack_point;
4359 
4360 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4361 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4362 		/* C3. See if we need to send a Fwd-TSN */
4363 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4364 			/*
4365 			 * ISSUE with ECN, see FWD-TSN processing.
4366 			 */
4367 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4368 				send_forward_tsn(stcb, asoc);
4369 			} else if (lchk) {
4370 				/* try to FR fwd-tsn's that get lost too */
4371 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4372 					send_forward_tsn(stcb, asoc);
4373 				}
4374 			}
4375 		}
4376 		if (lchk) {
4377 			/* Assure a timer is up */
4378 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4379 			    stcb->sctp_ep, stcb, lchk->whoTo);
4380 		}
4381 	}
4382 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4383 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4384 		    rwnd,
4385 		    stcb->asoc.peers_rwnd,
4386 		    stcb->asoc.total_flight,
4387 		    stcb->asoc.total_output_queue_size);
4388 	}
4389 }
4390 
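/*
 * Slow path SACK processing: handles SACKs carrying gap-ack blocks, NR
 * gap-ack blocks and/or duplicate TSN reports, following the numbered steps
 * described in the comment at the top of the function body.
 */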
4391 void
4392 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4393     struct sctp_tcb *stcb,
4394     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4395     int *abort_now, uint8_t flags,
4396     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4397 {
4398 	struct sctp_association *asoc;
4399 	struct sctp_tmit_chunk *tp1, *tp2;
4400 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4401 	uint16_t wake_him = 0;
4402 	uint32_t send_s = 0;
4403 	long j;
4404 	int accum_moved = 0;
4405 	int will_exit_fast_recovery = 0;
4406 	uint32_t a_rwnd, old_rwnd;
4407 	int win_probe_recovery = 0;
4408 	int win_probe_recovered = 0;
4409 	struct sctp_nets *net = NULL;
4410 	int done_once;
4411 	int rto_ok = 1;
4412 	uint8_t reneged_all = 0;
4413 	uint8_t cmt_dac_flag;
4414 
4415 	/*
4416 	 * we take any chance we can to service our queues since we cannot
4417 	 * get awoken when the socket is read from :<
4418 	 */
4419 	/*
4420 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4421 	 * old sack, if so discard. 2) If there is nothing left in the send
4422 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4423 	 * too, update any rwnd change and verify no timers are running.
4424 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4425 	 * moved process these first and note that it moved. 4) Process any
4426 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4427 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4428 	 * sync up flightsizes and things, stop all timers and also check
4429 	 * for shutdown_pending state. If so then go ahead and send off the
4430 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4431 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4432 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4433 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4434 	 * if in shutdown_recv state.
4435 	 */
4436 	SCTP_TCB_LOCK_ASSERT(stcb);
4437 	/* CMT DAC algo */
4438 	this_sack_lowest_newack = 0;
4439 	SCTP_STAT_INCR(sctps_slowpath_sack);
4440 	last_tsn = cum_ack;
4441 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4442 #ifdef SCTP_ASOCLOG_OF_TSNS
4443 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4444 	stcb->asoc.cumack_log_at++;
4445 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4446 		stcb->asoc.cumack_log_at = 0;
4447 	}
4448 #endif
4449 	a_rwnd = rwnd;
4450 
4451 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4452 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4453 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4454 	}
4455 	old_rwnd = stcb->asoc.peers_rwnd;
4456 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4457 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4458 		    stcb->asoc.overall_error_count,
4459 		    0,
4460 		    SCTP_FROM_SCTP_INDATA,
4461 		    __LINE__);
4462 	}
4463 	stcb->asoc.overall_error_count = 0;
4464 	asoc = &stcb->asoc;
4465 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4466 		sctp_log_sack(asoc->last_acked_seq,
4467 		    cum_ack,
4468 		    0,
4469 		    num_seg,
4470 		    num_dup,
4471 		    SCTP_LOG_NEW_SACK);
4472 	}
4473 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4474 		uint16_t i;
4475 		uint32_t *dupdata, dblock;
4476 
4477 		for (i = 0; i < num_dup; i++) {
4478 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4479 			    sizeof(uint32_t), (uint8_t *)&dblock);
4480 			if (dupdata == NULL) {
4481 				break;
4482 			}
4483 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4484 		}
4485 	}
4486 	/* reality check */
4487 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4488 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4489 		    sctpchunk_listhead);
4490 		send_s = tp1->rec.data.tsn + 1;
4491 	} else {
4492 		tp1 = NULL;
4493 		send_s = asoc->sending_seq;
4494 	}
4495 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4496 		struct mbuf *op_err;
4497 		char msg[SCTP_DIAG_INFO_LEN];
4498 
4499 		/*
4500 		 * no way, we have not even sent this TSN out yet. Peer is
4501 		 * hopelessly messed up with us.
4502 		 */
4503 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4504 		    cum_ack, send_s);
4505 		if (tp1) {
4506 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4507 			    tp1->rec.data.tsn, (void *)tp1);
4508 		}
4509 hopeless_peer:
4510 		*abort_now = 1;
4511 		/* XXX */
4512 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4513 		    cum_ack, send_s);
4514 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4515 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4516 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4517 		return;
4518 	}
4519 	/**********************/
4520 	/* 1) check the range */
4521 	/**********************/
4522 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4523 		/* acking something behind */
4524 		return;
4525 	}
4526 	/* update the Rwnd of the peer */
4527 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4528 	    TAILQ_EMPTY(&asoc->send_queue) &&
4529 	    (asoc->stream_queue_cnt == 0)) {
4530 		/* nothing left on send/sent and strmq */
4531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4532 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4533 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4534 		}
4535 		asoc->peers_rwnd = a_rwnd;
4536 		if (asoc->sent_queue_retran_cnt) {
4537 			asoc->sent_queue_retran_cnt = 0;
4538 		}
4539 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4540 			/* SWS sender side engages */
4541 			asoc->peers_rwnd = 0;
4542 		}
4543 		/* stop any timers */
4544 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4545 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4546 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4547 			net->partial_bytes_acked = 0;
4548 			net->flight_size = 0;
4549 		}
4550 		asoc->total_flight = 0;
4551 		asoc->total_flight_count = 0;
4552 		return;
4553 	}
4554 	/*
4555 	 * We init net_ack and net_ack2 to 0. These are used to track two
4556 	 * things. The total byte count acked is tracked in net_ack AND
4557 	 * net_ack2 is used to track the total bytes acked that are
4558 	 * unambiguous and were never retransmitted. We track these on a
4559 	 * per destination address basis.
4560 	 */
4561 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4562 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4563 			/* Drag along the window_tsn for cwr's */
4564 			net->cwr_window_tsn = cum_ack;
4565 		}
4566 		net->prev_cwnd = net->cwnd;
4567 		net->net_ack = 0;
4568 		net->net_ack2 = 0;
4569 
4570 		/*
4571 		 * CMT: Reset CUC and Fast recovery algo variables before
4572 		 * SACK processing
4573 		 */
4574 		net->new_pseudo_cumack = 0;
4575 		net->will_exit_fast_recovery = 0;
4576 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4577 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4578 		}
4579 	}
4580 	/* process the new consecutive TSN first */
4581 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4582 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4583 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4584 				accum_moved = 1;
4585 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4586 					/*
4587 					 * If it is less than ACKED, it is
4588 					 * now no longer in flight. Higher
4589 					 * values may occur during marking
4590 					 */
4591 					if ((tp1->whoTo->dest_state &
4592 					    SCTP_ADDR_UNCONFIRMED) &&
4593 					    (tp1->snd_count < 2)) {
4594 						/*
4595 						 * If there was no retran
4596 						 * and the address is
4597 						 * un-confirmed and we sent
4598 						 * there and are now
4599 						 * sacked... it's confirmed,
4600 						 * mark it so.
4601 						 */
4602 						tp1->whoTo->dest_state &=
4603 						    ~SCTP_ADDR_UNCONFIRMED;
4604 					}
4605 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4606 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4607 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4608 							    tp1->whoTo->flight_size,
4609 							    tp1->book_size,
4610 							    (uint32_t)(uintptr_t)tp1->whoTo,
4611 							    tp1->rec.data.tsn);
4612 						}
4613 						sctp_flight_size_decrease(tp1);
4614 						sctp_total_flight_decrease(stcb, tp1);
4615 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4616 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4617 							    tp1);
4618 						}
4619 					}
4620 					tp1->whoTo->net_ack += tp1->send_size;
4621 
4622 					/* CMT SFR and DAC algos */
4623 					this_sack_lowest_newack = tp1->rec.data.tsn;
4624 					tp1->whoTo->saw_newack = 1;
4625 
4626 					if (tp1->snd_count < 2) {
4627 						/*
4628 						 * True non-retransmitted
4629 						 * chunk
4630 						 */
4631 						tp1->whoTo->net_ack2 +=
4632 						    tp1->send_size;
4633 
4634 						/* update RTO too? */
4635 						if (tp1->do_rtt) {
4636 							if (rto_ok) {
4637 								tp1->whoTo->RTO =
4638 								    sctp_calculate_rto(stcb,
4639 								    asoc, tp1->whoTo,
4640 								    &tp1->sent_rcv_time,
4641 								    sctp_align_safe_nocopy,
4642 								    SCTP_RTT_FROM_DATA);
4643 								rto_ok = 0;
4644 							}
4645 							if (tp1->whoTo->rto_needed == 0) {
4646 								tp1->whoTo->rto_needed = 1;
4647 							}
4648 							tp1->do_rtt = 0;
4649 						}
4650 					}
4651 					/*
4652 					 * CMT: CUCv2 algorithm. From the
4653 					 * cumack'd TSNs, for each TSN being
4654 					 * acked for the first time, set the
4655 					 * following variables for the
4656 					 * corresp destination.
4657 					 * new_pseudo_cumack will trigger a
4658 					 * cwnd update.
4659 					 * find_(rtx_)pseudo_cumack will
4660 					 * trigger search for the next
4661 					 * expected (rtx-)pseudo-cumack.
4662 					 */
4663 					tp1->whoTo->new_pseudo_cumack = 1;
4664 					tp1->whoTo->find_pseudo_cumack = 1;
4665 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4666 
4667 
4668 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4669 						sctp_log_sack(asoc->last_acked_seq,
4670 						    cum_ack,
4671 						    tp1->rec.data.tsn,
4672 						    0,
4673 						    0,
4674 						    SCTP_LOG_TSN_ACKED);
4675 					}
4676 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4677 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4678 					}
4679 				}
4680 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4681 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4682 #ifdef SCTP_AUDITING_ENABLED
4683 					sctp_audit_log(0xB3,
4684 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4685 #endif
4686 				}
4687 				if (tp1->rec.data.chunk_was_revoked) {
4688 					/* deflate the cwnd */
4689 					tp1->whoTo->cwnd -= tp1->book_size;
4690 					tp1->rec.data.chunk_was_revoked = 0;
4691 				}
4692 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4693 					tp1->sent = SCTP_DATAGRAM_ACKED;
4694 				}
4695 			}
4696 		} else {
4697 			break;
4698 		}
4699 	}
4700 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4701 	/* always set this up to cum-ack */
4702 	asoc->this_sack_highest_gap = last_tsn;
4703 
4704 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4705 
4706 		/*
4707 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4708 		 * to be greater than the cumack. Also reset saw_newack to 0
4709 		 * for all dests.
4710 		 */
4711 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4712 			net->saw_newack = 0;
4713 			net->this_sack_highest_newack = last_tsn;
4714 		}
4715 
4716 		/*
4717 		 * this_sack_highest_gap will increase while handling NEW
4718 		 * segments; this_sack_highest_newack will increase while
4719 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4720 		 * used for the CMT DAC algo. saw_newack will also change.
4721 		 */
4722 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4723 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4724 		    num_seg, num_nr_seg, &rto_ok)) {
4725 			wake_him++;
4726 		}
4727 		/*
4728 		 * validate the biggest_tsn_acked in the gap acks if strict
4729 		 * adherence is wanted.
4730 		 */
4731 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4732 			/*
4733 			 * peer is either confused or we are under attack.
4734 			 * We must abort.
4735 			 */
4736 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4737 			    biggest_tsn_acked, send_s);
4738 			goto hopeless_peer;
4739 		}
4740 	}
4741 	/*******************************************/
4742 	/* cancel ALL T3-send timer if accum moved */
4743 	/*******************************************/
4744 	if (asoc->sctp_cmt_on_off > 0) {
4745 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4746 			if (net->new_pseudo_cumack)
4747 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4748 				    stcb, net,
4749 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4750 
4751 		}
4752 	} else {
4753 		if (accum_moved) {
4754 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4755 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4756 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4757 			}
4758 		}
4759 	}
4760 	/*********************************************/
4761 	/* drop the acked chunks from the sent queue */
4762 	/*********************************************/
4763 	asoc->last_acked_seq = cum_ack;
4764 
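	/*
	 * Everything left at or below the cum-ack is now fully acknowledged:
	 * fix up the per-stream chunk counts (which may allow a pending
	 * stream reset to fire), unlink the chunk from the sent queue, and
	 * release its data.
	 */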
4765 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4766 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4767 			break;
4768 		}
4769 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4770 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4771 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4772 #ifdef INVARIANTS
4773 			} else {
4774 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4775 #endif
4776 			}
4777 		}
4778 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4779 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4780 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4781 			asoc->trigger_reset = 1;
4782 		}
4783 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4784 		if (PR_SCTP_ENABLED(tp1->flags)) {
4785 			if (asoc->pr_sctp_cnt != 0)
4786 				asoc->pr_sctp_cnt--;
4787 		}
4788 		asoc->sent_queue_cnt--;
4789 		if (tp1->data) {
4790 			/* sa_ignore NO_NULL_CHK */
4791 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4792 			sctp_m_freem(tp1->data);
4793 			tp1->data = NULL;
4794 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4795 				asoc->sent_queue_cnt_removeable--;
4796 			}
4797 		}
4798 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4799 			sctp_log_sack(asoc->last_acked_seq,
4800 			    cum_ack,
4801 			    tp1->rec.data.tsn,
4802 			    0,
4803 			    0,
4804 			    SCTP_LOG_FREE_SENT);
4805 		}
4806 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4807 		wake_him++;
4808 	}
4809 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4810 #ifdef INVARIANTS
4811 		panic("Warning flight size is positive and should be 0");
4812 #else
4813 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4814 		    asoc->total_flight);
4815 #endif
4816 		asoc->total_flight = 0;
4817 	}
4818 	/* sa_ignore NO_NULL_CHK */
4819 	if ((wake_him) && (stcb->sctp_socket)) {
4820 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4821 		struct socket *so;
4822 
4823 #endif
4824 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4825 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4826 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4827 		}
4828 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4829 		so = SCTP_INP_SO(stcb->sctp_ep);
4830 		atomic_add_int(&stcb->asoc.refcnt, 1);
4831 		SCTP_TCB_UNLOCK(stcb);
4832 		SCTP_SOCKET_LOCK(so, 1);
4833 		SCTP_TCB_LOCK(stcb);
4834 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4835 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4836 			/* assoc was freed while we were unlocked */
4837 			SCTP_SOCKET_UNLOCK(so, 1);
4838 			return;
4839 		}
4840 #endif
4841 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4842 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4843 		SCTP_SOCKET_UNLOCK(so, 1);
4844 #endif
4845 	} else {
4846 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4847 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4848 		}
4849 	}
4850 
4851 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4852 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4853 			/* Setup so we will exit RFC2582 fast recovery */
4854 			will_exit_fast_recovery = 1;
4855 		}
4856 	}
4857 	/*
4858 	 * Check for revoked fragments:
4859 	 *
4860 	 * - If the previous SACK had no frags, nothing can have been revoked.
4861 	 * - If the previous SACK had frags and this one does too (num_seg > 0),
4862 	 *   call sctp_check_for_revoked() to see whether the peer revoked some.
4863 	 * - Otherwise the peer revoked all ACKED fragments, since we had some
4864 	 *   before and now have NONE.
4865 	 */
4866 
4867 	if (num_seg) {
4868 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4869 		asoc->saw_sack_with_frags = 1;
4870 	} else if (asoc->saw_sack_with_frags) {
4871 		int cnt_revoked = 0;
4872 
4873 		/* Peer revoked all datagrams marked or acked */
4874 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4875 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4876 				tp1->sent = SCTP_DATAGRAM_SENT;
4877 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4878 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4879 					    tp1->whoTo->flight_size,
4880 					    tp1->book_size,
4881 					    (uint32_t)(uintptr_t)tp1->whoTo,
4882 					    tp1->rec.data.tsn);
4883 				}
4884 				sctp_flight_size_increase(tp1);
4885 				sctp_total_flight_increase(stcb, tp1);
4886 				tp1->rec.data.chunk_was_revoked = 1;
4887 				/*
4888 				 * To ensure that this increase in
4889 				 * flightsize, which is artificial, does not
4890 				 * throttle the sender, we also increase the
4891 				 * cwnd artificially.
4892 				 */
4893 				tp1->whoTo->cwnd += tp1->book_size;
4894 				cnt_revoked++;
4895 			}
4896 		}
4897 		if (cnt_revoked) {
4898 			reneged_all = 1;
4899 		}
4900 		asoc->saw_sack_with_frags = 0;
4901 	}
4902 	if (num_nr_seg > 0)
4903 		asoc->saw_sack_with_nr_frags = 1;
4904 	else
4905 		asoc->saw_sack_with_nr_frags = 0;
4906 
4907 	/* JRS - Use the congestion control given in the CC module */
4908 	if (ecne_seen == 0) {
4909 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4910 			if (net->net_ack2 > 0) {
4911 				/*
4912 				 * Karn's rule applies to clearing the error
4913 				 * count; this is optional.
4914 				 */
4915 				net->error_count = 0;
4916 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4917 					/* addr came good */
4918 					net->dest_state |= SCTP_ADDR_REACHABLE;
4919 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4920 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4921 				}
4922 				if (net == stcb->asoc.primary_destination) {
4923 					if (stcb->asoc.alternate) {
4924 					/*
4925 					 * release the alternate;
4926 					 * the primary is good
4927 					 */
4928 						sctp_free_remote_addr(stcb->asoc.alternate);
4929 						stcb->asoc.alternate = NULL;
4930 					}
4931 				}
4932 				if (net->dest_state & SCTP_ADDR_PF) {
4933 					net->dest_state &= ~SCTP_ADDR_PF;
4934 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4935 					    stcb->sctp_ep, stcb, net,
4936 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4937 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4938 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4939 					/* Done with this net */
4940 					net->net_ack = 0;
4941 				}
4942 				/* restore any doubled timers */
4943 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4944 				if (net->RTO < stcb->asoc.minrto) {
4945 					net->RTO = stcb->asoc.minrto;
4946 				}
4947 				if (net->RTO > stcb->asoc.maxrto) {
4948 					net->RTO = stcb->asoc.maxrto;
4949 				}
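				/*
				 * lastsa and lastsv hold the smoothed RTT
				 * and its variance in scaled form, so the
				 * computation above effectively restores
				 * the base RTO (roughly SRTT plus a
				 * multiple of RTTVAR, RFC 6298 style),
				 * undoing any exponential backoff; the
				 * checks then clamp it into
				 * [minrto, maxrto].
				 */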
4950 			}
4951 		}
4952 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4953 	}
4954 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4955 		/* nothing left in-flight */
4956 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4957 			/* stop all timers */
4958 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4959 			    stcb, net,
4960 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4961 			net->flight_size = 0;
4962 			net->partial_bytes_acked = 0;
4963 		}
4964 		asoc->total_flight = 0;
4965 		asoc->total_flight_count = 0;
4966 	}
4967 	/**********************************/
4968 	/* Now what about shutdown issues */
4969 	/**********************************/
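	/*
	 * Once both queues have drained, a pending shutdown can progress:
	 * SHUTDOWN-PENDING moves to SHUTDOWN-SENT (we send a SHUTDOWN),
	 * SHUTDOWN-RECEIVED moves to SHUTDOWN-ACK-SENT (we answer with a
	 * SHUTDOWN-ACK), and a partially sent user message forces an abort
	 * instead.
	 */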
4970 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4971 		/* nothing left on the send queue, consider done */
4972 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4973 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4974 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4975 		}
4976 		asoc->peers_rwnd = a_rwnd;
4977 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4978 			/* SWS sender side engages */
4979 			asoc->peers_rwnd = 0;
4980 		}
4981 		/* clean up */
4982 		if ((asoc->stream_queue_cnt == 1) &&
4983 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4984 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4985 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4986 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4987 		}
4988 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4989 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4990 		    (asoc->stream_queue_cnt == 1) &&
4991 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4992 			struct mbuf *op_err;
4993 
4994 			*abort_now = 1;
4995 			/* XXX */
4996 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4997 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4998 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4999 			return;
5000 		}
5001 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5002 		    (asoc->stream_queue_cnt == 0)) {
5003 			struct sctp_nets *netp;
5004 
5005 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5006 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5007 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5008 			}
5009 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5010 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5011 			sctp_stop_timers_for_shutdown(stcb);
5012 			if (asoc->alternate) {
5013 				netp = asoc->alternate;
5014 			} else {
5015 				netp = asoc->primary_destination;
5016 			}
5017 			sctp_send_shutdown(stcb, netp);
5018 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5019 			    stcb->sctp_ep, stcb, netp);
5020 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5021 			    stcb->sctp_ep, stcb, netp);
5022 			return;
5023 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5024 		    (asoc->stream_queue_cnt == 0)) {
5025 			struct sctp_nets *netp;
5026 
5027 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5028 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5029 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5030 			sctp_stop_timers_for_shutdown(stcb);
5031 			if (asoc->alternate) {
5032 				netp = asoc->alternate;
5033 			} else {
5034 				netp = asoc->primary_destination;
5035 			}
5036 			sctp_send_shutdown_ack(stcb, netp);
5037 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5038 			    stcb->sctp_ep, stcb, netp);
5039 			return;
5040 		}
5041 	}
5042 	/*
5043 	 * Now here we are going to recycle net_ack for a different use...
5044 	 * HEADS UP.
5045 	 */
5046 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5047 		net->net_ack = 0;
5048 	}
5049 
5050 	/*
5051 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5052 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5053 	 * automatically ensure that.
5054 	 */
5055 	if ((asoc->sctp_cmt_on_off > 0) &&
5056 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5057 	    (cmt_dac_flag == 0)) {
5058 		this_sack_lowest_newack = cum_ack;
5059 	}
5060 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5061 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5062 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5063 	}
5064 	/* JRS - Use the congestion control given in the CC module */
5065 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5066 
5067 	/* Now are we exiting loss recovery ? */
5068 	if (will_exit_fast_recovery) {
5069 		/* Ok, we must exit fast recovery */
5070 		asoc->fast_retran_loss_recovery = 0;
5071 	}
5072 	if ((asoc->sat_t3_loss_recovery) &&
5073 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5074 		/* end satellite t3 loss recovery */
5075 		asoc->sat_t3_loss_recovery = 0;
5076 	}
5077 	/*
5078 	 * CMT Fast recovery
5079 	 */
5080 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5081 		if (net->will_exit_fast_recovery) {
5082 			/* Ok, we must exit fast recovery */
5083 			net->fast_retran_loss_recovery = 0;
5084 		}
5085 	}
5086 
5087 	/* Adjust and set the new rwnd value */
5088 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5089 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5090 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5091 	}
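	/*
	 * The usable peer window is the advertised rwnd minus everything
	 * still counted in flight, each in-flight chunk padded by the
	 * sctp_peer_chunk_oh overhead allowance; sctp_sbspace_sub() floors
	 * the result at zero:
	 *
	 *   peers_rwnd = max(0, a_rwnd - (total_flight +
	 *                    total_flight_count * peer_chunk_oh))
	 */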
5092 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5093 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5094 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5095 		/* SWS sender side engages */
5096 		asoc->peers_rwnd = 0;
5097 	}
5098 	if (asoc->peers_rwnd > old_rwnd) {
5099 		win_probe_recovery = 1;
5100 	}
5101 	/*
5102 	 * Now we must set things up so we have a timer up for anyone with
5103 	 * outstanding data.
5104 	 */
5105 	done_once = 0;
5106 again:
5107 	j = 0;
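	/*
	 * j counts destinations that still have data in flight; if it stays
	 * zero while the sent queue is non-empty, the flight-size accounting
	 * is audited and rebuilt below.
	 */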
5108 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5109 		if (win_probe_recovery && (net->window_probe)) {
5110 			win_probe_recovered = 1;
5111 			/*-
5112 			 * Find the first chunk that was used for a
5113 			 * window probe and clear the event. Put it
5114 			 * back into the send queue as if it had not
5115 			 * been sent.
5116 			 */
5117 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5118 				if (tp1->window_probe) {
5119 					sctp_window_probe_recovery(stcb, asoc, tp1);
5120 					break;
5121 				}
5122 			}
5123 		}
5124 		if (net->flight_size) {
5125 			j++;
5126 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5127 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5128 				    stcb->sctp_ep, stcb, net);
5129 			}
5130 			if (net->window_probe) {
5131 				net->window_probe = 0;
5132 			}
5133 		} else {
5134 			if (net->window_probe) {
5135 				/*
5136 				 * For a window probe we must ensure that a
5137 				 * timer is still running on that net.
5138 				 */
5139 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5140 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5141 					    stcb->sctp_ep, stcb, net);
5142 
5143 				}
5144 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5145 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5146 				    stcb, net,
5147 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5148 			}
5149 		}
5150 	}
5151 	if ((j == 0) &&
5152 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5153 	    (asoc->sent_queue_retran_cnt == 0) &&
5154 	    (win_probe_recovered == 0) &&
5155 	    (done_once == 0)) {
5156 		/*
5157 		 * Huh, this should not happen unless all packets are
5158 		 * PR-SCTP and marked to skip, of course.
5159 		 */
5160 		if (sctp_fs_audit(asoc)) {
5161 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5162 				net->flight_size = 0;
5163 			}
5164 			asoc->total_flight = 0;
5165 			asoc->total_flight_count = 0;
5166 			asoc->sent_queue_retran_cnt = 0;
5167 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5168 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5169 					sctp_flight_size_increase(tp1);
5170 					sctp_total_flight_increase(stcb, tp1);
5171 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5172 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5173 				}
5174 			}
5175 		}
5176 		done_once = 1;
5177 		goto again;
5178 	}
5179 	/*********************************************/
5180 	/* Here we perform PR-SCTP procedures        */
5181 	/* (section 4.2)                             */
5182 	/*********************************************/
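	/*
	 * These are the sender-side FORWARD TSN rules of PR-SCTP (RFC 3758):
	 * keep the Advanced.Peer.Ack.Point at or ahead of the cum-ack, try
	 * to push it past abandoned TSNs, and emit a FWD-TSN whenever it
	 * moves beyond what the peer has acked.
	 */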
5183 	/* C1. update advancedPeerAckPoint */
5184 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5185 		asoc->advanced_peer_ack_point = cum_ack;
5186 	}
5187 	/* C2. try to further move advancedPeerAckPoint ahead */
5188 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5189 		struct sctp_tmit_chunk *lchk;
5190 		uint32_t old_adv_peer_ack_point;
5191 
5192 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5193 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5194 		/* C3. See if we need to send a Fwd-TSN */
5195 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5196 			/*
5197 			 * ISSUE with ECN, see FWD-TSN processing.
5198 			 */
5199 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5200 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5201 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5202 				    old_adv_peer_ack_point);
5203 			}
5204 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5205 				send_forward_tsn(stcb, asoc);
5206 			} else if (lchk) {
5207 				/* try to FR fwd-tsn's that get lost too */
5208 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5209 					send_forward_tsn(stcb, asoc);
5210 				}
5211 			}
5212 		}
5213 		if (lchk) {
5214 			/* Assure a timer is up */
5215 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5216 			    stcb->sctp_ep, stcb, lchk->whoTo);
5217 		}
5218 	}
5219 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5220 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5221 		    a_rwnd,
5222 		    stcb->asoc.peers_rwnd,
5223 		    stcb->asoc.total_flight,
5224 		    stcb->asoc.total_output_queue_size);
5225 	}
5226 }
5227 
5228 void
5229 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5230 {
5231 	/* Copy cum-ack */
5232 	uint32_t cum_ack, a_rwnd;
5233 
5234 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5235 	/* Arrange so a_rwnd does NOT change */
5236 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
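	/*
	 * The express handler recomputes peers_rwnd roughly as a_rwnd minus
	 * the bytes still in flight, so seeding a_rwnd with peers_rwnd +
	 * total_flight leaves the peer's window unchanged by this SHUTDOWN.
	 */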
5237 
5238 	/* Now call the express sack handling */
5239 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5240 }
5241 
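/*
 * After a FORWARD TSN has advanced last_mid_delivered, push out anything on
 * this stream's reorder queue that has become deliverable: first everything
 * at or below the new message ID, then whatever now follows in order.
 */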
5242 static void
5243 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5244     struct sctp_stream_in *strmin)
5245 {
5246 	struct sctp_queued_to_read *control, *ncontrol;
5247 	struct sctp_association *asoc;
5248 	uint32_t mid;
5249 	int need_reasm_check = 0;
5250 
5251 	asoc = &stcb->asoc;
5252 	mid = strmin->last_mid_delivered;
5253 	/*
5254 	 * First deliver anything prior to and including the message ID that
5255 	 * came in.
5256 	 */
5257 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5258 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5259 			/* this is deliverable now */
5260 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5261 				if (control->on_strm_q) {
5262 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5263 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5264 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5265 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5266 #ifdef INVARIANTS
5267 					} else {
5268 						panic("strmin: %p ctl: %p unknown %d",
5269 						    strmin, control, control->on_strm_q);
5270 #endif
5271 					}
5272 					control->on_strm_q = 0;
5273 				}
5274 				/* subtract pending on streams */
5275 				if (asoc->size_on_all_streams >= control->length) {
5276 					asoc->size_on_all_streams -= control->length;
5277 				} else {
5278 #ifdef INVARIANTS
5279 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5280 #else
5281 					asoc->size_on_all_streams = 0;
5282 #endif
5283 				}
5284 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5285 				/* deliver it to at least the delivery-q */
5286 				if (stcb->sctp_socket) {
5287 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5288 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5289 					    control,
5290 					    &stcb->sctp_socket->so_rcv,
5291 					    1, SCTP_READ_LOCK_HELD,
5292 					    SCTP_SO_NOT_LOCKED);
5293 				}
5294 			} else {
5295 				/* It's a fragmented message */
5296 				if (control->first_frag_seen) {
5297 					/*
5298 					 * Make it so this is next to
5299 					 * deliver; we restore it later.
5300 					 */
5301 					strmin->last_mid_delivered = control->mid - 1;
5302 					need_reasm_check = 1;
5303 					break;
5304 				}
5305 			}
5306 		} else {
5307 			/* no more delivery now. */
5308 			break;
5309 		}
5310 	}
5311 	if (need_reasm_check) {
5312 		int ret;
5313 
5314 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5315 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5316 			/* Restore the next to deliver unless we are ahead */
5317 			strmin->last_mid_delivered = mid;
5318 		}
5319 		if (ret == 0) {
5320 			/* Left the front Partial one on */
5321 			return;
5322 		}
5323 		need_reasm_check = 0;
5324 	}
5325 	/*
5326 	 * Now we must deliver things in the queue the normal way, if any
5327 	 * are now ready.
5328 	 */
5329 	mid = strmin->last_mid_delivered + 1;
5330 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5331 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5332 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5333 				/* this is deliverable now */
5334 				if (control->on_strm_q) {
5335 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5336 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5337 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5338 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5339 #ifdef INVARIANTS
5340 					} else {
5341 						panic("strmin: %p ctl: %p unknown %d",
5342 						    strmin, control, control->on_strm_q);
5343 #endif
5344 					}
5345 					control->on_strm_q = 0;
5346 				}
5347 				/* subtract pending on streams */
5348 				if (asoc->size_on_all_streams >= control->length) {
5349 					asoc->size_on_all_streams -= control->length;
5350 				} else {
5351 #ifdef INVARIANTS
5352 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5353 #else
5354 					asoc->size_on_all_streams = 0;
5355 #endif
5356 				}
5357 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5358 				/* deliver it to at least the delivery-q */
5359 				strmin->last_mid_delivered = control->mid;
5360 				if (stcb->sctp_socket) {
5361 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5362 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5363 					    control,
5364 					    &stcb->sctp_socket->so_rcv, 1,
5365 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5366 
5367 				}
5368 				mid = strmin->last_mid_delivered + 1;
5369 			} else {
5370 				/* It's a fragmented message */
5371 				if (control->first_frag_seen) {
5372 					/*
5373 					 * Make it so this is the next
5374 					 * to deliver.
5375 					 */
5376 					strmin->last_mid_delivered = control->mid - 1;
5377 					need_reasm_check = 1;
5378 					break;
5379 				}
5380 			}
5381 		} else {
5382 			break;
5383 		}
5384 	}
5385 	if (need_reasm_check) {
5386 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5387 	}
5388 }
5389 
5390 
5391 
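/*
 * Toss reassembly-queue entries for the given stream and message ID that a
 * FORWARD TSN has made obsolete. For pre-I-DATA unordered data, only chunks
 * with TSNs at or below the forwarded cum-TSN are purged; newer fragments
 * are kept and may still form the front of a deliverable message.
 */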
5392 static void
5393 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5394     struct sctp_association *asoc,
5395     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5396 {
5397 	struct sctp_queued_to_read *control;
5398 	struct sctp_stream_in *strm;
5399 	struct sctp_tmit_chunk *chk, *nchk;
5400 	int cnt_removed = 0;
5401 
5402 	/*
5403 	 * For now, large messages held on the stream reassembly queue that
5404 	 * are complete will be tossed too. We could in theory do more work
5405 	 * to spin through and stop after dumping one message, i.e. upon
5406 	 * seeing the start of a new message at the head, and call the
5407 	 * delivery function to see if it can be delivered. But for now we
5408 	 * just dump everything on the queue.
5409 	 */
5410 	strm = &asoc->strmin[stream];
5411 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5412 	if (control == NULL) {
5413 		/* Not found */
5414 		return;
5415 	}
5416 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5417 		return;
5418 	}
5419 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5420 		/* Purge hanging chunks */
5421 		if (!asoc->idata_supported && (ordered == 0)) {
5422 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5423 				break;
5424 			}
5425 		}
5426 		cnt_removed++;
5427 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5428 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5429 			asoc->size_on_reasm_queue -= chk->send_size;
5430 		} else {
5431 #ifdef INVARIANTS
5432 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5433 #else
5434 			asoc->size_on_reasm_queue = 0;
5435 #endif
5436 		}
5437 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5438 		if (chk->data) {
5439 			sctp_m_freem(chk->data);
5440 			chk->data = NULL;
5441 		}
5442 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5443 	}
5444 	if (!TAILQ_EMPTY(&control->reasm)) {
5445 		/* This has to be old data, unordered */
5446 		if (control->data) {
5447 			sctp_m_freem(control->data);
5448 			control->data = NULL;
5449 		}
5450 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5451 		chk = TAILQ_FIRST(&control->reasm);
5452 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5453 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5454 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5455 			    chk, SCTP_READ_LOCK_HELD);
5456 		}
5457 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5458 		return;
5459 	}
5460 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5461 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5462 		if (asoc->size_on_all_streams >= control->length) {
5463 			asoc->size_on_all_streams -= control->length;
5464 		} else {
5465 #ifdef INVARIANTS
5466 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5467 #else
5468 			asoc->size_on_all_streams = 0;
5469 #endif
5470 		}
5471 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5472 		control->on_strm_q = 0;
5473 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5474 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5475 		control->on_strm_q = 0;
5476 #ifdef INVARIANTS
5477 	} else if (control->on_strm_q) {
5478 		panic("strm: %p ctl: %p unknown %d",
5479 		    strm, control, control->on_strm_q);
5480 #endif
5481 	}
5482 	control->on_strm_q = 0;
5483 	if (control->on_read_q == 0) {
5484 		sctp_free_remote_addr(control->whoFrom);
5485 		if (control->data) {
5486 			sctp_m_freem(control->data);
5487 			control->data = NULL;
5488 		}
5489 		sctp_free_a_readq(stcb, control);
5490 	}
5491 }
5492 
5493 void
5494 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5495     struct sctp_forward_tsn_chunk *fwd,
5496     int *abort_flag, struct mbuf *m, int offset)
5497 {
5498 	/* The pr-sctp fwd tsn */
5499 	/*
5500 	 * Here we will perform all the data receiver side steps for
5501 	 * processing FwdTSN, as required by the PR-SCTP draft.
5502 	 * Assume we get FwdTSN(x):
5503 	 *
5504 	 * 1) update local cumTSN to x
5505 	 * 2) try to further advance cumTSN to x + others we have
5506 	 * 3) examine and update re-ordering queue on pr-in-streams
5507 	 * 4) clean up re-assembly queue
5508 	 * 5) send a SACK to report where we are
5509 	 */
5510 	struct sctp_association *asoc;
5511 	uint32_t new_cum_tsn, gap;
5512 	unsigned int i, fwd_sz, m_size;
5513 	uint32_t str_seq;
5514 	struct sctp_stream_in *strm;
5515 	struct sctp_queued_to_read *control, *sv;
5516 
5517 	asoc = &stcb->asoc;
5518 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5519 		SCTPDBG(SCTP_DEBUG_INDATA1,
5520 		    "Bad size too small/big fwd-tsn\n");
5521 		return;
5522 	}
5523 	m_size = (stcb->asoc.mapping_array_size << 3);
5524 	/*************************************************************/
5525 	/* 1. Here we update local cumTSN and shift the bitmap array */
5526 	/*************************************************************/
5527 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5528 
5529 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5530 		/* Already got there ... */
5531 		return;
5532 	}
5533 	/*
5534 	 * now we know the new TSN is more advanced, let's find the actual
5535 	 * gap
5536 	 */
5537 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5538 	asoc->cumulative_tsn = new_cum_tsn;
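	/*
	 * gap is the bit offset of the new cum-TSN within the current
	 * mapping window, i.e. roughly
	 *
	 *   gap = new_cum_tsn - mapping_array_base_tsn;	(mod 2^32)
	 *
	 * If it lands beyond the window (gap >= m_size bits), the maps are
	 * reset past new_cum_tsn below; otherwise bits 0..gap are marked as
	 * received.
	 */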
5539 	if (gap >= m_size) {
5540 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5541 			struct mbuf *op_err;
5542 			char msg[SCTP_DIAG_INFO_LEN];
5543 
5544 			/*
5545 			 * out of range (of single byte chunks in the rwnd I
5546 			 * give out). This must be an attacker.
5547 			 */
5548 			*abort_flag = 1;
5549 			snprintf(msg, sizeof(msg),
5550 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5551 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5552 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5553 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5554 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5555 			return;
5556 		}
5557 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5558 
5559 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5560 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5561 		asoc->highest_tsn_inside_map = new_cum_tsn;
5562 
5563 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5564 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5565 
5566 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5567 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5568 		}
5569 	} else {
5570 		SCTP_TCB_LOCK_ASSERT(stcb);
5571 		for (i = 0; i <= gap; i++) {
5572 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5573 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5574 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5575 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5576 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5577 				}
5578 			}
5579 		}
5580 	}
5581 	/*************************************************************/
5582 	/* 2. Clear up re-assembly queue                             */
5583 	/*************************************************************/
5584 
5585 	/* This is now done as part of clearing up the stream/seq */
5586 	if (asoc->idata_supported == 0) {
5587 		uint16_t sid;
5588 
5589 		/* Flush all the un-ordered data based on cum-tsn */
5590 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5591 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5592 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5593 		}
5594 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5595 	}
5596 	/*******************************************************/
5597 	/* 3. Update the PR-stream re-ordering queues and fix  */
5598 	/*    delivery issues as needed.                       */
5599 	/*******************************************************/
5600 	fwd_sz -= sizeof(*fwd);
5601 	if (m && fwd_sz) {
5602 		/* New method. */
5603 		unsigned int num_str;
5604 		uint32_t mid, cur_mid;
5605 		uint16_t sid;
5606 		uint16_t ordered, flags;
5607 		struct sctp_strseq *stseq, strseqbuf;
5608 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5609 
5610 		offset += sizeof(*fwd);
5611 
5612 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5613 		if (asoc->idata_supported) {
5614 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5615 		} else {
5616 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5617 		}
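		/*
		 * Each entry in the chunk names one stream and the highest
		 * message being skipped on it: a 16-bit SSN for classic
		 * DATA, or a 32-bit MID plus flags (carrying the unordered
		 * bit) when I-DATA is in use.
		 */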
5618 		for (i = 0; i < num_str; i++) {
5619 			if (asoc->idata_supported) {
5620 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5621 				    sizeof(struct sctp_strseq_mid),
5622 				    (uint8_t *)&strseqbuf_m);
5623 				offset += sizeof(struct sctp_strseq_mid);
5624 				if (stseq_m == NULL) {
5625 					break;
5626 				}
5627 				sid = ntohs(stseq_m->sid);
5628 				mid = ntohl(stseq_m->mid);
5629 				flags = ntohs(stseq_m->flags);
5630 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5631 					ordered = 0;
5632 				} else {
5633 					ordered = 1;
5634 				}
5635 			} else {
5636 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5637 				    sizeof(struct sctp_strseq),
5638 				    (uint8_t *)&strseqbuf);
5639 				offset += sizeof(struct sctp_strseq);
5640 				if (stseq == NULL) {
5641 					break;
5642 				}
5643 				sid = ntohs(stseq->sid);
5644 				mid = (uint32_t)ntohs(stseq->ssn);
5645 				ordered = 1;
5646 			}
5647 			/* Convert */
5648 
5649 			/* now process */
5650 
5651 			/*
5652 			 * OK, we now look for the stream/seq on the read
5653 			 * queue where it's not all delivered. If we find it
5654 			 * we transmute the read entry into a PDI_ABORTED.
5655 			 */
5656 			if (sid >= asoc->streamincnt) {
5657 				/* screwed up streams, stop!  */
5658 				break;
5659 			}
5660 			if ((asoc->str_of_pdapi == sid) &&
5661 			    (asoc->ssn_of_pdapi == mid)) {
5662 				/*
5663 				 * If this is the one we were partially
5664 				 * delivering, then we no longer are.
5665 				 * Note this will change with the reassembly
5666 				 * re-write.
5667 				 */
5668 				asoc->fragmented_delivery_inprogress = 0;
5669 			}
5670 			strm = &asoc->strmin[sid];
5671 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5672 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5673 			}
5674 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5675 				if ((control->sinfo_stream == sid) &&
5676 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5677 					str_seq = (sid << 16) | (0x0000ffff & mid);
5678 					control->pdapi_aborted = 1;
5679 					sv = stcb->asoc.control_pdapi;
5680 					control->end_added = 1;
5681 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5682 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5683 						if (asoc->size_on_all_streams >= control->length) {
5684 							asoc->size_on_all_streams -= control->length;
5685 						} else {
5686 #ifdef INVARIANTS
5687 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5688 #else
5689 							asoc->size_on_all_streams = 0;
5690 #endif
5691 						}
5692 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5693 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5694 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5695 #ifdef INVARIANTS
5696 					} else if (control->on_strm_q) {
5697 						panic("strm: %p ctl: %p unknown %d",
5698 						    strm, control, control->on_strm_q);
5699 #endif
5700 					}
5701 					control->on_strm_q = 0;
5702 					stcb->asoc.control_pdapi = control;
5703 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5704 					    stcb,
5705 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5706 					    (void *)&str_seq,
5707 					    SCTP_SO_NOT_LOCKED);
5708 					stcb->asoc.control_pdapi = sv;
5709 					break;
5710 				} else if ((control->sinfo_stream == sid) &&
5711 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5712 					/* We are past our victim SSN */
5713 					break;
5714 				}
5715 			}
5716 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5717 				/* Update the sequence number */
5718 				strm->last_mid_delivered = mid;
5719 			}
5720 			/* now kick the stream the new way */
5721 			/* sa_ignore NO_NULL_CHK */
5722 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5723 		}
5724 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5725 	}
5726 	/*
5727 	 * Now slide things forward.
5728 	 */
5729 	sctp_slide_mapping_arrays(stcb);
5730 }
5731