xref: /freebsd/sys/netinet/sctp_indata.c (revision 58a0f0d00c0cc4a90ce584a61470290751bfcac7)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive, we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* What is the overhead of all these rwnds? */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0 (SWS avoidance engaged).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
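
/*
 * Worked example (hypothetical numbers, for orientation only): with a
 * receive-buffer limit of 64000 bytes, 2000 bytes held in 4 chunks on the
 * reassembly queue, 1000 bytes held in 2 controls on the stream queues,
 * and an MSIZE of 256, the calculation above proceeds roughly as:
 *
 *	calc  = sctp_sbspace(...);	// socket-buffer space left
 *	calc -= 2000 + 4 * 256;		// reassembly data + mbuf overhead
 *	calc -= 1000 + 2 * 256;		// stream-queue data + mbuf overhead
 *	calc -= my_rwnd_control_len;	// control/ancillary overhead
 *
 * Each subtraction is clamped at 0 by sctp_sbspace_sub(), and the final
 * check keeps the advertised window at 1 (rather than a tiny value
 * dominated by control overhead) to avoid silly window syndrome.
 */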


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no uninitialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
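
/*
 * For orientation, a minimal sketch (not part of this file) of how a
 * userland application would walk the ancillary data assembled above
 * after a recvmsg() call, assuming it enabled the SCTP_RECVRCVINFO
 * socket option:
 *
 *	struct msghdr msg;	// filled in by recvmsg()
 *	struct cmsghdr *c;
 *	struct sctp_rcvinfo ri;
 *
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == IPPROTO_SCTP &&
 *		    c->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&ri, CMSG_DATA(c), sizeof(ri));
 *			// ri.rcv_sid, ri.rcv_ssn, ri.rcv_tsn now usable
 *		}
 *	}
 *
 * The memset() of the whole cmsg area above is what guarantees such a
 * walk never reads uninitialized padding.
 */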

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This TSN is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
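
/*
 * Note on the two bitmaps used above: both track received TSNs as bit
 * offsets (gaps) from mapping_array_base_tsn. With a base TSN of 1000,
 * for example, TSN 1005 maps to gap 5. Marking a TSN non-revokable moves
 * its bit from mapping_array (TSNs we could still renege on under memory
 * pressure) to nr_mapping_array (TSNs we must never renege on, e.g.
 * because they have been handed up toward the application), which is why
 * a drain pass only ever frees TSNs tracked in the former map.
 */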

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control can be here in the old
				 * style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate message ID;
				 * return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
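
/*
 * Example of the placement above: the loop is a plain insertion sort
 * keyed on the message ID. If the ordered queue holds controls with MIDs
 * 3, 4 and 7 and MID 5 arrives, the walk passes 3 and 4, finds 7 > 5,
 * and inserts 5 before 7, yielding 3, 4, 5, 7. SCTP_MID_GT and
 * SCTP_MID_EQ compare in serial-number arithmetic (32-bit MIDs when
 * I-DATA is supported, 16-bit SSNs otherwise), so sequence-number
 * wraparound is handled.
 */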

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* It won't be queued if it can be delivered directly. */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
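
/*
 * Example of the flow above: with last_mid_delivered at 4, an arriving
 * complete MID 5 is pushed straight to the read queue, and the loop then
 * drains any queued MIDs 6, 7, ... that are complete (SCTP_DATA_NOT_FRAG)
 * until it hits a gap or a fragmented message; the fragmented case sets
 * *need_reasm so the caller runs the reassembly check. Had MID 7 arrived
 * in the same state, it would instead take the queue_needed path and be
 * insertion-sorted into the stream's inqueue.
 */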

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries, unless the peer is sending
	 * both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok, let's add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok, we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
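
/*
 * Note on pd_point as used above: the caller computes it as the smaller
 * of the socket's receive limit shifted down by
 * SCTP_PARTIAL_DELIVERY_SHIFT and the endpoint's partial_delivery_point.
 * Once an incomplete message grows past that many bytes it is pushed to
 * the read queue anyway with pd_api_started set, so a large message can
 * start being consumed before its last fragment arrives; returning 0
 * here signals the caller that such a partial delivery is in progress.
 */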

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN-, i.e.
			 * FSN-, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok, this should not happen; if it does,
				 * we started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSNs higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
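
/*
 * Example of the swap above: two FIRST fragments can land on the same
 * control in the old unordered scheme. If the control currently starts
 * at FSN 10 and a FIRST with FSN 7 arrives, the mbuf chains, FSN, TSN
 * and PPID of the control and the chunk are exchanged, so the control
 * always describes the lowest-numbered message and the displaced FIRST
 * is re-sorted into the reassembly list at place_chunk.
 */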

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top that was being partially delivered
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok, we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}
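
/*
 * Example of the ordered pass above: with MIDs 5, 6 and 8 fully
 * reassembled and last_mid_delivered at 4, the deliver_more loop pushes
 * 5 and 6 to the read queue and stops at the gap before 8. An incomplete
 * message whose length has already passed pd_point is instead handed up
 * early with pd_api_started set, which blocks further ordered delivery
 * on this stream until that message completes.
 */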

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
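
/*
 * Summary of the merge above: the chunk's bytes move from reassembly
 * accounting (size_on_reasm_queue/cnt_on_reasm_queue) into the control,
 * its mbuf chain is appended at the control's tail pointer, fsn_included
 * advances, and the chunk shell is freed. A LAST_FRAG chunk additionally
 * marks the message complete and, if the control sits on both a stream
 * queue and the read queue, unhooks it from the stream queue so only the
 * reader's reference remains.
 */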
1355 
1356 /*
1357  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1358  * queue, see if anthing can be delivered. If so pull it off (or as much as
1359  * we can. If we run out of space then we must dump what we can and set the
1360  * appropriate flag to say we queued what we could.
1361  */
1362 static void
1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1364     struct sctp_queued_to_read *control,
1365     struct sctp_tmit_chunk *chk,
1366     int created_control,
1367     int *abort_flag, uint32_t tsn)
1368 {
1369 	uint32_t next_fsn;
1370 	struct sctp_tmit_chunk *at, *nat;
1371 	struct sctp_stream_in *strm;
1372 	int do_wakeup, unordered;
1373 	uint32_t lenadded;
1374 
1375 	strm = &asoc->strmin[control->sinfo_stream];
1376 	/*
1377 	 * For old un-ordered data chunks.
1378 	 */
1379 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1380 		unordered = 1;
1381 	} else {
1382 		unordered = 0;
1383 	}
1384 	/* Must be added to the stream-in queue */
1385 	if (created_control) {
1386 		if (unordered == 0) {
1387 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1388 		}
1389 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1390 			/* Duplicate SSN? */
1391 			sctp_abort_in_reasm(stcb, control, chk,
1392 			    abort_flag,
1393 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1394 			sctp_clean_up_control(stcb, control);
1395 			return;
1396 		}
1397 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1398 			/*
1399 			 * Ok we created this control and now lets validate
1400 			 * that its legal i.e. there is a B bit set, if not
1401 			 * and we have up to the cum-ack then its invalid.
1402 			 */
1403 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1404 				sctp_abort_in_reasm(stcb, control, chk,
1405 				    abort_flag,
1406 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1407 				return;
1408 			}
1409 		}
1410 	}
1411 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1412 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1413 		return;
1414 	}
1415 	/*
1416 	 * Ok we must queue the chunk into the reasembly portion: o if its
1417 	 * the first it goes to the control mbuf. o if its not first but the
1418 	 * next in sequence it goes to the control, and each succeeding one
1419 	 * in order also goes. o if its not in order we place it on the list
1420 	 * in its place.
1421 	 */
1422 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1423 		/* Its the very first one. */
1424 		SCTPDBG(SCTP_DEBUG_XXX,
1425 		    "chunk is a first fsn: %u becomes fsn_included\n",
1426 		    chk->rec.data.fsn);
1427 		if (control->first_frag_seen) {
1428 			/*
1429 			 * Error on senders part, they either sent us two
1430 			 * data chunks with FIRST, or they sent two
1431 			 * un-ordered chunks that were fragmented at the
1432 			 * same time in the same stream.
1433 			 */
1434 			sctp_abort_in_reasm(stcb, control, chk,
1435 			    abort_flag,
1436 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1437 			return;
1438 		}
1439 		control->first_frag_seen = 1;
1440 		control->sinfo_ppid = chk->rec.data.ppid;
1441 		control->sinfo_tsn = chk->rec.data.tsn;
1442 		control->fsn_included = chk->rec.data.fsn;
1443 		control->data = chk->data;
1444 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1445 		chk->data = NULL;
1446 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1447 		sctp_setup_tail_pointer(control);
1448 		asoc->size_on_all_streams += control->length;
1449 	} else {
1450 		/* Place the chunk in our list */
1451 		int inserted = 0;
1452 
1453 		if (control->last_frag_seen == 0) {
1454 			/* Still willing to raise highest FSN seen */
1455 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1456 				SCTPDBG(SCTP_DEBUG_XXX,
1457 				    "We have a new top_fsn: %u\n",
1458 				    chk->rec.data.fsn);
1459 				control->top_fsn = chk->rec.data.fsn;
1460 			}
1461 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1462 				SCTPDBG(SCTP_DEBUG_XXX,
1463 				    "The last fsn is now in place fsn: %u\n",
1464 				    chk->rec.data.fsn);
1465 				control->last_frag_seen = 1;
1466 			}
1467 			if (asoc->idata_supported || control->first_frag_seen) {
1468 				/*
1469 				 * For IDATA we always check since we know
1470 				 * that the first fragment is 0. For old
1471 				 * DATA we have to receive the first before
1472 				 * we know the first FSN (which is the TSN).
1473 				 */
1474 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1475 					/*
1476 					 * We have already delivered up to
1477 					 * this so its a dup
1478 					 */
1479 					sctp_abort_in_reasm(stcb, control, chk,
1480 					    abort_flag,
1481 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1482 					return;
1483 				}
1484 			}
1485 		} else {
1486 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1487 				/* Second last? huh? */
1488 				SCTPDBG(SCTP_DEBUG_XXX,
1489 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1490 				    chk->rec.data.fsn, control->top_fsn);
1491 				sctp_abort_in_reasm(stcb, control,
1492 				    chk, abort_flag,
1493 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1494 				return;
1495 			}
1496 			if (asoc->idata_supported || control->first_frag_seen) {
1497 				/*
1498 				 * For IDATA we always check since we know
1499 				 * that the first fragment is 0. For old
1500 				 * DATA we have to receive the first before
1501 				 * we know the first FSN (which is the TSN).
1502 				 */
1503 
1504 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1505 					/*
1506 					 * We have already delivered up to
1507 					 * this so its a dup
1508 					 */
1509 					SCTPDBG(SCTP_DEBUG_XXX,
1510 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1511 					    chk->rec.data.fsn, control->fsn_included);
1512 					sctp_abort_in_reasm(stcb, control, chk,
1513 					    abort_flag,
1514 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1515 					return;
1516 				}
1517 			}
1518 			/*
1519 			 * validate not beyond top FSN if we have seen last
1520 			 * one
1521 			 */
1522 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1523 				SCTPDBG(SCTP_DEBUG_XXX,
1524 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1525 				    chk->rec.data.fsn,
1526 				    control->top_fsn);
1527 				sctp_abort_in_reasm(stcb, control, chk,
1528 				    abort_flag,
1529 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1530 				return;
1531 			}
1532 		}
1533 		/*
1534 		 * If we reach here, we need to place the new chunk in the
1535 		 * reassembly for this control.
1536 		 */
1537 		SCTPDBG(SCTP_DEBUG_XXX,
1538 		    "chunk is a not first fsn: %u needs to be inserted\n",
1539 		    chk->rec.data.fsn);
1540 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1541 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1542 				/*
1543 				 * This one in queue is bigger than the new
1544 				 * one, insert the new one before at.
1545 				 */
1546 				SCTPDBG(SCTP_DEBUG_XXX,
1547 				    "Insert it before fsn: %u\n",
1548 				    at->rec.data.fsn);
1549 				asoc->size_on_reasm_queue += chk->send_size;
1550 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1551 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1552 				inserted = 1;
1553 				break;
1554 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1555 				/*
1556 				 * Gak, He sent me a duplicate str seq
1557 				 * number
1558 				 */
1559 				/*
1560 				 * foo bar, I guess I will just free this
1561 				 * new guy, should we abort too? FIX ME
1562 				 * MAYBE? Or it COULD be that the SSN's have
1563 				 * wrapped. Maybe I should compare to TSN
1564 				 * somehow... sigh for now just blow away
1565 				 * the chunk!
1566 				 */
1567 				SCTPDBG(SCTP_DEBUG_XXX,
1568 				    "Duplicate to fsn: %u -- abort\n",
1569 				    at->rec.data.fsn);
1570 				sctp_abort_in_reasm(stcb, control,
1571 				    chk, abort_flag,
1572 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1573 				return;
1574 			}
1575 		}
1576 		if (inserted == 0) {
1577 			/* Goes on the end */
1578 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1579 			    chk->rec.data.fsn);
1580 			asoc->size_on_reasm_queue += chk->send_size;
1581 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1582 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1583 		}
1584 	}
1585 	/*
1586 	 * Ok lets see if we can suck any up into the control structure that
1587 	 * are in seq if it makes sense.
1588 	 */
1589 	do_wakeup = 0;
1590 	/*
1591 	 * If the first fragment has not been seen there is no sense in
1592 	 * looking.
1593 	 */
1594 	if (control->first_frag_seen) {
1595 		next_fsn = control->fsn_included + 1;
1596 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1597 			if (at->rec.data.fsn == next_fsn) {
1598 				/* We can add this one now to the control */
1599 				SCTPDBG(SCTP_DEBUG_XXX,
1600 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1601 				    control, at,
1602 				    at->rec.data.fsn,
1603 				    next_fsn, control->fsn_included);
1604 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1605 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1606 				if (control->on_read_q) {
1607 					do_wakeup = 1;
1608 				} else {
1609 					/*
1610 					 * We only add to
1611 					 * size-on-all-streams if it's not
1612 					 * on the read queue. The read
1613 					 * queue flag causes an sballoc, so
1614 					 * it's accounted for there.
1615 					 */
1616 					asoc->size_on_all_streams += lenadded;
1617 				}
1618 				next_fsn++;
1619 				if (control->end_added && control->pdapi_started) {
1620 					if (strm->pd_api_started) {
1621 						strm->pd_api_started = 0;
1622 						control->pdapi_started = 0;
1623 					}
1624 					if (control->on_read_q == 0) {
1625 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1626 						    control,
1627 						    &stcb->sctp_socket->so_rcv, control->end_added,
1628 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1629 					}
1630 					break;
1631 				}
1632 			} else {
1633 				break;
1634 			}
1635 		}
1636 	}
1637 	if (do_wakeup) {
1638 		/* Need to wakeup the reader */
1639 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1640 	}
1641 }
1642 
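/*
 * Find the reassembly entry that a fragment belongs to. Ordered data, and
 * unordered data when I-DATA is supported, are looked up by MID. Unordered
 * old-style DATA carries no usable MID, so the head of uno_inqueue is used;
 * in effect only one unordered reassembly per stream can be in progress.
 */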
1643 static struct sctp_queued_to_read *
1644 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1645 {
1646 	struct sctp_queued_to_read *control;
1647 
1648 	if (ordered) {
1649 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1650 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1651 				break;
1652 			}
1653 		}
1654 	} else {
1655 		if (idata_supported) {
1656 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1657 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1658 					break;
1659 				}
1660 			}
1661 		} else {
1662 			control = TAILQ_FIRST(&strm->uno_inqueue);
1663 		}
1664 	}
1665 	return (control);
1666 }
1667 
1668 static int
1669 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1670     struct mbuf **m, int offset, int chk_length,
1671     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1672     int *break_flag, int last_chunk, uint8_t chk_type)
1673 {
1674 	/* Process a data chunk */
1675 	struct sctp_tmit_chunk *chk = NULL;
1676 	uint32_t tsn, fsn, gap, mid;
1677 	struct mbuf *dmbuf;
1678 	int the_len;
1679 	int need_reasm_check = 0;
1680 	uint16_t sid;
1681 	struct mbuf *op_err;
1682 	char msg[SCTP_DIAG_INFO_LEN];
1683 	struct sctp_queued_to_read *control, *ncontrol;
1684 	uint32_t ppid;
1685 	uint8_t chk_flags;
1686 	struct sctp_stream_reset_list *liste;
1687 	int ordered;
1688 	size_t clen;
1689 	int created_control = 0;
1690 
1691 	if (chk_type == SCTP_IDATA) {
1692 		struct sctp_idata_chunk *chunk, chunk_buf;
1693 
1694 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1695 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1696 		chk_flags = chunk->ch.chunk_flags;
1697 		clen = sizeof(struct sctp_idata_chunk);
1698 		tsn = ntohl(chunk->dp.tsn);
1699 		sid = ntohs(chunk->dp.sid);
1700 		mid = ntohl(chunk->dp.mid);
1701 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1702 			fsn = 0;
1703 			ppid = chunk->dp.ppid_fsn.ppid;
1704 		} else {
1705 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1706 			ppid = 0xffffffff;	/* Use as an invalid value. */
1707 		}
1708 	} else {
1709 		struct sctp_data_chunk *chunk, chunk_buf;
1710 
1711 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1712 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1713 		chk_flags = chunk->ch.chunk_flags;
1714 		clen = sizeof(struct sctp_data_chunk);
1715 		tsn = ntohl(chunk->dp.tsn);
1716 		sid = ntohs(chunk->dp.sid);
1717 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1718 		fsn = tsn;
1719 		ppid = chunk->dp.ppid;
1720 	}
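	/*
	 * At this point the header differences are normalized. The I-DATA
	 * ppid_fsn field is a union: the first fragment carries the PPID
	 * (its FSN is implicitly 0), later fragments carry the FSN instead.
	 * For old-style DATA the 16-bit SSN is widened into the MID and the
	 * TSN doubles as the FSN, since fragments of a DATA user message
	 * are sent with consecutive TSNs.
	 */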
1721 	if ((size_t)chk_length == clen) {
1722 		/*
1723 		 * Need to send an abort, since we had an empty data chunk.
1724 		 */
1725 		op_err = sctp_generate_no_user_data_cause(tsn);
1726 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1727 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1728 		*abort_flag = 1;
1729 		return (0);
1730 	}
1731 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1732 		asoc->send_sack = 1;
1733 	}
1734 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1735 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1736 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1737 	}
1738 	if (stcb == NULL) {
1739 		return (0);
1740 	}
1741 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1742 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1743 		/* It is a duplicate */
1744 		SCTP_STAT_INCR(sctps_recvdupdata);
1745 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1746 			/* Record a dup for the next outbound sack */
1747 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1748 			asoc->numduptsns++;
1749 		}
1750 		asoc->send_sack = 1;
1751 		return (0);
1752 	}
1753 	/* Calculate the number of TSNs between the base and this TSN */
1754 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
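	/*
	 * The gap is the serial-arithmetic distance from the base TSN, so
	 * wrap-around is handled: e.g. base 0xfffffffe and tsn 0x00000001
	 * give gap 3. Each gap value indexes one bit in the mapping
	 * arrays, hence the limit of SCTP_MAPPING_ARRAY * 8 bits below.
	 */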
1755 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1756 		/* Can't hold the bit in the mapping at max array, toss it */
1757 		return (0);
1758 	}
1759 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1760 		SCTP_TCB_LOCK_ASSERT(stcb);
1761 		if (sctp_expand_mapping_array(asoc, gap)) {
1762 			/* Can't expand, drop it */
1763 			return (0);
1764 		}
1765 	}
1766 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1767 		*high_tsn = tsn;
1768 	}
1769 	/* See if we have received this one already */
1770 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1771 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1772 		SCTP_STAT_INCR(sctps_recvdupdata);
1773 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1774 			/* Record a dup for the next outbound sack */
1775 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1776 			asoc->numduptsns++;
1777 		}
1778 		asoc->send_sack = 1;
1779 		return (0);
1780 	}
1781 	/*
1782 	 * Check the GONE flag; duplicates would already have caused a SACK
1783 	 * to be sent above.
1784 	 */
1785 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1786 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1787 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1788 		/*
1789 		 * Wait a minute, this socket is gone; there is no longer a
1790 		 * receiver. Send the peer an ABORT!
1791 		 */
1792 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1793 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1794 		*abort_flag = 1;
1795 		return (0);
1796 	}
1797 	/*
1798 	 * Now before going further we see if there is room. If NOT then we
1799 	 * MAY let one through only IF this TSN is the one we are waiting
1800 	 * for on a partial delivery API.
1801 	 */
1802 
1803 	/* Is the stream valid? */
1804 	if (sid >= asoc->streamincnt) {
1805 		struct sctp_error_invalid_stream *cause;
1806 
1807 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1808 		    0, M_NOWAIT, 1, MT_DATA);
1809 		if (op_err != NULL) {
1810 			/* add some space up front so prepend will work well */
1811 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1812 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1813 			/*
1814 			 * Error causes are just parameters; this one has
1815 			 * two back-to-back headers: the error type and
1816 			 * size, then the stream id and a reserved field.
1817 			 */
1818 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1819 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1820 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1821 			cause->stream_id = htons(sid);
1822 			cause->reserved = htons(0);
1823 			sctp_queue_op_err(stcb, op_err);
1824 		}
1825 		SCTP_STAT_INCR(sctps_badsid);
1826 		SCTP_TCB_LOCK_ASSERT(stcb);
1827 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1828 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1829 			asoc->highest_tsn_inside_nr_map = tsn;
1830 		}
1831 		if (tsn == (asoc->cumulative_tsn + 1)) {
1832 			/* Update cum-ack */
1833 			asoc->cumulative_tsn = tsn;
1834 		}
1835 		return (0);
1836 	}
1837 	/*
1838 	 * If it's a fragmented message, let's see if we can find the control
1839 	 * on the reassembly queues.
1840 	 */
1841 	if ((chk_type == SCTP_IDATA) &&
1842 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1843 	    (fsn == 0)) {
1844 		/*
1845 		 * The first fragment *must* be fsn 0, and the other
1846 		 * (middle/end) pieces can *not* be fsn 0. XXX: This can
1847 		 * happen in case of an FSN wrap around. Ignore that for now.
1848 		 */
1849 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1850 		    mid, chk_flags);
1851 		goto err_out;
1852 	}
1853 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1854 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1855 	    chk_flags, control);
1856 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1857 		/* See if we can find the re-assembly entity */
1858 		if (control != NULL) {
1859 			/* We found something, does it belong? */
1860 			if (ordered && (mid != control->mid)) {
1861 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1862 		err_out:
1863 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1864 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1865 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1866 				*abort_flag = 1;
1867 				return (0);
1868 			}
1869 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1870 				/*
1871 				 * We can't have a switched order with an
1872 				 * unordered chunk
1873 				 */
1874 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1875 				    tsn);
1876 				goto err_out;
1877 			}
1878 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1879 				/*
1880 				 * We can't have a switched unordered with
1881 				 * an ordered chunk.
1882 				 */
1883 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1884 				    tsn);
1885 				goto err_out;
1886 			}
1887 		}
1888 	} else {
1889 		/*
1890 		 * It's a complete segment. Let's validate that we don't have a
1891 		 * re-assembly going on with the same Stream/Seq (for
1892 		 * ordered) or in the same Stream for unordered.
1893 		 */
1894 		if (control != NULL) {
1895 			if (ordered || asoc->idata_supported) {
1896 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1897 				    chk_flags, mid);
1898 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1899 				goto err_out;
1900 			} else {
1901 				if ((tsn == control->fsn_included + 1) &&
1902 				    (control->end_added == 0)) {
1903 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1904 					goto err_out;
1905 				} else {
1906 					control = NULL;
1907 				}
1908 			}
1909 		}
1910 	}
1911 	/* now do the tests */
1912 	if (((asoc->cnt_on_all_streams +
1913 	    asoc->cnt_on_reasm_queue +
1914 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1915 	    (((int)asoc->my_rwnd) <= 0)) {
1916 		/*
1917 		 * When we have NO room in the rwnd we check to make sure
1918 		 * the reader is doing its job...
1919 		 */
1920 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1921 			/* some to read, wake-up */
1922 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1923 			struct socket *so;
1924 
1925 			so = SCTP_INP_SO(stcb->sctp_ep);
1926 			atomic_add_int(&stcb->asoc.refcnt, 1);
1927 			SCTP_TCB_UNLOCK(stcb);
1928 			SCTP_SOCKET_LOCK(so, 1);
1929 			SCTP_TCB_LOCK(stcb);
1930 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1931 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1932 				/* assoc was freed while we were unlocked */
1933 				SCTP_SOCKET_UNLOCK(so, 1);
1934 				return (0);
1935 			}
1936 #endif
1937 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1938 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1939 			SCTP_SOCKET_UNLOCK(so, 1);
1940 #endif
1941 		}
1942 		/* now is it in the mapping array of what we have accepted? */
1943 		if (chk_type == SCTP_DATA) {
1944 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1945 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1946 				/* Nope, not in the valid range; dump it */
1947 		dump_packet:
1948 				sctp_set_rwnd(stcb, asoc);
1949 				if ((asoc->cnt_on_all_streams +
1950 				    asoc->cnt_on_reasm_queue +
1951 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1952 					SCTP_STAT_INCR(sctps_datadropchklmt);
1953 				} else {
1954 					SCTP_STAT_INCR(sctps_datadroprwnd);
1955 				}
1956 				*break_flag = 1;
1957 				return (0);
1958 			}
1959 		} else {
1960 			if (control == NULL) {
1961 				goto dump_packet;
1962 			}
1963 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1964 				goto dump_packet;
1965 			}
1966 		}
1967 	}
1968 #ifdef SCTP_ASOCLOG_OF_TSNS
1969 	SCTP_TCB_LOCK_ASSERT(stcb);
1970 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1971 		asoc->tsn_in_at = 0;
1972 		asoc->tsn_in_wrapped = 1;
1973 	}
1974 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1975 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1976 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1977 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1978 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1979 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1980 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1981 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1982 	asoc->tsn_in_at++;
1983 #endif
1984 	/*
1985 	 * Before we continue, let's validate that we are not being fooled
1986 	 * by an evil attacker. We can only have as many chunks as the TSN
1987 	 * spread allowed by the mapping array (N * 8 bits), so there is no
1988 	 * way our stream sequence numbers could have wrapped. We of course
1989 	 * only validate the FIRST fragment, so the bit must be set.
1990 	 */
1991 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1992 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1993 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1994 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1995 		/* The incoming sseq is behind where we last delivered? */
1996 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1997 		    mid, asoc->strmin[sid].last_mid_delivered);
1998 
1999 		if (asoc->idata_supported) {
2000 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2001 			    asoc->strmin[sid].last_mid_delivered,
2002 			    tsn,
2003 			    sid,
2004 			    mid);
2005 		} else {
2006 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2007 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2008 			    tsn,
2009 			    sid,
2010 			    (uint16_t)mid);
2011 		}
2012 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2013 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2014 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2015 		*abort_flag = 1;
2016 		return (0);
2017 	}
2018 	if (chk_type == SCTP_IDATA) {
2019 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2020 	} else {
2021 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2022 	}
2023 	if (last_chunk == 0) {
2024 		if (chk_type == SCTP_IDATA) {
2025 			dmbuf = SCTP_M_COPYM(*m,
2026 			    (offset + sizeof(struct sctp_idata_chunk)),
2027 			    the_len, M_NOWAIT);
2028 		} else {
2029 			dmbuf = SCTP_M_COPYM(*m,
2030 			    (offset + sizeof(struct sctp_data_chunk)),
2031 			    the_len, M_NOWAIT);
2032 		}
2033 #ifdef SCTP_MBUF_LOGGING
2034 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2035 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2036 		}
2037 #endif
2038 	} else {
2039 		/* We can steal the last chunk */
2040 		int l_len;
2041 
2042 		dmbuf = *m;
2043 		/* lop off the top part */
2044 		if (chk_type == SCTP_IDATA) {
2045 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2046 		} else {
2047 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2048 		}
2049 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2050 			l_len = SCTP_BUF_LEN(dmbuf);
2051 		} else {
2052 			/*
2053 			 * Need to count up the size; hopefully we do not
2054 			 * hit this too often :-0
2055 			 */
2056 			struct mbuf *lat;
2057 
2058 			l_len = 0;
2059 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2060 				l_len += SCTP_BUF_LEN(lat);
2061 			}
2062 		}
2063 		if (l_len > the_len) {
2064 			/* Trim the excess bytes off the end too */
2065 			m_adj(dmbuf, -(l_len - the_len));
2066 		}
2067 	}
2068 	if (dmbuf == NULL) {
2069 		SCTP_STAT_INCR(sctps_nomem);
2070 		return (0);
2071 	}
2072 	/*
2073 	 * Now, no matter what, we need a control; get one if we don't have
2074 	 * one (we may have gotten it above when we found the message was
2075 	 * fragmented).
2076 	 */
2077 	if (control == NULL) {
2078 		sctp_alloc_a_readq(stcb, control);
2079 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2080 		    ppid,
2081 		    sid,
2082 		    chk_flags,
2083 		    NULL, fsn, mid);
2084 		if (control == NULL) {
2085 			SCTP_STAT_INCR(sctps_nomem);
2086 			return (0);
2087 		}
2088 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2089 			struct mbuf *mm;
2090 
2091 			control->data = dmbuf;
2092 			for (mm = control->data; mm; mm = mm->m_next) {
2093 				control->length += SCTP_BUF_LEN(mm);
2094 			}
2095 			control->tail_mbuf = NULL;
2096 			control->end_added = 1;
2097 			control->last_frag_seen = 1;
2098 			control->first_frag_seen = 1;
2099 			control->fsn_included = fsn;
2100 			control->top_fsn = fsn;
2101 		}
2102 		created_control = 1;
2103 	}
2104 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2105 	    chk_flags, ordered, mid, control);
2106 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2107 	    TAILQ_EMPTY(&asoc->resetHead) &&
2108 	    ((ordered == 0) ||
2109 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2110 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2111 		/* Candidate for express delivery */
2112 		/*
2113 		 * It's not fragmented, no PD-API is up, nothing is in the
2114 		 * delivery queue, it's unordered OR ordered and the next to
2115 		 * deliver AND nothing else is stuck on the stream queue,
2116 		 * and there is room for it in the socket buffer. Let's just
2117 		 * stuff it up the buffer....
2118 		 */
2119 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2120 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2121 			asoc->highest_tsn_inside_nr_map = tsn;
2122 		}
2123 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2124 		    control, mid);
2125 
2126 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2127 		    control, &stcb->sctp_socket->so_rcv,
2128 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2129 
2130 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2131 			/* for ordered, bump what we delivered */
2132 			asoc->strmin[sid].last_mid_delivered++;
2133 		}
2134 		SCTP_STAT_INCR(sctps_recvexpress);
2135 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2136 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2137 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2138 		}
2139 		control = NULL;
2140 		goto finish_express_del;
2141 	}
2142 	/* Now will we need a chunk too? */
2143 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2144 		sctp_alloc_a_chunk(stcb, chk);
2145 		if (chk == NULL) {
2146 			/* No memory so we drop the chunk */
2147 			SCTP_STAT_INCR(sctps_nomem);
2148 			if (last_chunk == 0) {
2149 				/* we copied it, free the copy */
2150 				sctp_m_freem(dmbuf);
2151 			}
2152 			return (0);
2153 		}
2154 		chk->rec.data.tsn = tsn;
2155 		chk->no_fr_allowed = 0;
2156 		chk->rec.data.fsn = fsn;
2157 		chk->rec.data.mid = mid;
2158 		chk->rec.data.sid = sid;
2159 		chk->rec.data.ppid = ppid;
2160 		chk->rec.data.context = stcb->asoc.context;
2161 		chk->rec.data.doing_fast_retransmit = 0;
2162 		chk->rec.data.rcv_flags = chk_flags;
2163 		chk->asoc = asoc;
2164 		chk->send_size = the_len;
2165 		chk->whoTo = net;
2166 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2167 		    chk,
2168 		    control, mid);
2169 		atomic_add_int(&net->ref_count, 1);
2170 		chk->data = dmbuf;
2171 	}
2172 	/* Set the appropriate TSN mark */
2173 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2174 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2175 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2176 			asoc->highest_tsn_inside_nr_map = tsn;
2177 		}
2178 	} else {
2179 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2180 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2181 			asoc->highest_tsn_inside_map = tsn;
2182 		}
2183 	}
2184 	/* Now is it complete (i.e. not fragmented)? */
2185 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2186 		/*
2187 		 * Special check for when streams are resetting. We could be
2188 		 * smarter about this and check the actual stream to see if
2189 		 * it is not being reset... that way we would not create
2190 		 * head-of-line blocking amongst streams being reset and
2191 		 * those not being reset.
2192 		 *
2193 		 */
2194 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2195 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2196 			/*
2197 			 * Yep, it's past where we need to reset... go ahead
2198 			 * and queue it.
2199 			 */
2200 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2201 				/* first one on */
2202 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2203 			} else {
2204 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2205 				unsigned char inserted = 0;
2206 
2207 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2208 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2209 						continue;
2211 					} else {
2212 						/* found it */
2213 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2214 						inserted = 1;
2215 						break;
2216 					}
2217 				}
2218 				if (inserted == 0) {
2219 					/*
2220 					 * Not inserted before any existing
2221 					 * entry, so it must go at the end
2222 					 * of the queue.
2223 					 */
2224 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2225 				}
2226 			}
2227 			goto finish_express_del;
2228 		}
2229 		if (chk_flags & SCTP_DATA_UNORDERED) {
2230 			/* queue directly into socket buffer */
2231 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2232 			    control, mid);
2233 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2234 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2235 			    control,
2236 			    &stcb->sctp_socket->so_rcv, 1,
2237 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2238 
2239 		} else {
2240 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2241 			    mid);
2242 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2243 			if (*abort_flag) {
2244 				if (last_chunk) {
2245 					*m = NULL;
2246 				}
2247 				return (0);
2248 			}
2249 		}
2250 		goto finish_express_del;
2251 	}
2252 	/* If we reach here its a reassembly */
2253 	need_reasm_check = 1;
2254 	SCTPDBG(SCTP_DEBUG_XXX,
2255 	    "Queue data to stream for reasm control: %p MID: %u\n",
2256 	    control, mid);
2257 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2258 	if (*abort_flag) {
2259 		/*
2260 		 * the assoc is now gone and chk was put onto the reasm
2261 		 * queue, which has all been freed.
2262 		 */
2263 		if (last_chunk) {
2264 			*m = NULL;
2265 		}
2266 		return (0);
2267 	}
2268 finish_express_del:
2269 	/* Here we tidy up things */
2270 	if (tsn == (asoc->cumulative_tsn + 1)) {
2271 		/* Update cum-ack */
2272 		asoc->cumulative_tsn = tsn;
2273 	}
2274 	if (last_chunk) {
2275 		*m = NULL;
2276 	}
2277 	if (ordered) {
2278 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2279 	} else {
2280 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2281 	}
2282 	SCTP_STAT_INCR(sctps_recvdata);
2283 	/* Set it present please */
2284 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2285 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2286 	}
2287 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2288 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2289 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2290 	}
2291 	if (need_reasm_check) {
2292 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2293 		need_reasm_check = 0;
2294 	}
2295 	/* check the special flag for stream resets */
2296 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2297 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2298 		/*
2299 		 * We have finished working through the backlogged TSNs; now
2300 		 * it is time to reset streams. 1: call the reset function.
2301 		 * 2: free the pending_reply space. 3: distribute any chunks
2302 		 * in the pending_reply_queue.
2303 		 */
2304 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2305 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2306 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2307 		SCTP_FREE(liste, SCTP_M_STRESET);
2308 		/* sa_ignore FREED_MEMORY */
2309 		liste = TAILQ_FIRST(&asoc->resetHead);
2310 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2311 			/* All can be removed */
2312 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2313 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2314 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2315 				if (*abort_flag) {
2316 					return (0);
2317 				}
2318 				if (need_reasm_check) {
2319 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2320 					need_reasm_check = 0;
2321 				}
2322 			}
2323 		} else {
2324 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2325 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2326 					break;
2327 				}
2328 				/*
2329 				 * If control->sinfo_tsn is <= liste->tsn we
2330 				 * can process it, which is the negation of
2331 				 * control->sinfo_tsn > liste->tsn.
2332 				 */
2333 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2334 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2335 				if (*abort_flag) {
2336 					return (0);
2337 				}
2338 				if (need_reasm_check) {
2339 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2340 					need_reasm_check = 0;
2341 				}
2342 			}
2343 		}
2344 	}
2345 	return (1);
2346 }
2347 
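/*
 * sctp_map_lookup_tab[val] is the number of consecutive one bits in val,
 * counted from the least significant bit: e.g. 0x07 (00000111) gives 3,
 * 0xfe (11111110) gives 0. The slide code below uses it to count how many
 * TSNs past the mapping array base are present with no gap.
 */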
2348 static const int8_t sctp_map_lookup_tab[256] = {
2349 	0, 1, 0, 2, 0, 1, 0, 3,
2350 	0, 1, 0, 2, 0, 1, 0, 4,
2351 	0, 1, 0, 2, 0, 1, 0, 3,
2352 	0, 1, 0, 2, 0, 1, 0, 5,
2353 	0, 1, 0, 2, 0, 1, 0, 3,
2354 	0, 1, 0, 2, 0, 1, 0, 4,
2355 	0, 1, 0, 2, 0, 1, 0, 3,
2356 	0, 1, 0, 2, 0, 1, 0, 6,
2357 	0, 1, 0, 2, 0, 1, 0, 3,
2358 	0, 1, 0, 2, 0, 1, 0, 4,
2359 	0, 1, 0, 2, 0, 1, 0, 3,
2360 	0, 1, 0, 2, 0, 1, 0, 5,
2361 	0, 1, 0, 2, 0, 1, 0, 3,
2362 	0, 1, 0, 2, 0, 1, 0, 4,
2363 	0, 1, 0, 2, 0, 1, 0, 3,
2364 	0, 1, 0, 2, 0, 1, 0, 7,
2365 	0, 1, 0, 2, 0, 1, 0, 3,
2366 	0, 1, 0, 2, 0, 1, 0, 4,
2367 	0, 1, 0, 2, 0, 1, 0, 3,
2368 	0, 1, 0, 2, 0, 1, 0, 5,
2369 	0, 1, 0, 2, 0, 1, 0, 3,
2370 	0, 1, 0, 2, 0, 1, 0, 4,
2371 	0, 1, 0, 2, 0, 1, 0, 3,
2372 	0, 1, 0, 2, 0, 1, 0, 6,
2373 	0, 1, 0, 2, 0, 1, 0, 3,
2374 	0, 1, 0, 2, 0, 1, 0, 4,
2375 	0, 1, 0, 2, 0, 1, 0, 3,
2376 	0, 1, 0, 2, 0, 1, 0, 5,
2377 	0, 1, 0, 2, 0, 1, 0, 3,
2378 	0, 1, 0, 2, 0, 1, 0, 4,
2379 	0, 1, 0, 2, 0, 1, 0, 3,
2380 	0, 1, 0, 2, 0, 1, 0, 8
2381 };
2382 
2383 
2384 void
2385 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2386 {
2387 	/*
2388 	 * Now we also need to check the mapping array in a couple of ways.
2389 	 * 1) Did we move the cum-ack point?
2390 	 *
2391 	 * When you first glance at this you might think that all entries
2392 	 * that make up the position of the cum-ack would be in the
2393 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2394 	 * deliverable. That's true with one exception: when it's a
2395 	 * fragmented message we may not deliver the data until some
2396 	 * threshold (or all of it) is in place. So we must OR the
2397 	 * nr_mapping_array and mapping_array to get a true picture.
2398 	 */
2399 	struct sctp_association *asoc;
2400 	int at;
2401 	uint8_t val;
2402 	int slide_from, slide_end, lgap, distance;
2403 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2404 
2405 	asoc = &stcb->asoc;
2406 
2407 	old_cumack = asoc->cumulative_tsn;
2408 	old_base = asoc->mapping_array_base_tsn;
2409 	old_highest = asoc->highest_tsn_inside_map;
2410 	/*
2411 	 * We could probably improve this a small bit by calculating the
2412 	 * offset of the current cum-ack as the starting point.
2413 	 */
2414 	at = 0;
2415 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2416 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2417 		if (val == 0xff) {
2418 			at += 8;
2419 		} else {
2420 			/* there is a 0 bit */
2421 			at += sctp_map_lookup_tab[val];
2422 			break;
2423 		}
2424 	}
2425 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
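	/*
	 * For example: if the OR of the two arrays begins ff ff 07, then
	 * at = 8 + 8 + 3 = 19 and the new cum-ack is base_tsn + 18, the
	 * 19th consecutively received TSN.
	 */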
2426 
2427 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2428 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2429 #ifdef INVARIANTS
2430 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2431 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2432 #else
2433 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2434 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2435 		sctp_print_mapping_array(asoc);
2436 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2437 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2438 		}
2439 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2440 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2441 #endif
2442 	}
2443 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2444 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2445 	} else {
2446 		highest_tsn = asoc->highest_tsn_inside_map;
2447 	}
2448 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2449 		/* The complete array was completed by a single FR */
2450 		/* highest becomes the cum-ack */
2451 		int clr;
2452 #ifdef INVARIANTS
2453 		unsigned int i;
2454 #endif
2455 
2456 		/* clear the array */
2457 		clr = ((at + 7) >> 3);
2458 		if (clr > asoc->mapping_array_size) {
2459 			clr = asoc->mapping_array_size;
2460 		}
2461 		memset(asoc->mapping_array, 0, clr);
2462 		memset(asoc->nr_mapping_array, 0, clr);
2463 #ifdef INVARIANTS
2464 		for (i = 0; i < asoc->mapping_array_size; i++) {
2465 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2466 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2467 				sctp_print_mapping_array(asoc);
2468 			}
2469 		}
2470 #endif
2471 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2472 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2473 	} else if (at >= 8) {
2474 		/* we can slide the mapping array down */
2475 		/* slide_from holds where we hit the first NON 0xff byte */
2476 
2477 		/*
2478 		 * now calculate the ceiling of the move using our highest
2479 		 * TSN value
2480 		 */
2481 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2482 		slide_end = (lgap >> 3);
2483 		if (slide_end < slide_from) {
2484 			sctp_print_mapping_array(asoc);
2485 #ifdef INVARIANTS
2486 			panic("impossible slide");
2487 #else
2488 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2489 			    lgap, slide_end, slide_from, at);
2490 			return;
2491 #endif
2492 		}
2493 		if (slide_end > asoc->mapping_array_size) {
2494 #ifdef INVARIANTS
2495 			panic("would overrun buffer");
2496 #else
2497 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2498 			    asoc->mapping_array_size, slide_end);
2499 			slide_end = asoc->mapping_array_size;
2500 #endif
2501 		}
2502 		distance = (slide_end - slide_from) + 1;
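		/*
		 * A sketch with example numbers: slide_from = 2 (first byte
		 * with a missing TSN) and lgap = 35 give slide_end = 4 and
		 * distance = 3, so bytes 2..4 move to the front and the
		 * base TSN advances by slide_from * 8 = 16.
		 */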
2503 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2504 			sctp_log_map(old_base, old_cumack, old_highest,
2505 			    SCTP_MAP_PREPARE_SLIDE);
2506 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2507 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2508 		}
2509 		if (distance + slide_from > asoc->mapping_array_size ||
2510 		    distance < 0) {
2511 			/*
2512 			 * Here we do NOT slide forward the array so that
2513 			 * hopefully when more data comes in to fill it up
2514 			 * we will be able to slide it forward. Really I
2515 			 * don't think this should happen :-0
2516 			 */
2517 
2518 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2519 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2520 				    (uint32_t)asoc->mapping_array_size,
2521 				    SCTP_MAP_SLIDE_NONE);
2522 			}
2523 		} else {
2524 			int ii;
2525 
2526 			for (ii = 0; ii < distance; ii++) {
2527 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2528 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2529 
2530 			}
2531 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2532 				asoc->mapping_array[ii] = 0;
2533 				asoc->nr_mapping_array[ii] = 0;
2534 			}
2535 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2536 				asoc->highest_tsn_inside_map += (slide_from << 3);
2537 			}
2538 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2539 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2540 			}
2541 			asoc->mapping_array_base_tsn += (slide_from << 3);
2542 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2543 				sctp_log_map(asoc->mapping_array_base_tsn,
2544 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2545 				    SCTP_MAP_SLIDE_RESULT);
2546 			}
2547 		}
2548 	}
2549 }
2550 
2551 void
2552 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2553 {
2554 	struct sctp_association *asoc;
2555 	uint32_t highest_tsn;
2556 	int is_a_gap;
2557 
2558 	sctp_slide_mapping_arrays(stcb);
2559 	asoc = &stcb->asoc;
2560 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2561 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2562 	} else {
2563 		highest_tsn = asoc->highest_tsn_inside_map;
2564 	}
2565 	/* Is there a gap now? */
2566 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2567 
2568 	/*
2569 	 * Now we need to see if we need to queue a sack or just start the
2570 	 * timer (if allowed).
2571 	 */
2572 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2573 		/*
2574 		 * OK, special case in the SHUTDOWN-SENT state: here we make
2575 		 * sure the SACK timer is off and instead send a SHUTDOWN
2576 		 * and a SACK.
2577 		 */
2578 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2579 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2580 			    stcb->sctp_ep, stcb, NULL,
2581 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2582 		}
2583 		sctp_send_shutdown(stcb,
2584 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2585 		if (is_a_gap) {
2586 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2587 		}
2588 	} else {
2589 		/*
2590 		 * CMT DAC algorithm: increase number of packets received
2591 		 * since last ack
2592 		 */
2593 		stcb->asoc.cmt_dac_pkts_rcvd++;
2594 
2595 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2596 							 * SACK */
2597 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2598 							 * longer is one */
2599 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2600 		    (is_a_gap) ||	/* is still a gap */
2601 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2602 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2603 		    ) {
2604 
2605 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2606 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2607 			    (stcb->asoc.send_sack == 0) &&
2608 			    (stcb->asoc.numduptsns == 0) &&
2609 			    (stcb->asoc.delayed_ack) &&
2610 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2611 
2612 				/*
2613 				 * CMT DAC algorithm: with CMT, delay acks
2614 				 * even in the face of reordering.
2615 				 * Therefore, acks that do not have to be
2616 				 * sent because of the above reasons will
2617 				 * be delayed. That is, acks that would
2618 				 * have been sent due to gap reports will
2619 				 * be delayed with DAC. Start the delayed
2620 				 * ack timer.
2621 				 * the delayed ack timer.
2622 				 */
2623 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2624 				    stcb->sctp_ep, stcb, NULL);
2625 			} else {
2626 				/*
2627 				 * Ok we must build a SACK since the timer
2628 				 * is pending, we got our first packet OR
2629 				 * there are gaps or duplicates.
2630 				 */
2631 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2632 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2633 			}
2634 		} else {
2635 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2636 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2637 				    stcb->sctp_ep, stcb, NULL);
2638 			}
2639 		}
2640 	}
2641 }
2642 
2643 int
2644 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2645     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2646     struct sctp_nets *net, uint32_t *high_tsn)
2647 {
2648 	struct sctp_chunkhdr *ch, chunk_buf;
2649 	struct sctp_association *asoc;
2650 	int num_chunks = 0;	/* number of control chunks processed */
2651 	int stop_proc = 0;
2652 	int break_flag, last_chunk;
2653 	int abort_flag = 0, was_a_gap;
2654 	struct mbuf *m;
2655 	uint32_t highest_tsn;
2656 	uint16_t chk_length;
2657 
2658 	/* set the rwnd */
2659 	sctp_set_rwnd(stcb, &stcb->asoc);
2660 
2661 	m = *mm;
2662 	SCTP_TCB_LOCK_ASSERT(stcb);
2663 	asoc = &stcb->asoc;
2664 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2665 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2666 	} else {
2667 		highest_tsn = asoc->highest_tsn_inside_map;
2668 	}
2669 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2670 	/*
2671 	 * setup where we got the last DATA packet from for any SACK that
2672 	 * may need to go out. Don't bump the net. This is done ONLY when a
2673 	 * chunk is assigned.
2674 	 */
2675 	asoc->last_data_chunk_from = net;
2676 
2677 	/*-
2678 	 * Now before we proceed we must figure out if this is a wasted
2679 	 * cluster... i.e. it is a small packet sent in and yet the driver
2680 	 * underneath allocated a full cluster for it. If so we must copy it
2681 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2682 	 * with cluster starvation. Note for __Panda__ we don't do this
2683 	 * since it has clusters all the way down to 64 bytes.
2684 	 */
2685 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2686 		/* we only handle mbufs that are singletons... not chains */
2687 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2688 		if (m) {
2689 			/* ok lets see if we can copy the data up */
2690 			caddr_t *from, *to;
2691 
2692 			/* get the pointers and copy */
2693 			to = mtod(m, caddr_t *);
2694 			from = mtod((*mm), caddr_t *);
2695 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2696 			/* copy the length and free up the old */
2697 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2698 			sctp_m_freem(*mm);
2699 			/* success, back copy */
2700 			*mm = m;
2701 		} else {
2702 			/* We are in trouble in the mbuf world .. yikes */
2703 			m = *mm;
2704 		}
2705 	}
2706 	/* get pointer to the first chunk header */
2707 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2708 	    sizeof(struct sctp_chunkhdr),
2709 	    (uint8_t *)&chunk_buf);
2710 	if (ch == NULL) {
2711 		return (1);
2712 	}
2713 	/*
2714 	 * process all DATA chunks...
2715 	 */
2716 	*high_tsn = asoc->cumulative_tsn;
2717 	break_flag = 0;
2718 	asoc->data_pkts_seen++;
2719 	while (stop_proc == 0) {
2720 		/* validate chunk length */
2721 		chk_length = ntohs(ch->chunk_length);
2722 		if (length - *offset < chk_length) {
2723 			/* all done, mutilated chunk */
2724 			stop_proc = 1;
2725 			continue;
2726 		}
2727 		if ((asoc->idata_supported == 1) &&
2728 		    (ch->chunk_type == SCTP_DATA)) {
2729 			struct mbuf *op_err;
2730 			char msg[SCTP_DIAG_INFO_LEN];
2731 
2732 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2733 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2734 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2735 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2736 			return (2);
2737 		}
2738 		if ((asoc->idata_supported == 0) &&
2739 		    (ch->chunk_type == SCTP_IDATA)) {
2740 			struct mbuf *op_err;
2741 			char msg[SCTP_DIAG_INFO_LEN];
2742 
2743 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2744 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2745 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2746 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2747 			return (2);
2748 		}
2749 		if ((ch->chunk_type == SCTP_DATA) ||
2750 		    (ch->chunk_type == SCTP_IDATA)) {
2751 			uint16_t clen;
2752 
2753 			if (ch->chunk_type == SCTP_DATA) {
2754 				clen = sizeof(struct sctp_data_chunk);
2755 			} else {
2756 				clen = sizeof(struct sctp_idata_chunk);
2757 			}
2758 			if (chk_length < clen) {
2759 				/*
2760 				 * Need to send an abort, since we had an
2761 				 * invalid data chunk.
2762 				 */
2763 				struct mbuf *op_err;
2764 				char msg[SCTP_DIAG_INFO_LEN];
2765 
2766 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2767 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2768 				    chk_length);
2769 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2770 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2771 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2772 				return (2);
2773 			}
2774 #ifdef SCTP_AUDITING_ENABLED
2775 			sctp_audit_log(0xB1, 0);
2776 #endif
2777 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2778 				last_chunk = 1;
2779 			} else {
2780 				last_chunk = 0;
2781 			}
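			/*
			 * last_chunk means the chunk extends to the end of
			 * the packet, so sctp_process_a_data_chunk() may
			 * steal the mbuf chain instead of copying the
			 * payload out of it.
			 */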
2782 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2783 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2784 			    last_chunk, ch->chunk_type)) {
2785 				num_chunks++;
2786 			}
2787 			if (abort_flag)
2788 				return (2);
2789 
2790 			if (break_flag) {
2791 				/*
2792 				 * Set because we ran out of rwnd space
2793 				 * and have no drop report space left.
2794 				 */
2795 				stop_proc = 1;
2796 				continue;
2797 			}
2798 		} else {
2799 			/* not a data chunk in the data region */
2800 			switch (ch->chunk_type) {
2801 			case SCTP_INITIATION:
2802 			case SCTP_INITIATION_ACK:
2803 			case SCTP_SELECTIVE_ACK:
2804 			case SCTP_NR_SELECTIVE_ACK:
2805 			case SCTP_HEARTBEAT_REQUEST:
2806 			case SCTP_HEARTBEAT_ACK:
2807 			case SCTP_ABORT_ASSOCIATION:
2808 			case SCTP_SHUTDOWN:
2809 			case SCTP_SHUTDOWN_ACK:
2810 			case SCTP_OPERATION_ERROR:
2811 			case SCTP_COOKIE_ECHO:
2812 			case SCTP_COOKIE_ACK:
2813 			case SCTP_ECN_ECHO:
2814 			case SCTP_ECN_CWR:
2815 			case SCTP_SHUTDOWN_COMPLETE:
2816 			case SCTP_AUTHENTICATION:
2817 			case SCTP_ASCONF_ACK:
2818 			case SCTP_PACKET_DROPPED:
2819 			case SCTP_STREAM_RESET:
2820 			case SCTP_FORWARD_CUM_TSN:
2821 			case SCTP_ASCONF:
2822 				{
2823 					/*
2824 					 * Now, what do we do with KNOWN
2825 					 * chunks that are NOT in the right
2826 					 * place?
2827 					 *
2828 					 * For now, I do nothing but ignore
2829 					 * them. We may later want to add
2830 					 * sysctl stuff to switch out and do
2831 					 * either an ABORT() or possibly
2832 					 * process them.
2833 					 */
2834 					struct mbuf *op_err;
2835 					char msg[SCTP_DIAG_INFO_LEN];
2836 
2837 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2838 					    ch->chunk_type);
2839 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2840 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2841 					return (2);
2842 				}
2843 			default:
2844 				/*
2845 				 * Unknown chunk type: use bit rules after
2846 				 * checking length
2847 				 */
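				/*
				 * Per RFC 4960, Section 3.2, the two
				 * high-order bits of the type encode the
				 * rules: 0x40 set means report the
				 * unrecognized chunk in an ERROR, 0x80 set
				 * means skip it and continue; otherwise
				 * processing of this packet stops.
				 */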
2848 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2849 					/*
2850 					 * Need to send an abort, since we
2851 					 * had an invalid chunk.
2852 					 */
2853 					struct mbuf *op_err;
2854 					char msg[SCTP_DIAG_INFO_LEN];
2855 
2856 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2857 					    chk_length);
2858 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2859 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2860 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2861 					return (2);
2862 				}
2863 				if (ch->chunk_type & 0x40) {
2864 					/* Add an error report to the queue */
2865 					struct mbuf *op_err;
2866 					struct sctp_gen_error_cause *cause;
2867 
2868 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2869 					    0, M_NOWAIT, 1, MT_DATA);
2870 					if (op_err != NULL) {
2871 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2872 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2873 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2874 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2875 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2876 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2877 							sctp_queue_op_err(stcb, op_err);
2878 						} else {
2879 							sctp_m_freem(op_err);
2880 						}
2881 					}
2882 				}
2883 				if ((ch->chunk_type & 0x80) == 0) {
2884 					/* discard the rest of this packet */
2885 					stop_proc = 1;
2886 				}	/* else skip this bad chunk and
2887 					 * continue... */
2888 				break;
2889 			}	/* switch of chunk type */
2890 		}
2891 		*offset += SCTP_SIZE32(chk_length);
2892 		if ((*offset >= length) || stop_proc) {
2893 			/* no more data left in the mbuf chain */
2894 			stop_proc = 1;
2895 			continue;
2896 		}
2897 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2898 		    sizeof(struct sctp_chunkhdr),
2899 		    (uint8_t *)&chunk_buf);
2900 		if (ch == NULL) {
2901 			*offset = length;
2902 			stop_proc = 1;
2903 			continue;
2904 		}
2905 	}
2906 	if (break_flag) {
2907 		/*
2908 		 * we need to report rwnd overrun drops.
2909 		 */
2910 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2911 	}
2912 	if (num_chunks) {
2913 		/*
2914 		 * We did get data; update the time for auto-close and give
2915 		 * the peer credit for being alive.
2916 		 */
2917 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2918 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2919 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2920 			    stcb->asoc.overall_error_count,
2921 			    0,
2922 			    SCTP_FROM_SCTP_INDATA,
2923 			    __LINE__);
2924 		}
2925 		stcb->asoc.overall_error_count = 0;
2926 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2927 	}
2928 	/* now service all of the reassm queue if needed */
2929 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2930 		/* Assure that we ack right away */
2931 		stcb->asoc.send_sack = 1;
2932 	}
2933 	/* Start a sack timer or QUEUE a SACK for sending */
2934 	sctp_sack_check(stcb, was_a_gap);
2935 	return (0);
2936 }
2937 
2938 static int
2939 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2940     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2941     int *num_frs,
2942     uint32_t *biggest_newly_acked_tsn,
2943     uint32_t *this_sack_lowest_newack,
2944     int *rto_ok)
2945 {
2946 	struct sctp_tmit_chunk *tp1;
2947 	unsigned int theTSN;
2948 	int j, wake_him = 0, circled = 0;
2949 
2950 	/* Recover the tp1 we last saw */
2951 	tp1 = *p_tp1;
2952 	if (tp1 == NULL) {
2953 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2954 	}
2955 	for (j = frag_strt; j <= frag_end; j++) {
2956 		theTSN = j + last_tsn;
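		/*
		 * Gap ack block offsets are relative to the cumulative TSN
		 * ack carried in the SACK: e.g. last_tsn = 100 with
		 * frag_strt = 2 and frag_end = 4 covers TSNs 102 to 104.
		 */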
2957 		while (tp1) {
2958 			if (tp1->rec.data.doing_fast_retransmit)
2959 				(*num_frs) += 1;
2960 
2961 			/*-
2962 			 * CMT: CUCv2 algorithm. For each TSN being
2963 			 * processed from the sent queue, track the
2964 			 * next expected pseudo-cumack, or
2965 			 * rtx_pseudo_cumack, if required. Separate
2966 			 * cumack trackers for first transmissions,
2967 			 * and retransmissions.
2968 			 */
2969 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2970 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2971 			    (tp1->snd_count == 1)) {
2972 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2973 				tp1->whoTo->find_pseudo_cumack = 0;
2974 			}
2975 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2976 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2977 			    (tp1->snd_count > 1)) {
2978 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2979 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2980 			}
2981 			if (tp1->rec.data.tsn == theTSN) {
2982 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2983 					/*-
2984 					 * must be held until
2985 					 * cum-ack passes
2986 					 */
2987 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2988 						/*-
2989 						 * If it is less than RESEND, it is
2990 						 * now no-longer in flight.
2991 						 * Higher values may already be set
2992 						 * via previous Gap Ack Blocks...
2993 						 * i.e. ACKED or RESEND.
2994 						 */
2995 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2996 						    *biggest_newly_acked_tsn)) {
2997 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2998 						}
2999 						/*-
3000 						 * CMT: SFR algo (and HTNA) - set
3001 						 * saw_newack to 1 for dest being
3002 						 * newly acked. update
3003 						 * this_sack_highest_newack if
3004 						 * appropriate.
3005 						 */
3006 						if (tp1->rec.data.chunk_was_revoked == 0)
3007 							tp1->whoTo->saw_newack = 1;
3008 
3009 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3010 						    tp1->whoTo->this_sack_highest_newack)) {
3011 							tp1->whoTo->this_sack_highest_newack =
3012 							    tp1->rec.data.tsn;
3013 						}
3014 						/*-
3015 						 * CMT DAC algo: also update
3016 						 * this_sack_lowest_newack
3017 						 */
3018 						if (*this_sack_lowest_newack == 0) {
3019 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3020 								sctp_log_sack(*this_sack_lowest_newack,
3021 								    last_tsn,
3022 								    tp1->rec.data.tsn,
3023 								    0,
3024 								    0,
3025 								    SCTP_LOG_TSN_ACKED);
3026 							}
3027 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3028 						}
3029 						/*-
3030 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3031 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3032 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3033 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3034 						 * Separate pseudo_cumack trackers for first transmissions and
3035 						 * retransmissions.
3036 						 */
3037 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3038 							if (tp1->rec.data.chunk_was_revoked == 0) {
3039 								tp1->whoTo->new_pseudo_cumack = 1;
3040 							}
3041 							tp1->whoTo->find_pseudo_cumack = 1;
3042 						}
3043 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3044 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3045 						}
3046 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3047 							if (tp1->rec.data.chunk_was_revoked == 0) {
3048 								tp1->whoTo->new_pseudo_cumack = 1;
3049 							}
3050 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3051 						}
3052 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3053 							sctp_log_sack(*biggest_newly_acked_tsn,
3054 							    last_tsn,
3055 							    tp1->rec.data.tsn,
3056 							    frag_strt,
3057 							    frag_end,
3058 							    SCTP_LOG_TSN_ACKED);
3059 						}
3060 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3061 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3062 							    tp1->whoTo->flight_size,
3063 							    tp1->book_size,
3064 							    (uint32_t)(uintptr_t)tp1->whoTo,
3065 							    tp1->rec.data.tsn);
3066 						}
3067 						sctp_flight_size_decrease(tp1);
3068 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3069 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3070 							    tp1);
3071 						}
3072 						sctp_total_flight_decrease(stcb, tp1);
3073 
3074 						tp1->whoTo->net_ack += tp1->send_size;
3075 						if (tp1->snd_count < 2) {
3076 							/*-
3077 							 * True non-retransmitted chunk
3078 							 */
3079 							tp1->whoTo->net_ack2 += tp1->send_size;
3080 
3081 							/*-
3082 							 * update RTO too ?
3083 							 */
3084 							if (tp1->do_rtt) {
3085 								if (*rto_ok) {
3086 									tp1->whoTo->RTO =
3087 									    sctp_calculate_rto(stcb,
3088 									    &stcb->asoc,
3089 									    tp1->whoTo,
3090 									    &tp1->sent_rcv_time,
3091 									    SCTP_RTT_FROM_DATA);
3092 									*rto_ok = 0;
3093 								}
3094 								if (tp1->whoTo->rto_needed == 0) {
3095 									tp1->whoTo->rto_needed = 1;
3096 								}
3097 								tp1->do_rtt = 0;
3098 							}
3099 						}
3100 					}
3101 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3102 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3103 						    stcb->asoc.this_sack_highest_gap)) {
3104 							stcb->asoc.this_sack_highest_gap =
3105 							    tp1->rec.data.tsn;
3106 						}
3107 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3108 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3109 #ifdef SCTP_AUDITING_ENABLED
3110 							sctp_audit_log(0xB2,
3111 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3112 #endif
3113 						}
3114 					}
3115 					/*-
3116 					 * All chunks NOT UNSENT fall through here and are marked
3117 					 * (leave PR-SCTP ones that are to skip alone though)
3118 					 */
3119 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3120 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3121 						tp1->sent = SCTP_DATAGRAM_MARKED;
3122 					}
3123 					if (tp1->rec.data.chunk_was_revoked) {
3124 						/* deflate the cwnd */
3125 						tp1->whoTo->cwnd -= tp1->book_size;
3126 						tp1->rec.data.chunk_was_revoked = 0;
3127 					}
3128 					/* NR Sack code here */
3129 					if (nr_sacking &&
3130 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3131 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3132 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3133 #ifdef INVARIANTS
3134 						} else {
3135 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3136 #endif
3137 						}
3138 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3139 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3140 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3141 							stcb->asoc.trigger_reset = 1;
3142 						}
3143 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3144 						if (tp1->data) {
3145 							/*
3146 							 * sa_ignore
3147 							 * NO_NULL_CHK
3148 							 */
3149 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3150 							sctp_m_freem(tp1->data);
3151 							tp1->data = NULL;
3152 						}
3153 						wake_him++;
3154 					}
3155 				}
3156 				break;
3157 			}	/* if (tp1->tsn == theTSN) */
3158 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3159 				break;
3160 			}
3161 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3162 			if ((tp1 == NULL) && (circled == 0)) {
3163 				circled++;
3164 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3165 			}
3166 		}		/* end while (tp1) */
3167 		if (tp1 == NULL) {
3168 			circled = 0;
3169 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3170 		}
3171 		/* In case the fragments were not in order we must reset */
3172 	}			/* end for (j = fragStart */
3173 	*p_tp1 = tp1;
3174 	return (wake_him);	/* Return value only used for nr-sack */
3175 }
3176 
3177 
3178 static int
3179 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3180     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3181     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3182     int num_seg, int num_nr_seg, int *rto_ok)
3183 {
3184 	struct sctp_gap_ack_block *frag, block;
3185 	struct sctp_tmit_chunk *tp1;
3186 	int i;
3187 	int num_frs = 0;
3188 	int chunk_freed;
3189 	int non_revocable;
3190 	uint16_t frag_strt, frag_end, prev_frag_end;
3191 
3192 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3193 	prev_frag_end = 0;
3194 	chunk_freed = 0;
3195 
3196 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3197 		if (i == num_seg) {
3198 			prev_frag_end = 0;
3199 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3200 		}
3201 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3202 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3203 		*offset += sizeof(block);
3204 		if (frag == NULL) {
3205 			return (chunk_freed);
3206 		}
3207 		frag_strt = ntohs(frag->start);
3208 		frag_end = ntohs(frag->end);
3209 
3210 		if (frag_strt > frag_end) {
3211 			/* This gap report is malformed, skip it. */
3212 			continue;
3213 		}
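		/*
		 * The start/end values in a gap ack block are offsets
		 * relative to the cumulative TSN ack. Illustrative
		 * (hypothetical) numbers: with last_tsn == 1000, a block
		 * with frag_strt == 2 and frag_end == 4 reports TSNs
		 * 1002..1004 as received by the peer.
		 */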
3214 		if (frag_strt <= prev_frag_end) {
3215 			/* This gap report is not in order, so restart. */
3216 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3217 		}
3218 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3219 			*biggest_tsn_acked = last_tsn + frag_end;
3220 		}
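		/*
		 * Note that this comparison is wrap-safe: SCTP_TSN_GT()
		 * uses 32-bit serial arithmetic, so e.g. (hypothetical
		 * values) last_tsn = 0xfffffffe with frag_end = 5 yields a
		 * wrapped sum of 3, and SCTP_TSN_GT(3, 0xfffffff0) is
		 * still true.
		 */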
3221 		if (i < num_seg) {
3222 			non_revocable = 0;
3223 		} else {
3224 			non_revocable = 1;
3225 		}
3226 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3227 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3228 		    this_sack_lowest_newack, rto_ok)) {
3229 			chunk_freed = 1;
3230 		}
3231 		prev_frag_end = frag_end;
3232 	}
3233 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3234 		if (num_frs)
3235 			sctp_log_fr(*biggest_tsn_acked,
3236 			    *biggest_newly_acked_tsn,
3237 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3238 	}
3239 	return (chunk_freed);
3240 }
3241 
3242 static void
3243 sctp_check_for_revoked(struct sctp_tcb *stcb,
3244     struct sctp_association *asoc, uint32_t cumack,
3245     uint32_t biggest_tsn_acked)
3246 {
3247 	struct sctp_tmit_chunk *tp1;
3248 
3249 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3250 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3251 			/*
3252 			 * OK, this guy is either ACKED or MARKED. If it is
3253 			 * ACKED, it has been previously acked but not this
3254 			 * time, i.e. it was revoked. If it is MARKED, it was
3255 			 * ACK'ed again.
3256 			 */
3257 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3258 				break;
3259 			}
3260 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3261 				/* it has been revoked */
3262 				tp1->sent = SCTP_DATAGRAM_SENT;
3263 				tp1->rec.data.chunk_was_revoked = 1;
3264 				/*
3265 				 * We must add this stuff back in to assure
3266 				 * timers and such get started.
3267 				 */
3268 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3269 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3270 					    tp1->whoTo->flight_size,
3271 					    tp1->book_size,
3272 					    (uint32_t)(uintptr_t)tp1->whoTo,
3273 					    tp1->rec.data.tsn);
3274 				}
3275 				sctp_flight_size_increase(tp1);
3276 				sctp_total_flight_increase(stcb, tp1);
3277 				/*
3278 				 * We inflate the cwnd to compensate for our
3279 				 * artificial inflation of the flight_size.
3280 				 */
3281 				tp1->whoTo->cwnd += tp1->book_size;
3282 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3283 					sctp_log_sack(asoc->last_acked_seq,
3284 					    cumack,
3285 					    tp1->rec.data.tsn,
3286 					    0,
3287 					    0,
3288 					    SCTP_LOG_TSN_REVOKED);
3289 				}
3290 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3291 				/* it has been re-acked in this SACK */
3292 				tp1->sent = SCTP_DATAGRAM_ACKED;
3293 			}
3294 		}
3295 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3296 			break;
3297 	}
3298 }
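/*
 * Revocation, by example (hypothetical TSNs): SACK #1 gap-acks TSN 1004,
 * moving its chunk to ACKED. SACK #2 then arrives with cum-ack 1002 but no
 * gap covering 1004, so the routine above moves 1004 back to SENT, puts it
 * back into the flight, and inflates the cwnd until the revocation is
 * resolved.
 */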
3299 
3300 
3301 static void
3302 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3303     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3304 {
3305 	struct sctp_tmit_chunk *tp1;
3306 	int strike_flag = 0;
3307 	struct timeval now;
3308 	int tot_retrans = 0;
3309 	uint32_t sending_seq;
3310 	struct sctp_nets *net;
3311 	int num_dests_sacked = 0;
3312 
3313 	/*
3314 	 * Select the sending_seq; this is either the next thing ready to be
3315 	 * sent but not transmitted, OR the next seq we will assign.
3316 	 */
3317 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3318 	if (tp1 == NULL) {
3319 		sending_seq = asoc->sending_seq;
3320 	} else {
3321 		sending_seq = tp1->rec.data.tsn;
3322 	}
3323 
3324 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3325 	if ((asoc->sctp_cmt_on_off > 0) &&
3326 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3327 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3328 			if (net->saw_newack)
3329 				num_dests_sacked++;
3330 		}
3331 	}
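	/*
	 * num_dests_sacked counts how many destinations saw a new ack in
	 * this SACK; more than one means a "mixed" SACK. The DAC-based
	 * extra strikes below require num_dests_sacked == 1, i.e. they
	 * are suppressed for mixed SACKs.
	 */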
3332 	if (stcb->asoc.prsctp_supported) {
3333 		(void)SCTP_GETTIME_TIMEVAL(&now);
3334 	}
3335 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3336 		strike_flag = 0;
3337 		if (tp1->no_fr_allowed) {
3338 			/* this one had a timeout or something */
3339 			continue;
3340 		}
3341 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3342 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3343 				sctp_log_fr(biggest_tsn_newly_acked,
3344 				    tp1->rec.data.tsn,
3345 				    tp1->sent,
3346 				    SCTP_FR_LOG_CHECK_STRIKE);
3347 		}
3348 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3349 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3350 			/* done */
3351 			break;
3352 		}
3353 		if (stcb->asoc.prsctp_supported) {
3354 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3355 				/* Is it expired? */
3356 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3357 					/* Yes so drop it */
3358 					if (tp1->data != NULL) {
3359 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3360 						    SCTP_SO_NOT_LOCKED);
3361 					}
3362 					continue;
3363 				}
3364 			}
3365 		}
3366 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3367 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3368 			/* we are beyond the tsn in the sack  */
3369 			break;
3370 		}
3371 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3372 			/* either a RESEND, ACKED, or MARKED */
3373 			/* skip */
3374 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3375 						/* Continue striking FWD-TSN chunks */
3376 				tp1->rec.data.fwd_tsn_cnt++;
3377 			}
3378 			continue;
3379 		}
3380 		/*
3381 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3382 		 */
3383 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3384 			/*
3385 			 * No new acks were received for data sent to this
3386 			 * dest. Therefore, according to the SFR algo for
3387 			 * CMT, no data sent to this dest can be marked for
3388 			 * FR using this SACK.
3389 			 */
3390 			continue;
3391 		} else if (tp1->whoTo &&
3392 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3393 			    tp1->whoTo->this_sack_highest_newack) &&
3394 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3395 			/*
3396 			 * CMT: New acks were received for data sent to
3397 			 * this dest. But no new acks were seen for data
3398 			 * sent after tp1. Therefore, according to the SFR
3399 			 * algo for CMT, tp1 cannot be marked for FR using
3400 			 * this SACK. This step covers part of the DAC algo
3401 			 * and the HTNA algo as well.
3402 			 */
3403 			continue;
3404 		}
3405 		/*
3406 		 * Here we check to see if we have already done a FR
3407 		 * and if so we see if the biggest TSN we saw in the sack is
3408 		 * smaller than the recovery point. If so we don't strike
3409 		 * the tsn... otherwise we CAN strike the TSN.
3410 		 */
3411 		/*
3412 		 * @@@ JRI: Check for CMT if (accum_moved &&
3413 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3414 		 * 0)) {
3415 		 */
3416 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3417 			/*
3418 			 * Strike the TSN if in fast-recovery and cum-ack
3419 			 * moved.
3420 			 */
3421 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3422 				sctp_log_fr(biggest_tsn_newly_acked,
3423 				    tp1->rec.data.tsn,
3424 				    tp1->sent,
3425 				    SCTP_FR_LOG_STRIKE_CHUNK);
3426 			}
3427 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3428 				tp1->sent++;
3429 			}
3430 			if ((asoc->sctp_cmt_on_off > 0) &&
3431 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3432 				/*
3433 				 * CMT DAC algorithm: If SACK flag is set to
3434 				 * 0, then lowest_newack test will not pass
3435 				 * because it would have been set to the
3436 				 * cumack earlier. If tp1 is not already
3437 				 * marked for rtx, this is not a mixed SACK,
3438 				 * and tp1 is not between two sacked TSNs,
3439 				 * then mark it by one more. NOTE that we mark by one
3440 				 * additional time since the SACK DAC flag
3441 				 * indicates that two packets have been
3442 				 * received after this missing TSN.
3443 				 */
3444 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3445 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3446 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 						sctp_log_fr(16 + num_dests_sacked,
3448 						    tp1->rec.data.tsn,
3449 						    tp1->sent,
3450 						    SCTP_FR_LOG_STRIKE_CHUNK);
3451 					}
3452 					tp1->sent++;
3453 				}
3454 			}
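			/*
			 * Net effect, sketched with the usual constants
			 * (assumed): tp1->sent climbs one strike per SACK
			 * toward SCTP_DATAGRAM_RESEND, and with CMT DAC a
			 * missing TSN may take a second strike from the
			 * same SACK, reaching the fast-retransmit threshold
			 * of three missing reports sooner.
			 */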
3455 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3456 		    (asoc->sctp_cmt_on_off == 0)) {
3457 			/*
3458 			 * For those that have done a FR we must take
3459 			 * special consideration if we strike. I.e the
3460 			 * biggest_newly_acked must be higher than the
3461 			 * sending_seq at the time we did the FR.
3462 			 */
3463 			if (
3464 #ifdef SCTP_FR_TO_ALTERNATE
3465 			/*
3466 			 * If FR's go to new networks, then we must only do
3467 			 * this for singly homed asoc's. However if the FR's
3468 			 * go to the same network (Armando's work) then it's
3469 			 * ok to FR multiple times.
3470 			 */
3471 			    (asoc->numnets < 2)
3472 #else
3473 			    (1)
3474 #endif
3475 			    ) {
3476 
3477 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3478 				    tp1->rec.data.fast_retran_tsn)) {
3479 					/*
3480 					 * Strike the TSN, since this ack is
3481 					 * beyond where things were when we
3482 					 * did a FR.
3483 					 */
3484 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3485 						sctp_log_fr(biggest_tsn_newly_acked,
3486 						    tp1->rec.data.tsn,
3487 						    tp1->sent,
3488 						    SCTP_FR_LOG_STRIKE_CHUNK);
3489 					}
3490 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3491 						tp1->sent++;
3492 					}
3493 					strike_flag = 1;
3494 					if ((asoc->sctp_cmt_on_off > 0) &&
3495 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3496 						/*
3497 						 * CMT DAC algorithm: If
3498 						 * SACK flag is set to 0,
3499 						 * then lowest_newack test
3500 						 * will not pass because it
3501 						 * would have been set to
3502 						 * the cumack earlier. If
3503 						 * tp1 is not already marked
3504 						 * for rtx, this is not a
3505 						 * mixed SACK, and tp1 is not
3506 						 * between two sacked TSNs, then mark by
3507 						 * one more. NOTE that we
3508 						 * are marking by one
3509 						 * additional time since the
3510 						 * SACK DAC flag indicates
3511 						 * that two packets have
3512 						 * been received after this
3513 						 * missing TSN.
3514 						 */
3515 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3516 						    (num_dests_sacked == 1) &&
3517 						    SCTP_TSN_GT(this_sack_lowest_newack,
3518 						    tp1->rec.data.tsn)) {
3519 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3520 								sctp_log_fr(32 + num_dests_sacked,
3521 								    tp1->rec.data.tsn,
3522 								    tp1->sent,
3523 								    SCTP_FR_LOG_STRIKE_CHUNK);
3524 							}
3525 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3526 								tp1->sent++;
3527 							}
3528 						}
3529 					}
3530 				}
3531 			}
3532 			/*
3533 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3534 			 * algo covers HTNA.
3535 			 */
3536 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3537 		    biggest_tsn_newly_acked)) {
3538 			/*
3539 			 * We don't strike these: this is the HTNA
3540 			 * algorithm, i.e. we don't strike if our TSN is
3541 			 * larger than the Highest TSN Newly Acked.
3542 			 */
3543 			;
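			/*
			 * Worked example (hypothetical TSNs): if the
			 * highest TSN newly acked by this SACK is 1005, a
			 * chunk with TSN 1007 is left alone; only TSNs at
			 * or below 1005 are candidates for a strike.
			 */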
3544 		} else {
3545 			/* Strike the TSN */
3546 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3547 				sctp_log_fr(biggest_tsn_newly_acked,
3548 				    tp1->rec.data.tsn,
3549 				    tp1->sent,
3550 				    SCTP_FR_LOG_STRIKE_CHUNK);
3551 			}
3552 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3553 				tp1->sent++;
3554 			}
3555 			if ((asoc->sctp_cmt_on_off > 0) &&
3556 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3557 				/*
3558 				 * CMT DAC algorithm: If SACK flag is set to
3559 				 * 0, then lowest_newack test will not pass
3560 				 * because it would have been set to the
3561 				 * cumack earlier. If tp1 is not already
3562 				 * marked for rtx, this is not a mixed SACK,
3563 				 * and tp1 is not between two sacked TSNs,
3564 				 * then mark it by one more. NOTE that we mark by one
3565 				 * additional time since the SACK DAC flag
3566 				 * indicates that two packets have been
3567 				 * received after this missing TSN.
3568 				 */
3569 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3570 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3571 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3572 						sctp_log_fr(48 + num_dests_sacked,
3573 						    tp1->rec.data.tsn,
3574 						    tp1->sent,
3575 						    SCTP_FR_LOG_STRIKE_CHUNK);
3576 					}
3577 					tp1->sent++;
3578 				}
3579 			}
3580 		}
3581 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3582 			struct sctp_nets *alt;
3583 
3584 			/* fix counts and things */
3585 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3586 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3587 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3588 				    tp1->book_size,
3589 				    (uint32_t)(uintptr_t)tp1->whoTo,
3590 				    tp1->rec.data.tsn);
3591 			}
3592 			if (tp1->whoTo) {
3593 				tp1->whoTo->net_ack++;
3594 				sctp_flight_size_decrease(tp1);
3595 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3596 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3597 					    tp1);
3598 				}
3599 			}
3600 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3601 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3602 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3603 			}
3604 			/* add back to the rwnd */
3605 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
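			/*
			 * For example (hypothetical sizes): a 1200-byte
			 * chunk with a 256-byte per-chunk overhead
			 * allowance (the sysctl's usual default) credits
			 * 1456 bytes back to our estimate of the peer's
			 * rwnd, since a chunk marked for resend is no
			 * longer counted against it.
			 */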
3606 
3607 			/* remove from the total flight */
3608 			sctp_total_flight_decrease(stcb, tp1);
3609 
3610 			if ((stcb->asoc.prsctp_supported) &&
3611 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3612 				/*
3613 				 * Has it been retransmitted tv_sec times? -
3614 				 * we store the retran count there.
3615 				 */
3616 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3617 					/* Yes, so drop it */
3618 					if (tp1->data != NULL) {
3619 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3620 						    SCTP_SO_NOT_LOCKED);
3621 					}
3622 					/* Make sure to flag we had a FR */
3623 					if (tp1->whoTo != NULL) {
3624 						tp1->whoTo->net_ack++;
3625 					}
3626 					continue;
3627 				}
3628 			}
3629 			/*
3630 			 * SCTP_PRINTF("OK, we are now ready to FR this
3631 			 * guy\n");
3632 			 */
3633 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3634 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3635 				    0, SCTP_FR_MARKED);
3636 			}
3637 			if (strike_flag) {
3638 				/* This is a subsequent FR */
3639 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3640 			}
3641 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3642 			if (asoc->sctp_cmt_on_off > 0) {
3643 				/*
3644 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3645 				 * If CMT is being used, then pick dest with
3646 				 * largest ssthresh for any retransmission.
3647 				 */
3648 				tp1->no_fr_allowed = 1;
3649 				alt = tp1->whoTo;
3650 				/* sa_ignore NO_NULL_CHK */
3651 				if (asoc->sctp_cmt_pf > 0) {
3652 					/*
3653 					 * JRS 5/18/07 - If CMT PF is on,
3654 					 * use the PF version of
3655 					 * find_alt_net()
3656 					 */
3657 					alt = sctp_find_alternate_net(stcb, alt, 2);
3658 				} else {
3659 					/*
3660 					 * JRS 5/18/07 - If only CMT is on,
3661 					 * use the CMT version of
3662 					 * find_alt_net()
3663 					 */
3664 					/* sa_ignore NO_NULL_CHK */
3665 					alt = sctp_find_alternate_net(stcb, alt, 1);
3666 				}
3667 				if (alt == NULL) {
3668 					alt = tp1->whoTo;
3669 				}
3670 				/*
3671 				 * CUCv2: If a different dest is picked for
3672 				 * the retransmission, then new
3673 				 * (rtx-)pseudo_cumack needs to be tracked
3674 				 * for orig dest. Let CUCv2 track new (rtx-)
3675 				 * pseudo-cumack always.
3676 				 */
3677 				if (tp1->whoTo) {
3678 					tp1->whoTo->find_pseudo_cumack = 1;
3679 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3680 				}
3681 			} else {	/* CMT is OFF */
3682 
3683 #ifdef SCTP_FR_TO_ALTERNATE
3684 				/* Can we find an alternate? */
3685 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3686 #else
3687 				/*
3688 				 * default behavior is to NOT retransmit
3689 				 * FR's to an alternate. Armando Caro's
3690 				 * paper details why.
3691 				 */
3692 				alt = tp1->whoTo;
3693 #endif
3694 			}
3695 
3696 			tp1->rec.data.doing_fast_retransmit = 1;
3697 			tot_retrans++;
3698 			/* mark the sending seq for possible subsequent FR's */
3699 			/*
3700 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3701 			 * (uint32_t)tp1->rec.data.tsn);
3702 			 */
3703 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3704 				/*
3705 				 * If the send queue is empty then it's
3706 				 * the next sequence number that will be
3707 				 * assigned, so we subtract one from this to
3708 				 * get the one we last sent.
3709 				 */
3710 				tp1->rec.data.fast_retran_tsn = sending_seq;
3711 			} else {
3712 				/*
3713 				 * If there are chunks on the send queue
3714 				 * (unsent data that has made it from the
3715 				 * stream queues but not out the door), we
3716 				 * take the first one (which will have the
3717 				 * lowest TSN) and subtract one to get the
3718 				 * one we last sent.
3719 				 */
3720 				struct sctp_tmit_chunk *ttt;
3721 
3722 				ttt = TAILQ_FIRST(&asoc->send_queue);
3723 				tp1->rec.data.fast_retran_tsn =
3724 				    ttt->rec.data.tsn;
3725 			}
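			/*
			 * Illustration (hypothetical TSNs): if we FR while
			 * the next TSN to be assigned is 5000, then
			 * fast_retran_tsn is about 5000, and a later SACK
			 * may strike this chunk again only once it newly
			 * acks at or beyond 5000 (the SCTP_TSN_GE() check
			 * against fast_retran_tsn above).
			 */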
3726 
3727 			if (tp1->do_rtt) {
3728 				/*
3729 				 * this guy had a RTO calculation pending on
3730 				 * it, cancel it
3731 				 */
3732 				if ((tp1->whoTo != NULL) &&
3733 				    (tp1->whoTo->rto_needed == 0)) {
3734 					tp1->whoTo->rto_needed = 1;
3735 				}
3736 				tp1->do_rtt = 0;
3737 			}
3738 			if (alt != tp1->whoTo) {
3739 				/* yes, there is an alternate. */
3740 				sctp_free_remote_addr(tp1->whoTo);
3741 				/* sa_ignore FREED_MEMORY */
3742 				tp1->whoTo = alt;
3743 				atomic_add_int(&alt->ref_count, 1);
3744 			}
3745 		}
3746 	}
3747 }
3748 
3749 struct sctp_tmit_chunk *
3750 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3751     struct sctp_association *asoc)
3752 {
3753 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3754 	struct timeval now;
3755 	int now_filled = 0;
3756 
3757 	if (asoc->prsctp_supported == 0) {
3758 		return (NULL);
3759 	}
3760 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3761 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3762 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3763 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3764 			/* no chance to advance, out of here */
3765 			break;
3766 		}
3767 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3768 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3769 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3770 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3771 				    asoc->advanced_peer_ack_point,
3772 				    tp1->rec.data.tsn, 0, 0);
3773 			}
3774 		}
3775 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3776 			/*
3777 			 * We can't fwd-tsn past any that are reliable aka
3778 			 * retransmitted until the asoc fails.
3779 			 */
3780 			break;
3781 		}
3782 		if (!now_filled) {
3783 			(void)SCTP_GETTIME_TIMEVAL(&now);
3784 			now_filled = 1;
3785 		}
3786 		/*
3787 		 * Now we have a chunk which is marked for another
3788 		 * retransmission to a PR-stream, but which may have already
3789 		 * run out of chances OR has been marked to be skipped now.
3790 		 * Can we skip it if it's a resend?
3791 		 */
3792 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3793 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3794 			/*
3795 			 * Now is this one marked for resend and its time is
3796 			 * now up?
3797 			 */
3798 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3799 				/* Yes so drop it */
3800 				if (tp1->data) {
3801 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3802 					    1, SCTP_SO_NOT_LOCKED);
3803 				}
3804 			} else {
3805 				/*
3806 				 * No, we are done when we hit one marked for
3807 				 * resend whose time has not expired.
3808 				 */
3809 				break;
3810 			}
3811 		}
3812 		/*
3813 		 * OK, now if this chunk is marked to be dropped we can clean up
3814 		 * the chunk, advance our peer ack point, and check
3815 		 * the next chunk.
3816 		 */
3817 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3818 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3819 			/* advance PeerAckPoint goes forward */
3820 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3821 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3822 				a_adv = tp1;
3823 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3824 				/* No update but we do save the chk */
3825 				a_adv = tp1;
3826 			}
3827 		} else {
3828 			/*
3829 			 * If it is still in RESEND we can advance no
3830 			 * further
3831 			 */
3832 			break;
3833 		}
3834 	}
3835 	return (a_adv);
3836 }
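/*
 * Example of the advancement above (hypothetical TSNs): with cum-ack 100
 * and the sent queue holding 101 (FORWARD_TSN_SKIP), 102 (NR_ACKED) and
 * 103 (still SENT), advanced_peer_ack_point moves to 102, so a FWD-TSN for
 * 102 can be sent; 103 blocks any further advancement.
 */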
3837 
3838 static int
3839 sctp_fs_audit(struct sctp_association *asoc)
3840 {
3841 	struct sctp_tmit_chunk *chk;
3842 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3843 	int ret;
3844 #ifndef INVARIANTS
3845 	int entry_flight, entry_cnt;
3846 #endif
3847 
3848 	ret = 0;
3849 #ifndef INVARIANTS
3850 	entry_flight = asoc->total_flight;
3851 	entry_cnt = asoc->total_flight_count;
3852 #endif
3853 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3854 		return (0);
3855 
3856 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3857 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3858 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3859 			    chk->rec.data.tsn,
3860 			    chk->send_size,
3861 			    chk->snd_count);
3862 			inflight++;
3863 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3864 			resend++;
3865 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3866 			inbetween++;
3867 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3868 			above++;
3869 		} else {
3870 			acked++;
3871 		}
3872 	}
3873 
3874 	if ((inflight > 0) || (inbetween > 0)) {
3875 #ifdef INVARIANTS
3876 		panic("Flight size-express incorrect? \n");
3877 #else
3878 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3879 		    entry_flight, entry_cnt);
3880 
3881 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3882 		    inflight, inbetween, resend, above, acked);
3883 		ret = 1;
3884 #endif
3885 	}
3886 	return (ret);
3887 }
3888 
3889 
3890 static void
3891 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3892     struct sctp_association *asoc,
3893     struct sctp_tmit_chunk *tp1)
3894 {
3895 	tp1->window_probe = 0;
3896 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3897 		/* TSNs were skipped; we do NOT move back. */
3898 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3899 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3900 		    tp1->book_size,
3901 		    (uint32_t)(uintptr_t)tp1->whoTo,
3902 		    tp1->rec.data.tsn);
3903 		return;
3904 	}
3905 	/* First setup this by shrinking flight */
3906 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3907 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3908 		    tp1);
3909 	}
3910 	sctp_flight_size_decrease(tp1);
3911 	sctp_total_flight_decrease(stcb, tp1);
3912 	/* Now mark for resend */
3913 	tp1->sent = SCTP_DATAGRAM_RESEND;
3914 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3915 
3916 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3917 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3918 		    tp1->whoTo->flight_size,
3919 		    tp1->book_size,
3920 		    (uint32_t)(uintptr_t)tp1->whoTo,
3921 		    tp1->rec.data.tsn);
3922 	}
3923 }
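/*
 * Context for the routine above: a window probe is a chunk sent into a
 * closed (zero) peer rwnd. The SACK handlers below set win_probe_recovery
 * once peers_rwnd grows past its previous value, then use this routine to
 * pull the probe chunk out of the flight and mark it for resend instead of
 * leaving it as unackable in-flight data.
 */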
3924 
3925 void
3926 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3927     uint32_t rwnd, int *abort_now, int ecne_seen)
3928 {
3929 	struct sctp_nets *net;
3930 	struct sctp_association *asoc;
3931 	struct sctp_tmit_chunk *tp1, *tp2;
3932 	uint32_t old_rwnd;
3933 	int win_probe_recovery = 0;
3934 	int win_probe_recovered = 0;
3935 	int j, done_once = 0;
3936 	int rto_ok = 1;
3937 	uint32_t send_s;
3938 
3939 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3940 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3941 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3942 	}
3943 	SCTP_TCB_LOCK_ASSERT(stcb);
3944 #ifdef SCTP_ASOCLOG_OF_TSNS
3945 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3946 	stcb->asoc.cumack_log_at++;
3947 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3948 		stcb->asoc.cumack_log_at = 0;
3949 	}
3950 #endif
3951 	asoc = &stcb->asoc;
3952 	old_rwnd = asoc->peers_rwnd;
3953 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3954 		/* old ack */
3955 		return;
3956 	} else if (asoc->last_acked_seq == cumack) {
3957 		/* Window update sack */
3958 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3959 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3960 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3961 			/* SWS sender side engages */
3962 			asoc->peers_rwnd = 0;
3963 		}
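		/*
		 * Sender-side silly window syndrome avoidance, with
		 * hypothetical numbers: an advertised rwnd of 1500 with
		 * 1000 bytes in flight plus one chunk's 256-byte overhead
		 * allowance leaves 244 bytes; being below the SWS
		 * threshold, it is treated as 0 so we don't dribble out
		 * tiny packets.
		 */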
3964 		if (asoc->peers_rwnd > old_rwnd) {
3965 			goto again;
3966 		}
3967 		return;
3968 	}
3969 	/* First setup for CC stuff */
3970 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3971 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3972 			/* Drag along the window_tsn for cwr's */
3973 			net->cwr_window_tsn = cumack;
3974 		}
3975 		net->prev_cwnd = net->cwnd;
3976 		net->net_ack = 0;
3977 		net->net_ack2 = 0;
3978 
3979 		/*
3980 		 * CMT: Reset CUC and Fast recovery algo variables before
3981 		 * SACK processing
3982 		 */
3983 		net->new_pseudo_cumack = 0;
3984 		net->will_exit_fast_recovery = 0;
3985 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3986 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3987 		}
3988 	}
3989 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3990 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3991 		    sctpchunk_listhead);
3992 		send_s = tp1->rec.data.tsn + 1;
3993 	} else {
3994 		send_s = asoc->sending_seq;
3995 	}
3996 	if (SCTP_TSN_GE(cumack, send_s)) {
3997 		struct mbuf *op_err;
3998 		char msg[SCTP_DIAG_INFO_LEN];
3999 
4000 		*abort_now = 1;
4001 		/* XXX */
4002 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4003 		    cumack, send_s);
4004 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4005 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4006 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4007 		return;
4008 	}
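	/*
	 * For example (hypothetical TSNs): if the highest TSN we ever
	 * queued is 4999, send_s is 5000; a SACK with cum-ack >= 5000
	 * acks data we never sent, so the association is aborted above as
	 * a protocol violation.
	 */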
4009 	asoc->this_sack_highest_gap = cumack;
4010 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4011 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4012 		    stcb->asoc.overall_error_count,
4013 		    0,
4014 		    SCTP_FROM_SCTP_INDATA,
4015 		    __LINE__);
4016 	}
4017 	stcb->asoc.overall_error_count = 0;
4018 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4019 		/* process the new consecutive TSN first */
4020 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4021 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4022 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4023 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4024 				}
4025 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4026 					/*
4027 					 * If it is less than ACKED, it is
4028 					 * now no longer in flight. Higher
4029 					 * values may occur during marking
4030 					 */
4031 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4032 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4033 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4034 							    tp1->whoTo->flight_size,
4035 							    tp1->book_size,
4036 							    (uint32_t)(uintptr_t)tp1->whoTo,
4037 							    tp1->rec.data.tsn);
4038 						}
4039 						sctp_flight_size_decrease(tp1);
4040 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4041 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4042 							    tp1);
4043 						}
4044 						/* sa_ignore NO_NULL_CHK */
4045 						sctp_total_flight_decrease(stcb, tp1);
4046 					}
4047 					tp1->whoTo->net_ack += tp1->send_size;
4048 					if (tp1->snd_count < 2) {
4049 						/*
4050 						 * True non-retransmitted
4051 						 * chunk
4052 						 */
4053 						tp1->whoTo->net_ack2 +=
4054 						    tp1->send_size;
4055 
4056 						/* update RTO too? */
4057 						if (tp1->do_rtt) {
4058 							if (rto_ok) {
4059 								tp1->whoTo->RTO =
4060 								/*
4061 								 * sa_ignore
4062 								 * NO_NULL_CHK
4063 								 */
4064 								    sctp_calculate_rto(stcb,
4065 								    asoc, tp1->whoTo,
4066 								    &tp1->sent_rcv_time,
4067 								    SCTP_RTT_FROM_DATA);
4068 								rto_ok = 0;
4069 							}
4070 							if (tp1->whoTo->rto_needed == 0) {
4071 								tp1->whoTo->rto_needed = 1;
4072 							}
4073 							tp1->do_rtt = 0;
4074 						}
4075 					}
4076 					/*
4077 					 * CMT: CUCv2 algorithm. From the
4078 					 * cumack'd TSNs, for each TSN being
4079 					 * acked for the first time, set the
4080 					 * following variables for the
4081 					 * corresp destination.
4082 					 * new_pseudo_cumack will trigger a
4083 					 * cwnd update.
4084 					 * find_(rtx_)pseudo_cumack will
4085 					 * trigger search for the next
4086 					 * expected (rtx-)pseudo-cumack.
4087 					 */
4088 					tp1->whoTo->new_pseudo_cumack = 1;
4089 					tp1->whoTo->find_pseudo_cumack = 1;
4090 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
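					/*
					 * Sketch of the idea (hypothetical
					 * TSNs): with CMT, TSNs 10 and 12
					 * may go to net A while 11 and 13
					 * go to net B; A's pseudo-cumack
					 * can advance 10 -> 12 even while
					 * the overall cum-ack sits at 10,
					 * letting A's cwnd grow
					 * independently.
					 */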
4091 
4092 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4093 						/* sa_ignore NO_NULL_CHK */
4094 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4095 					}
4096 				}
4097 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4098 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4099 				}
4100 				if (tp1->rec.data.chunk_was_revoked) {
4101 					/* deflate the cwnd */
4102 					tp1->whoTo->cwnd -= tp1->book_size;
4103 					tp1->rec.data.chunk_was_revoked = 0;
4104 				}
4105 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4106 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4107 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4108 #ifdef INVARIANTS
4109 					} else {
4110 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4111 #endif
4112 					}
4113 				}
4114 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4115 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4116 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4117 					asoc->trigger_reset = 1;
4118 				}
4119 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4120 				if (tp1->data) {
4121 					/* sa_ignore NO_NULL_CHK */
4122 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4123 					sctp_m_freem(tp1->data);
4124 					tp1->data = NULL;
4125 				}
4126 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4127 					sctp_log_sack(asoc->last_acked_seq,
4128 					    cumack,
4129 					    tp1->rec.data.tsn,
4130 					    0,
4131 					    0,
4132 					    SCTP_LOG_FREE_SENT);
4133 				}
4134 				asoc->sent_queue_cnt--;
4135 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4136 			} else {
4137 				break;
4138 			}
4139 		}
4140 
4141 	}
4142 	/* sa_ignore NO_NULL_CHK */
4143 	if (stcb->sctp_socket) {
4144 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4145 		struct socket *so;
4146 
4147 #endif
4148 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4149 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4150 			/* sa_ignore NO_NULL_CHK */
4151 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4152 		}
4153 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4154 		so = SCTP_INP_SO(stcb->sctp_ep);
4155 		atomic_add_int(&stcb->asoc.refcnt, 1);
4156 		SCTP_TCB_UNLOCK(stcb);
4157 		SCTP_SOCKET_LOCK(so, 1);
4158 		SCTP_TCB_LOCK(stcb);
4159 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4160 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4161 			/* assoc was freed while we were unlocked */
4162 			SCTP_SOCKET_UNLOCK(so, 1);
4163 			return;
4164 		}
4165 #endif
4166 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4167 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4168 		SCTP_SOCKET_UNLOCK(so, 1);
4169 #endif
4170 	} else {
4171 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4172 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4173 		}
4174 	}
4175 
4176 	/* JRS - Use the congestion control given in the CC module */
4177 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4178 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4179 			if (net->net_ack2 > 0) {
4180 				/*
4181 				 * Karn's rule applies to clearing error
4182 				 * count, this is optional.
4183 				 */
4184 				net->error_count = 0;
4185 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4186 					/* addr came good */
4187 					net->dest_state |= SCTP_ADDR_REACHABLE;
4188 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4189 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4190 				}
4191 				if (net == stcb->asoc.primary_destination) {
4192 					if (stcb->asoc.alternate) {
4193 						/*
4194 						 * release the alternate,
4195 						 * primary is good
4196 						 */
4197 						sctp_free_remote_addr(stcb->asoc.alternate);
4198 						stcb->asoc.alternate = NULL;
4199 					}
4200 				}
4201 				if (net->dest_state & SCTP_ADDR_PF) {
4202 					net->dest_state &= ~SCTP_ADDR_PF;
4203 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4204 					    stcb->sctp_ep, stcb, net,
4205 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4206 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4207 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4208 					/* Done with this net */
4209 					net->net_ack = 0;
4210 				}
4211 				/* restore any doubled timers */
4212 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4213 				if (net->RTO < stcb->asoc.minrto) {
4214 					net->RTO = stcb->asoc.minrto;
4215 				}
4216 				if (net->RTO > stcb->asoc.maxrto) {
4217 					net->RTO = stcb->asoc.maxrto;
4218 				}
4219 			}
4220 		}
4221 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4222 	}
4223 	asoc->last_acked_seq = cumack;
4224 
4225 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4226 		/* nothing left in-flight */
4227 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4228 			net->flight_size = 0;
4229 			net->partial_bytes_acked = 0;
4230 		}
4231 		asoc->total_flight = 0;
4232 		asoc->total_flight_count = 0;
4233 	}
4234 	/* RWND update */
4235 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4236 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4237 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4238 		/* SWS sender side engages */
4239 		asoc->peers_rwnd = 0;
4240 	}
4241 	if (asoc->peers_rwnd > old_rwnd) {
4242 		win_probe_recovery = 1;
4243 	}
4244 	/* Now assure a timer where data is queued at */
4245 again:
4246 	j = 0;
4247 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4248 		if (win_probe_recovery && (net->window_probe)) {
4249 			win_probe_recovered = 1;
4250 			/*
4251 			 * Find first chunk that was used with window probe
4252 			 * and clear the sent
4253 			 */
4254 			/* sa_ignore FREED_MEMORY */
4255 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4256 				if (tp1->window_probe) {
4257 					/* move back to data send queue */
4258 					sctp_window_probe_recovery(stcb, asoc, tp1);
4259 					break;
4260 				}
4261 			}
4262 		}
4263 		if (net->flight_size) {
4264 			j++;
4265 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4266 			if (net->window_probe) {
4267 				net->window_probe = 0;
4268 			}
4269 		} else {
4270 			if (net->window_probe) {
4271 				/*
4272 				 * In window probes we must assure a timer
4273 				 * is still running there
4274 				 */
4275 				net->window_probe = 0;
4276 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4277 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4278 				}
4279 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4280 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4281 				    stcb, net,
4282 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4283 			}
4284 		}
4285 	}
4286 	if ((j == 0) &&
4287 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4288 	    (asoc->sent_queue_retran_cnt == 0) &&
4289 	    (win_probe_recovered == 0) &&
4290 	    (done_once == 0)) {
4291 		/*
4292 		 * huh, this should not happen unless all packets are
4293 		 * PR-SCTP and marked to skip of course.
4294 		 */
4295 		if (sctp_fs_audit(asoc)) {
4296 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4297 				net->flight_size = 0;
4298 			}
4299 			asoc->total_flight = 0;
4300 			asoc->total_flight_count = 0;
4301 			asoc->sent_queue_retran_cnt = 0;
4302 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4303 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4304 					sctp_flight_size_increase(tp1);
4305 					sctp_total_flight_increase(stcb, tp1);
4306 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4307 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4308 				}
4309 			}
4310 		}
4311 		done_once = 1;
4312 		goto again;
4313 	}
4314 	/**********************************/
4315 	/* Now what about shutdown issues */
4316 	/**********************************/
4317 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4318 		/* nothing left on sendqueue.. consider done */
4319 		/* clean up */
4320 		if ((asoc->stream_queue_cnt == 1) &&
4321 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4322 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4323 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4324 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4325 		}
4326 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4327 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4328 		    (asoc->stream_queue_cnt == 1) &&
4329 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4330 			struct mbuf *op_err;
4331 
4332 			*abort_now = 1;
4333 			/* XXX */
4334 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4335 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4336 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4337 			return;
4338 		}
4339 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4340 		    (asoc->stream_queue_cnt == 0)) {
4341 			struct sctp_nets *netp;
4342 
4343 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4344 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4345 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4346 			}
4347 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4348 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4349 			sctp_stop_timers_for_shutdown(stcb);
4350 			if (asoc->alternate) {
4351 				netp = asoc->alternate;
4352 			} else {
4353 				netp = asoc->primary_destination;
4354 			}
4355 			sctp_send_shutdown(stcb, netp);
4356 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4357 			    stcb->sctp_ep, stcb, netp);
4358 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4359 			    stcb->sctp_ep, stcb, netp);
4360 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4361 		    (asoc->stream_queue_cnt == 0)) {
4362 			struct sctp_nets *netp;
4363 
4364 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4365 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4366 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4367 			sctp_stop_timers_for_shutdown(stcb);
4368 			if (asoc->alternate) {
4369 				netp = asoc->alternate;
4370 			} else {
4371 				netp = asoc->primary_destination;
4372 			}
4373 			sctp_send_shutdown_ack(stcb, netp);
4374 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4375 			    stcb->sctp_ep, stcb, netp);
4376 		}
4377 	}
4378 	/*********************************************/
4379 	/* Here we perform PR-SCTP procedures        */
4380 	/* (section 4.2)                             */
4381 	/*********************************************/
4382 	/* C1. update advancedPeerAckPoint */
4383 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4384 		asoc->advanced_peer_ack_point = cumack;
4385 	}
4386 	/* PR-Sctp issues need to be addressed too */
4387 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4388 		struct sctp_tmit_chunk *lchk;
4389 		uint32_t old_adv_peer_ack_point;
4390 
4391 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4392 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4393 		/* C3. See if we need to send a Fwd-TSN */
4394 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4395 			/*
4396 			 * ISSUE with ECN, see FWD-TSN processing.
4397 			 */
4398 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4399 				send_forward_tsn(stcb, asoc);
4400 			} else if (lchk) {
4401 				/* try to FR fwd-tsn's that get lost too */
4402 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4403 					send_forward_tsn(stcb, asoc);
4404 				}
4405 			}
4406 		}
4407 		if (lchk) {
4408 			/* Assure a timer is up */
4409 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4410 			    stcb->sctp_ep, stcb, lchk->whoTo);
4411 		}
4412 	}
4413 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4414 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4415 		    rwnd,
4416 		    stcb->asoc.peers_rwnd,
4417 		    stcb->asoc.total_flight,
4418 		    stcb->asoc.total_output_queue_size);
4419 	}
4420 }
4421 
4422 void
4423 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4424     struct sctp_tcb *stcb,
4425     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4426     int *abort_now, uint8_t flags,
4427     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4428 {
4429 	struct sctp_association *asoc;
4430 	struct sctp_tmit_chunk *tp1, *tp2;
4431 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4432 	uint16_t wake_him = 0;
4433 	uint32_t send_s = 0;
4434 	long j;
4435 	int accum_moved = 0;
4436 	int will_exit_fast_recovery = 0;
4437 	uint32_t a_rwnd, old_rwnd;
4438 	int win_probe_recovery = 0;
4439 	int win_probe_recovered = 0;
4440 	struct sctp_nets *net = NULL;
4441 	int done_once;
4442 	int rto_ok = 1;
4443 	uint8_t reneged_all = 0;
4444 	uint8_t cmt_dac_flag;
4445 
4446 	/*
4447 	 * we take any chance we can to service our queues since we cannot
4448 	 * get awoken when the socket is read from :<
4449 	 */
4450 	/*
4451 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4452 	 * old sack, if so discard. 2) If there is nothing left in the send
4453 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4454 	 * too, update any rwnd change and verify no timers are running.
4455 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4456 	 * moved process these first and note that it moved. 4) Process any
4457 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4458 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4459 	 * sync up flightsizes and things, stop all timers and also check
4460 	 * for shutdown_pending state. If so then go ahead and send off the
4461 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4462 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4463 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4464 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4465 	 * if in shutdown_recv state.
4466 	 */
4467 	SCTP_TCB_LOCK_ASSERT(stcb);
4468 	/* CMT DAC algo */
4469 	this_sack_lowest_newack = 0;
4470 	SCTP_STAT_INCR(sctps_slowpath_sack);
4471 	last_tsn = cum_ack;
4472 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4473 #ifdef SCTP_ASOCLOG_OF_TSNS
4474 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4475 	stcb->asoc.cumack_log_at++;
4476 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4477 		stcb->asoc.cumack_log_at = 0;
4478 	}
4479 #endif
4480 	a_rwnd = rwnd;
4481 
4482 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4483 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4484 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4485 	}
4486 	old_rwnd = stcb->asoc.peers_rwnd;
4487 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4488 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4489 		    stcb->asoc.overall_error_count,
4490 		    0,
4491 		    SCTP_FROM_SCTP_INDATA,
4492 		    __LINE__);
4493 	}
4494 	stcb->asoc.overall_error_count = 0;
4495 	asoc = &stcb->asoc;
4496 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4497 		sctp_log_sack(asoc->last_acked_seq,
4498 		    cum_ack,
4499 		    0,
4500 		    num_seg,
4501 		    num_dup,
4502 		    SCTP_LOG_NEW_SACK);
4503 	}
4504 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4505 		uint16_t i;
4506 		uint32_t *dupdata, dblock;
4507 
4508 		for (i = 0; i < num_dup; i++) {
4509 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4510 			    sizeof(uint32_t), (uint8_t *)&dblock);
4511 			if (dupdata == NULL) {
4512 				break;
4513 			}
4514 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4515 		}
4516 	}
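	/*
	 * Each duplicate entry is a TSN the peer received more than once;
	 * e.g. (hypothetical) dup TSN 2042 typically means our
	 * retransmission of 2042 crossed the ack of the original in the
	 * network.
	 */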
4517 	/* reality check */
4518 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4519 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4520 		    sctpchunk_listhead);
4521 		send_s = tp1->rec.data.tsn + 1;
4522 	} else {
4523 		tp1 = NULL;
4524 		send_s = asoc->sending_seq;
4525 	}
4526 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4527 		struct mbuf *op_err;
4528 		char msg[SCTP_DIAG_INFO_LEN];
4529 
4530 		/*
4531 		 * no way, we have not even sent this TSN out yet. Peer is
4532 		 * hopelessly messed up with us.
4533 		 */
4534 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4535 		    cum_ack, send_s);
4536 		if (tp1) {
4537 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4538 			    tp1->rec.data.tsn, (void *)tp1);
4539 		}
4540 hopeless_peer:
4541 		*abort_now = 1;
4542 		/* XXX */
4543 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4544 		    cum_ack, send_s);
4545 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4546 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4547 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4548 		return;
4549 	}
4550 	/**********************/
4551 	/* 1) check the range */
4552 	/**********************/
4553 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4554 		/* acking something behind */
4555 		return;
4556 	}
4557 	/* update the Rwnd of the peer */
4558 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4559 	    TAILQ_EMPTY(&asoc->send_queue) &&
4560 	    (asoc->stream_queue_cnt == 0)) {
4561 		/* nothing left on send/sent and strmq */
4562 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4563 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4564 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4565 		}
4566 		asoc->peers_rwnd = a_rwnd;
4567 		if (asoc->sent_queue_retran_cnt) {
4568 			asoc->sent_queue_retran_cnt = 0;
4569 		}
4570 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4571 			/* SWS sender side engages */
4572 			asoc->peers_rwnd = 0;
4573 		}
4574 		/* stop any timers */
4575 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4576 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4577 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4578 			net->partial_bytes_acked = 0;
4579 			net->flight_size = 0;
4580 		}
4581 		asoc->total_flight = 0;
4582 		asoc->total_flight_count = 0;
4583 		return;
4584 	}
4585 	/*
4586 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4587 	 * things. The total byte count acked is tracked in netAckSz AND
4588 	 * netAck2 is used to track the total bytes acked that are un-
4589 	 * amibguious and were never retransmitted. We track these on a per
4590 	 * ambiguous and were never retransmitted. We track these on a per
4591 	 */
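	/*
	 * For example (hypothetical sizes): if this SACK newly acks a
	 * 1200-byte chunk sent once and an 800-byte chunk that was
	 * retransmitted, the destination ends with net_ack = 2000 but
	 * net_ack2 = 1200; only never-retransmitted bytes are unambiguous
	 * enough for RTT sampling and Karn's rule.
	 */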
4592 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4593 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4594 			/* Drag along the window_tsn for cwr's */
4595 			net->cwr_window_tsn = cum_ack;
4596 		}
4597 		net->prev_cwnd = net->cwnd;
4598 		net->net_ack = 0;
4599 		net->net_ack2 = 0;
4600 
4601 		/*
4602 		 * CMT: Reset CUC and Fast recovery algo variables before
4603 		 * SACK processing
4604 		 */
4605 		net->new_pseudo_cumack = 0;
4606 		net->will_exit_fast_recovery = 0;
4607 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4608 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4609 		}
4610 		/*
4611 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4612 		 * to be greater than the cumack. Also reset saw_newack to 0
4613 		 * for all dests.
4614 		 */
4615 		net->saw_newack = 0;
4616 		net->this_sack_highest_newack = last_tsn;
4617 	}
4618 	/* process the new consecutive TSN first */
4619 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4620 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4621 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4622 				accum_moved = 1;
4623 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4624 					/*
4625 					 * If it is less than ACKED, it is
4626 					 * now no longer in flight. Higher
4627 					 * values may occur during marking
4628 					 */
4629 					if ((tp1->whoTo->dest_state &
4630 					    SCTP_ADDR_UNCONFIRMED) &&
4631 					    (tp1->snd_count < 2)) {
4632 						/*
4633 						 * If there was no retran
4634 						 * and the address is
4635 						 * un-confirmed and we sent
4636 						 * there and are now
4637 						 * sacked... it's confirmed,
4638 						 * mark it so.
4639 						 */
4640 						tp1->whoTo->dest_state &=
4641 						    ~SCTP_ADDR_UNCONFIRMED;
4642 					}
4643 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4644 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4645 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4646 							    tp1->whoTo->flight_size,
4647 							    tp1->book_size,
4648 							    (uint32_t)(uintptr_t)tp1->whoTo,
4649 							    tp1->rec.data.tsn);
4650 						}
4651 						sctp_flight_size_decrease(tp1);
4652 						sctp_total_flight_decrease(stcb, tp1);
4653 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4654 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4655 							    tp1);
4656 						}
4657 					}
4658 					tp1->whoTo->net_ack += tp1->send_size;
4659 
4660 					/* CMT SFR and DAC algos */
4661 					this_sack_lowest_newack = tp1->rec.data.tsn;
4662 					tp1->whoTo->saw_newack = 1;
4663 
4664 					if (tp1->snd_count < 2) {
4665 						/*
4666 						 * True non-retransmitted
4667 						 * chunk
4668 						 */
4669 						tp1->whoTo->net_ack2 +=
4670 						    tp1->send_size;
4671 
4672 						/* update RTO too? */
4673 						if (tp1->do_rtt) {
4674 							if (rto_ok) {
4675 								tp1->whoTo->RTO =
4676 								    sctp_calculate_rto(stcb,
4677 								    asoc, tp1->whoTo,
4678 								    &tp1->sent_rcv_time,
4679 								    SCTP_RTT_FROM_DATA);
4680 								rto_ok = 0;
4681 							}
4682 							if (tp1->whoTo->rto_needed == 0) {
4683 								tp1->whoTo->rto_needed = 1;
4684 							}
4685 							tp1->do_rtt = 0;
4686 						}
4687 					}
4688 					/*
4689 					 * CMT: CUCv2 algorithm. From the
4690 					 * cumack'd TSNs, for each TSN being
4691 					 * acked for the first time, set the
4692 					 * following variables for the
4693 					 * corresp destination.
4694 					 * new_pseudo_cumack will trigger a
4695 					 * cwnd update.
4696 					 * find_(rtx_)pseudo_cumack will
4697 					 * trigger search for the next
4698 					 * expected (rtx-)pseudo-cumack.
4699 					 */
4700 					tp1->whoTo->new_pseudo_cumack = 1;
4701 					tp1->whoTo->find_pseudo_cumack = 1;
4702 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4703 
4704 
4705 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4706 						sctp_log_sack(asoc->last_acked_seq,
4707 						    cum_ack,
4708 						    tp1->rec.data.tsn,
4709 						    0,
4710 						    0,
4711 						    SCTP_LOG_TSN_ACKED);
4712 					}
4713 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4714 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4715 					}
4716 				}
4717 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4718 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4719 #ifdef SCTP_AUDITING_ENABLED
4720 					sctp_audit_log(0xB3,
4721 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4722 #endif
4723 				}
4724 				if (tp1->rec.data.chunk_was_revoked) {
4725 					/* deflate the cwnd */
4726 					tp1->whoTo->cwnd -= tp1->book_size;
4727 					tp1->rec.data.chunk_was_revoked = 0;
4728 				}
4729 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4730 					tp1->sent = SCTP_DATAGRAM_ACKED;
4731 				}
4732 			}
4733 		} else {
4734 			break;
4735 		}
4736 	}
4737 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4738 	/* always set this up to cum-ack */
4739 	asoc->this_sack_highest_gap = last_tsn;
4740 
4741 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4742 
4743 		/*
4744 		 * thisSackHighestGap will increase while handling NEW
4745 	 * segments. this_sack_highest_newack will increase while
4746 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4747 		 * used for CMT DAC algo. saw_newack will also change.
4748 		 */
4749 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4750 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4751 		    num_seg, num_nr_seg, &rto_ok)) {
4752 			wake_him++;
4753 		}
4754 		/*
4755 		 * validate the biggest_tsn_acked in the gap acks if strict
4756 		 * adherence is wanted.
4757 		 */
4758 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4759 			/*
4760 			 * peer is either confused or we are under attack.
4761 			 * We must abort.
4762 			 */
4763 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4764 			    biggest_tsn_acked, send_s);
4765 			goto hopeless_peer;
4766 		}
4767 	}
4768 	/********************************************/
4769 	/* cancel ALL T3-send timers if accum moved */
4770 	/********************************************/
4771 	if (asoc->sctp_cmt_on_off > 0) {
4772 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4773 			if (net->new_pseudo_cumack)
4774 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4775 				    stcb, net,
4776 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4777 
4778 		}
4779 	} else {
4780 		if (accum_moved) {
4781 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4782 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4783 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4784 			}
4785 		}
4786 	}
4787 	/*********************************************/
4788 	/* drop the acked chunks from the sent queue */
4789 	/*********************************************/
4790 	asoc->last_acked_seq = cum_ack;
4791 
4792 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4793 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4794 			break;
4795 		}
4796 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4797 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4798 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4799 #ifdef INVARIANTS
4800 			} else {
4801 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4802 #endif
4803 			}
4804 		}
4805 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4806 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4807 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4808 			asoc->trigger_reset = 1;
4809 		}
4810 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4811 		if (PR_SCTP_ENABLED(tp1->flags)) {
4812 			if (asoc->pr_sctp_cnt != 0)
4813 				asoc->pr_sctp_cnt--;
4814 		}
4815 		asoc->sent_queue_cnt--;
4816 		if (tp1->data) {
4817 			/* sa_ignore NO_NULL_CHK */
4818 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4819 			sctp_m_freem(tp1->data);
4820 			tp1->data = NULL;
4821 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4822 				asoc->sent_queue_cnt_removeable--;
4823 			}
4824 		}
4825 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4826 			sctp_log_sack(asoc->last_acked_seq,
4827 			    cum_ack,
4828 			    tp1->rec.data.tsn,
4829 			    0,
4830 			    0,
4831 			    SCTP_LOG_FREE_SENT);
4832 		}
4833 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4834 		wake_him++;
4835 	}
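	/*
	 * Note: only TSNs at or below the cum-ack were freed above.
	 * Gap-acked chunks beyond the cum-ack stay on the sent queue
	 * (marked SCTP_DATAGRAM_ACKED), because gap reports, unlike the
	 * cum-ack, are renegable: the peer may later revoke them.
	 */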
4836 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4837 #ifdef INVARIANTS
4838 		panic("Warning flight size is positive and should be 0");
4839 #else
4840 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4841 		    asoc->total_flight);
4842 #endif
4843 		asoc->total_flight = 0;
4844 	}
4845 	/* sa_ignore NO_NULL_CHK */
4846 	if ((wake_him) && (stcb->sctp_socket)) {
4847 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4848 		struct socket *so;
4849 
4850 #endif
4851 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4852 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4853 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4854 		}
4855 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4856 		so = SCTP_INP_SO(stcb->sctp_ep);
4857 		atomic_add_int(&stcb->asoc.refcnt, 1);
4858 		SCTP_TCB_UNLOCK(stcb);
4859 		SCTP_SOCKET_LOCK(so, 1);
4860 		SCTP_TCB_LOCK(stcb);
4861 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4862 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4863 			/* assoc was freed while we were unlocked */
4864 			SCTP_SOCKET_UNLOCK(so, 1);
4865 			return;
4866 		}
4867 #endif
4868 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4869 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4870 		SCTP_SOCKET_UNLOCK(so, 1);
4871 #endif
4872 	} else {
4873 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4874 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4875 		}
4876 	}
4877 
4878 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4879 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4880 			/* Setup so we will exit RFC2582 fast recovery */
4881 			will_exit_fast_recovery = 1;
4882 		}
4883 	}
4884 	/*
4885 	 * Check for revoked fragments:
4886 	 *
4887 	 * If the previous SACK had no gap reports, nothing can have been
4888 	 * revoked. If it did, then: if we now have gap reports too (num_seg
4889 	 * > 0), call sctp_check_for_revoked() to tell whether the peer
4890 	 * revoked some of them; otherwise the peer revoked ALL previously
4891 	 * ACKED fragments, since we had some before and now we have NONE.
4892 	 */
4893 
4894 	if (num_seg) {
4895 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4896 		asoc->saw_sack_with_frags = 1;
4897 	} else if (asoc->saw_sack_with_frags) {
4898 		int cnt_revoked = 0;
4899 
4900 		/* The peer revoked all marked or acked datagrams */
4901 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4902 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4903 				tp1->sent = SCTP_DATAGRAM_SENT;
4904 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4905 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4906 					    tp1->whoTo->flight_size,
4907 					    tp1->book_size,
4908 					    (uint32_t)(uintptr_t)tp1->whoTo,
4909 					    tp1->rec.data.tsn);
4910 				}
4911 				sctp_flight_size_increase(tp1);
4912 				sctp_total_flight_increase(stcb, tp1);
4913 				tp1->rec.data.chunk_was_revoked = 1;
4914 				/*
4915 				 * To ensure that this increase in
4916 				 * flightsize, which is artificial, does not
4917 				 * throttle the sender, we also increase the
4918 				 * cwnd artificially.
4919 				 */
4920 				tp1->whoTo->cwnd += tp1->book_size;
4921 				cnt_revoked++;
4922 			}
4923 		}
4924 		if (cnt_revoked) {
4925 			reneged_all = 1;
4926 		}
4927 		asoc->saw_sack_with_frags = 0;
4928 	}
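	/*
	 * Worked example (illustrative): say the previous SACK gap-acked
	 * TSN 7 while the cum-ack sat at 5, and this SACK reports cum-ack 5
	 * with no gap blocks at all. TSN 7 has been revoked: above it is
	 * put back to SCTP_DATAGRAM_SENT, re-added to the flight, and the
	 * cwnd is inflated by its book size until it is acked again.
	 */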
4929 	if (num_nr_seg > 0)
4930 		asoc->saw_sack_with_nr_frags = 1;
4931 	else
4932 		asoc->saw_sack_with_nr_frags = 0;
4933 
4934 	/* JRS - Use the congestion control given in the CC module */
4935 	if (ecne_seen == 0) {
4936 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4937 			if (net->net_ack2 > 0) {
4938 				/*
4939 				 * Karn's rule applies to clearing error
4940 				 * Karn's rule applies to clearing the
4941 				 * error count; this is optional.
4942 				net->error_count = 0;
4943 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4944 					/* the address became reachable again */
4945 					net->dest_state |= SCTP_ADDR_REACHABLE;
4946 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4947 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4948 				}
4949 				if (net == stcb->asoc.primary_destination) {
4950 					if (stcb->asoc.alternate) {
4951 						/*
4952 						 * release the alternate;
4953 						 * the primary is good
4954 						 */
4955 						sctp_free_remote_addr(stcb->asoc.alternate);
4956 						stcb->asoc.alternate = NULL;
4957 					}
4958 				}
4959 				if (net->dest_state & SCTP_ADDR_PF) {
4960 					net->dest_state &= ~SCTP_ADDR_PF;
4961 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4962 					    stcb->sctp_ep, stcb, net,
4963 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4964 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4965 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4966 					/* Done with this net */
4967 					net->net_ack = 0;
4968 				}
4969 				/* restore any doubled timers */
4970 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4971 				if (net->RTO < stcb->asoc.minrto) {
4972 					net->RTO = stcb->asoc.minrto;
4973 				}
4974 				if (net->RTO > stcb->asoc.maxrto) {
4975 					net->RTO = stcb->asoc.maxrto;
4976 				}
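				/*
				 * Assumption on the fixed-point encoding:
				 * lastsa appears to hold the smoothed RTT
				 * scaled by 2^SCTP_RTT_SHIFT and lastsv the
				 * scaled variation term, so the computation
				 * above reconstructs RFC 4960's
				 * RTO = SRTT + 4 * RTTVAR without any
				 * exponential backoff, clamped to
				 * [minrto, maxrto].
				 */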
4977 			}
4978 		}
4979 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4980 	}
4981 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4982 		/* nothing left in-flight */
4983 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 			/* stop all timers */
4985 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986 			    stcb, net,
4987 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4988 			net->flight_size = 0;
4989 			net->partial_bytes_acked = 0;
4990 		}
4991 		asoc->total_flight = 0;
4992 		asoc->total_flight_count = 0;
4993 	}
4994 	/**********************************/
4995 	/* Now what about shutdown issues */
4996 	/**********************************/
4997 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4998 		/* nothing left on sendqueue.. consider done */
4999 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5000 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5001 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5002 		}
5003 		asoc->peers_rwnd = a_rwnd;
5004 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5005 			/* SWS sender side engages */
5006 			asoc->peers_rwnd = 0;
5007 		}
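		/*
		 * Classic sender-side silly window syndrome avoidance: a
		 * peer window smaller than the sctp_sws_sender threshold is
		 * treated as zero, so we do not dribble tiny chunks into a
		 * nearly-closed window.
		 */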
5008 		/* clean up */
5009 		if ((asoc->stream_queue_cnt == 1) &&
5010 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5011 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5012 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5013 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5014 		}
5015 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5016 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5017 		    (asoc->stream_queue_cnt == 1) &&
5018 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5019 			struct mbuf *op_err;
5020 
5021 			*abort_now = 1;
5022 			/* XXX */
5023 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5024 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5025 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5026 			return;
5027 		}
5028 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5029 		    (asoc->stream_queue_cnt == 0)) {
5030 			struct sctp_nets *netp;
5031 
5032 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5033 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5034 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5035 			}
5036 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5037 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5038 			sctp_stop_timers_for_shutdown(stcb);
5039 			if (asoc->alternate) {
5040 				netp = asoc->alternate;
5041 			} else {
5042 				netp = asoc->primary_destination;
5043 			}
5044 			sctp_send_shutdown(stcb, netp);
5045 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5046 			    stcb->sctp_ep, stcb, netp);
5047 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5048 			    stcb->sctp_ep, stcb, netp);
5049 			return;
5050 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5051 		    (asoc->stream_queue_cnt == 0)) {
5052 			struct sctp_nets *netp;
5053 
5054 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5055 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5056 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5057 			sctp_stop_timers_for_shutdown(stcb);
5058 			if (asoc->alternate) {
5059 				netp = asoc->alternate;
5060 			} else {
5061 				netp = asoc->primary_destination;
5062 			}
5063 			sctp_send_shutdown_ack(stcb, netp);
5064 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5065 			    stcb->sctp_ep, stcb, netp);
5066 			return;
5067 		}
5068 	}
5069 	/*
5070 	 * Now here we are going to recycle net_ack for a different use...
5071 	 * HEADS UP.
5072 	 */
5073 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5074 		net->net_ack = 0;
5075 	}
5076 
5077 	/*
5078 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5079 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5080 	 * automatically ensure that.
5081 	 */
5082 	if ((asoc->sctp_cmt_on_off > 0) &&
5083 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5084 	    (cmt_dac_flag == 0)) {
5085 		this_sack_lowest_newack = cum_ack;
5086 	}
5087 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5088 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5089 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5090 	}
5091 	/* JRS - Use the congestion control given in the CC module */
5092 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5093 
5094 	/* Now are we exiting loss recovery ? */
5095 	if (will_exit_fast_recovery) {
5096 		/* Ok, we must exit fast recovery */
5097 		asoc->fast_retran_loss_recovery = 0;
5098 	}
5099 	if ((asoc->sat_t3_loss_recovery) &&
5100 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5101 		/* end satellite t3 loss recovery */
5102 		asoc->sat_t3_loss_recovery = 0;
5103 	}
5104 	/*
5105 	 * CMT Fast recovery
5106 	 */
5107 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5108 		if (net->will_exit_fast_recovery) {
5109 			/* Ok, we must exit fast recovery */
5110 			net->fast_retran_loss_recovery = 0;
5111 		}
5112 	}
5113 
5114 	/* Adjust and set the new rwnd value */
5115 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5116 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5117 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5118 	}
5119 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5120 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5121 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5122 		/* SWS sender side engages */
5123 		asoc->peers_rwnd = 0;
5124 	}
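	/*
	 * Worked example (illustrative numbers only): with a_rwnd = 20000,
	 * total_flight = 12000 and 10 chunks in flight at an assumed
	 * per-chunk overhead allowance of 256 bytes, the usable peer
	 * window becomes 20000 - (12000 + 10 * 256) = 5440 bytes;
	 * sctp_sbspace_sub() floors the result at zero instead of letting
	 * it underflow.
	 */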
5125 	if (asoc->peers_rwnd > old_rwnd) {
5126 		win_probe_recovery = 1;
5127 	}
5128 	/*
5129 	 * Now we must set things up so a timer is running for anyone
5130 	 * with outstanding data.
5131 	 */
5132 	done_once = 0;
5133 again:
5134 	j = 0;
5135 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5136 		if (win_probe_recovery && (net->window_probe)) {
5137 			win_probe_recovered = 1;
5138 			/*-
5139 			 * Find the first chunk that was used for a
5140 			 * window probe and clear the event. Put it
5141 			 * back into the send queue as if it had
5142 			 * not been sent.
5143 			 */
5144 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5145 				if (tp1->window_probe) {
5146 					sctp_window_probe_recovery(stcb, asoc, tp1);
5147 					break;
5148 				}
5149 			}
5150 		}
5151 		if (net->flight_size) {
5152 			j++;
5153 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5154 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5155 				    stcb->sctp_ep, stcb, net);
5156 			}
5157 			if (net->window_probe) {
5158 				net->window_probe = 0;
5159 			}
5160 		} else {
5161 			if (net->window_probe) {
5162 				/*
5163 				 * For window probes we must ensure a timer
5164 				 * is still running.
5165 				 */
5166 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5167 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5168 					    stcb->sctp_ep, stcb, net);
5170 				}
5171 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5172 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5173 				    stcb, net,
5174 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5175 			}
5176 		}
5177 	}
5178 	if ((j == 0) &&
5179 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5180 	    (asoc->sent_queue_retran_cnt == 0) &&
5181 	    (win_probe_recovered == 0) &&
5182 	    (done_once == 0)) {
5183 		/*
5184 		 * Huh, this should not happen unless all packets are
5185 		 * PR-SCTP and marked to be skipped, of course.
5186 		 */
5187 		if (sctp_fs_audit(asoc)) {
5188 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5189 				net->flight_size = 0;
5190 			}
5191 			asoc->total_flight = 0;
5192 			asoc->total_flight_count = 0;
5193 			asoc->sent_queue_retran_cnt = 0;
5194 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5195 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5196 					sctp_flight_size_increase(tp1);
5197 					sctp_total_flight_increase(stcb, tp1);
5198 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5199 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5200 				}
5201 			}
5202 		}
5203 		done_once = 1;
5204 		goto again;
5205 	}
5206 	/*********************************************/
5207 	/* Here we perform PR-SCTP procedures        */
5208 	/* (section 4.2)                             */
5209 	/*********************************************/
5210 	/* C1. update advancedPeerAckPoint */
5211 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5212 		asoc->advanced_peer_ack_point = cum_ack;
5213 	}
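	/*
	 * The C1/C2/C3 steps here follow the PR-SCTP sender-side rules
	 * (RFC 3758): advance the Advanced.Peer.Ack.Point past abandoned
	 * TSNs and, if it moved beyond the plain cum-ack, tell the peer
	 * with a FORWARD-TSN chunk.
	 */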
5214 	/* C2. try to further move advancedPeerAckPoint ahead */
5215 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5216 		struct sctp_tmit_chunk *lchk;
5217 		uint32_t old_adv_peer_ack_point;
5218 
5219 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5220 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5221 		/* C3. See if we need to send a Fwd-TSN */
5222 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5223 			/*
5224 			 * ISSUE with ECN, see FWD-TSN processing.
5225 			 */
5226 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5227 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5228 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5229 				    old_adv_peer_ack_point);
5230 			}
5231 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5232 				send_forward_tsn(stcb, asoc);
5233 			} else if (lchk) {
5234 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5235 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5236 					send_forward_tsn(stcb, asoc);
5237 				}
5238 			}
5239 		}
5240 		if (lchk) {
5241 			/* Ensure a timer is up */
5242 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5243 			    stcb->sctp_ep, stcb, lchk->whoTo);
5244 		}
5245 	}
5246 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5247 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5248 		    a_rwnd,
5249 		    stcb->asoc.peers_rwnd,
5250 		    stcb->asoc.total_flight,
5251 		    stcb->asoc.total_output_queue_size);
5252 	}
5253 }
5254 
5255 void
5256 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5257 {
5258 	/* Copy cum-ack */
5259 	uint32_t cum_ack, a_rwnd;
5260 
5261 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5262 	/* Arrange so a_rwnd does NOT change */
5263 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5264 
5265 	/* Now call the express sack handling */
5266 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5267 }
5268 
5269 static void
5270 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5271     struct sctp_stream_in *strmin)
5272 {
5273 	struct sctp_queued_to_read *control, *ncontrol;
5274 	struct sctp_association *asoc;
5275 	uint32_t mid;
5276 	int need_reasm_check = 0;
5277 
5278 	asoc = &stcb->asoc;
5279 	mid = strmin->last_mid_delivered;
5280 	/*
5281 	 * First deliver anything prior to and including the message ID
5282 	 * that came in.
5283 	 */
5284 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5285 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5286 			/* this is deliverable now */
5287 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5288 				if (control->on_strm_q) {
5289 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5290 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5291 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5292 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5293 #ifdef INVARIANTS
5294 					} else {
5295 						panic("strmin: %p ctl: %p unknown %d",
5296 						    strmin, control, control->on_strm_q);
5297 #endif
5298 					}
5299 					control->on_strm_q = 0;
5300 				}
5301 				/* subtract pending on streams */
5302 				if (asoc->size_on_all_streams >= control->length) {
5303 					asoc->size_on_all_streams -= control->length;
5304 				} else {
5305 #ifdef INVARIANTS
5306 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5307 #else
5308 					asoc->size_on_all_streams = 0;
5309 #endif
5310 				}
5311 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5312 				/* deliver it to at least the delivery-q */
5313 				if (stcb->sctp_socket) {
5314 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5315 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5316 					    control,
5317 					    &stcb->sctp_socket->so_rcv,
5318 					    1, SCTP_READ_LOCK_HELD,
5319 					    SCTP_SO_NOT_LOCKED);
5320 				}
5321 			} else {
5322 				/* It's a fragmented message */
5323 				if (control->first_frag_seen) {
5324 					/*
5325 					 * Make it so this is next to
5326 					 * deliver; we restore it later.
5327 					 */
5328 					strmin->last_mid_delivered = control->mid - 1;
5329 					need_reasm_check = 1;
5330 					break;
5331 				}
5332 			}
5333 		} else {
5334 			/* no more delivery now. */
5335 			break;
5336 		}
5337 	}
5338 	if (need_reasm_check) {
5339 		int ret;
5340 
5341 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5342 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5343 			/* Restore the next to deliver unless we are ahead */
5344 			strmin->last_mid_delivered = mid;
5345 		}
5346 		if (ret == 0) {
5347 			/* Left the front partial message on */
5348 			return;
5349 		}
5350 		need_reasm_check = 0;
5351 	}
5352 	/*
5353 	 * Now we must deliver things in the queue the normal way, if any
5354 	 * are now ready.
5355 	 */
5356 	mid = strmin->last_mid_delivered + 1;
5357 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5358 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5359 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5360 				/* this is deliverable now */
5361 				if (control->on_strm_q) {
5362 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5363 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5364 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5365 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5366 #ifdef INVARIANTS
5367 					} else {
5368 						panic("strmin: %p ctl: %p unknown %d",
5369 						    strmin, control, control->on_strm_q);
5370 #endif
5371 					}
5372 					control->on_strm_q = 0;
5373 				}
5374 				/* subtract pending on streams */
5375 				if (asoc->size_on_all_streams >= control->length) {
5376 					asoc->size_on_all_streams -= control->length;
5377 				} else {
5378 #ifdef INVARIANTS
5379 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5380 #else
5381 					asoc->size_on_all_streams = 0;
5382 #endif
5383 				}
5384 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5385 				/* deliver it to at least the delivery-q */
5386 				strmin->last_mid_delivered = control->mid;
5387 				if (stcb->sctp_socket) {
5388 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5389 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5390 					    control,
5391 					    &stcb->sctp_socket->so_rcv, 1,
5392 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5394 				}
5395 				mid = strmin->last_mid_delivered + 1;
5396 			} else {
5397 				/* It's a fragmented message */
5398 				if (control->first_frag_seen) {
5399 					/*
5400 					 * Make it so this is next to
5401 					 * deliver
5402 					 */
5403 					strmin->last_mid_delivered = control->mid - 1;
5404 					need_reasm_check = 1;
5405 					break;
5406 				}
5407 			}
5408 		} else {
5409 			break;
5410 		}
5411 	}
5412 	if (need_reasm_check) {
5413 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5414 	}
5415 }
5416 
5419 static void
5420 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5421     struct sctp_association *asoc,
5422     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5423 {
5424 	struct sctp_queued_to_read *control;
5425 	struct sctp_stream_in *strm;
5426 	struct sctp_tmit_chunk *chk, *nchk;
5427 	int cnt_removed = 0;
5428 
5429 	/*
5430 	 * For now, large messages held on the stream reassembly queue that
5431 	 * are complete will be tossed too. We could in theory do more
5432 	 * work, spinning through and stopping after dumping one message
5433 	 * (i.e. on seeing the start of a new message at the head) and
5434 	 * calling the delivery function to see if it can be delivered.
5435 	 * But for now we just dump everything on the queue.
5436 	 */
5437 	strm = &asoc->strmin[stream];
5438 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5439 	if (control == NULL) {
5440 		/* Not found */
5441 		return;
5442 	}
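	/*
	 * Note: without I-DATA support, an unordered message's fragments
	 * are keyed by TSN (the FSN is simply the TSN), so a reassembly
	 * entry whose included fragments all lie beyond the new cum-TSN is
	 * not covered by this FORWARD-TSN and must be left alone.
	 */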
5443 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5444 		return;
5445 	}
5446 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5447 		/* Purge hanging chunks */
5448 		if (!asoc->idata_supported && (ordered == 0)) {
5449 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5450 				break;
5451 			}
5452 		}
5453 		cnt_removed++;
5454 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5455 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5456 			asoc->size_on_reasm_queue -= chk->send_size;
5457 		} else {
5458 #ifdef INVARIANTS
5459 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5460 #else
5461 			asoc->size_on_reasm_queue = 0;
5462 #endif
5463 		}
5464 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5465 		if (chk->data) {
5466 			sctp_m_freem(chk->data);
5467 			chk->data = NULL;
5468 		}
5469 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5470 	}
5471 	if (!TAILQ_EMPTY(&control->reasm)) {
5472 		/* This has to be old data, unordered */
5473 		if (control->data) {
5474 			sctp_m_freem(control->data);
5475 			control->data = NULL;
5476 		}
5477 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5478 		chk = TAILQ_FIRST(&control->reasm);
5479 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5480 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5481 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5482 			    chk, SCTP_READ_LOCK_HELD);
5483 		}
5484 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5485 		return;
5486 	}
5487 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5488 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5489 		if (asoc->size_on_all_streams >= control->length) {
5490 			asoc->size_on_all_streams -= control->length;
5491 		} else {
5492 #ifdef INVARIANTS
5493 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5494 #else
5495 			asoc->size_on_all_streams = 0;
5496 #endif
5497 		}
5498 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5499 		control->on_strm_q = 0;
5500 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5501 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5502 		control->on_strm_q = 0;
5503 #ifdef INVARIANTS
5504 	} else if (control->on_strm_q) {
5505 		panic("strm: %p ctl: %p unknown %d",
5506 		    strm, control, control->on_strm_q);
5507 #endif
5508 	}
5509 	control->on_strm_q = 0;
5510 	if (control->on_read_q == 0) {
5511 		sctp_free_remote_addr(control->whoFrom);
5512 		if (control->data) {
5513 			sctp_m_freem(control->data);
5514 			control->data = NULL;
5515 		}
5516 		sctp_free_a_readq(stcb, control);
5517 	}
5518 }
5519 
5520 void
5521 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5522     struct sctp_forward_tsn_chunk *fwd,
5523     int *abort_flag, struct mbuf *m, int offset)
5524 {
5525 	/* The pr-sctp fwd tsn */
5526 	/*
5527 	 * Here we will perform all the data receiver side steps for
5528 	 * processing FwdTSN, as required by the PR-SCTP draft.
5529 	 *
5530 	 * Assume we get FwdTSN(x):
5531 	 * 1) update local cumTSN to x
5532 	 * 2) try to further advance cumTSN to x + others we have
5533 	 * 3) examine and update re-ordering queue on pr-in-streams
5534 	 * 4) clean up re-assembly queue
5535 	 * 5) send a SACK to report where we are
5536 	 */
5537 	struct sctp_association *asoc;
5538 	uint32_t new_cum_tsn, gap;
5539 	unsigned int i, fwd_sz, m_size;
5540 	uint32_t str_seq;
5541 	struct sctp_stream_in *strm;
5542 	struct sctp_queued_to_read *control, *sv;
5543 
5544 	asoc = &stcb->asoc;
5545 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5546 		SCTPDBG(SCTP_DEBUG_INDATA1,
5547 		    "Bad size too small/big fwd-tsn\n");
5548 		return;
5549 	}
5550 	m_size = (stcb->asoc.mapping_array_size << 3);
5551 	/*************************************************************/
5552 	/* 1. Here we update local cumTSN and shift the bitmap array */
5553 	/*************************************************************/
5554 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5555 
5556 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5557 		/* Already got there ... */
5558 		return;
5559 	}
5560 	/*
5561 	 * Now we know the new TSN is more advanced; let's find the
5562 	 * actual gap.
5563 	 */
5564 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5565 	asoc->cumulative_tsn = new_cum_tsn;
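	/*
	 * Example (illustrative): with mapping_array_base_tsn = 100 and
	 * new_cum_tsn = 103, gap is 3 (serial-number arithmetic mod 2^32),
	 * i.e. bit positions 0..3 of the mapping arrays now fall at or
	 * below the cumulative TSN. A gap at or beyond the bitmap size
	 * means the whole map was overrun and is rebuilt below.
	 */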
5566 	if (gap >= m_size) {
5567 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5568 			struct mbuf *op_err;
5569 			char msg[SCTP_DIAG_INFO_LEN];
5570 
5571 			/*
5572 			 * out of range (of single byte chunks in the rwnd I
5573 			 * give out). This must be an attacker.
5574 			 */
5575 			*abort_flag = 1;
5576 			snprintf(msg, sizeof(msg),
5577 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5578 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5579 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5580 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5581 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5582 			return;
5583 		}
5584 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5585 
5586 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5587 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5588 		asoc->highest_tsn_inside_map = new_cum_tsn;
5589 
5590 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5591 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5592 
5593 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5594 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5595 		}
5596 	} else {
5597 		SCTP_TCB_LOCK_ASSERT(stcb);
5598 		for (i = 0; i <= gap; i++) {
5599 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5600 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5601 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5602 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5603 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5604 				}
5605 			}
5606 		}
5607 	}
5608 	/*************************************************************/
5609 	/* 2. Clear up re-assembly queue                             */
5610 	/*************************************************************/
5611 
5612 	/* This is now done as part of clearing up the stream/seq */
5613 	if (asoc->idata_supported == 0) {
5614 		uint16_t sid;
5615 
5616 		/* Flush all the un-ordered data based on cum-tsn */
5617 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5618 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5619 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5620 		}
5621 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5622 	}
5623 	/*******************************************************/
5624 	/* 3. Update the PR-stream re-ordering queues and fix  */
5625 	/*    delivery issues as needed.                    */
5626 	/*******************************************************/
5627 	fwd_sz -= sizeof(*fwd);
5628 	if (m && fwd_sz) {
5629 		/* New method. */
5630 		unsigned int num_str;
5631 		uint32_t mid, cur_mid;
5632 		uint16_t sid;
5633 		uint16_t ordered, flags;
5634 		struct sctp_strseq *stseq, strseqbuf;
5635 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5636 
5637 		offset += sizeof(*fwd);
5638 
5639 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5640 		if (asoc->idata_supported) {
5641 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5642 		} else {
5643 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5644 		}
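		/*
		 * Each element is one stream/sequence pair: the original
		 * FORWARD-TSN carries (sid, 16-bit ssn), while the I-DATA
		 * variant (I-FORWARD-TSN, RFC 8260) carries
		 * (sid, flags, 32-bit mid), hence the differing element
		 * sizes above.
		 */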
5645 		for (i = 0; i < num_str; i++) {
5646 			if (asoc->idata_supported) {
5647 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5648 				    sizeof(struct sctp_strseq_mid),
5649 				    (uint8_t *)&strseqbuf_m);
5650 				offset += sizeof(struct sctp_strseq_mid);
5651 				if (stseq_m == NULL) {
5652 					break;
5653 				}
5654 				sid = ntohs(stseq_m->sid);
5655 				mid = ntohl(stseq_m->mid);
5656 				flags = ntohs(stseq_m->flags);
5657 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5658 					ordered = 0;
5659 				} else {
5660 					ordered = 1;
5661 				}
5662 			} else {
5663 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5664 				    sizeof(struct sctp_strseq),
5665 				    (uint8_t *)&strseqbuf);
5666 				offset += sizeof(struct sctp_strseq);
5667 				if (stseq == NULL) {
5668 					break;
5669 				}
5670 				sid = ntohs(stseq->sid);
5671 				mid = (uint32_t)ntohs(stseq->ssn);
5672 				ordered = 1;
5673 			}
5674 			/* Now convert and process this entry. */
5675 
5678 			/*
5679 			 * Ok we now look for the stream/seq on the read
5680 			 * queue where it's not all delivered. If we find it,
5681 			 * we transmute the read entry into a PDI_ABORTED.
5682 			 */
5683 			if (sid >= asoc->streamincnt) {
5684 				/* screwed up streams, stop! */
5685 				break;
5686 			}
5687 			if ((asoc->str_of_pdapi == sid) &&
5688 			    (asoc->ssn_of_pdapi == mid)) {
5689 				/*
5690 				 * If this is the one we are currently
5691 				 * partially delivering, then we no longer are.
5692 				 * Note this will change with the reassembly
5693 				 * re-write.
5694 				 */
5695 				asoc->fragmented_delivery_inprogress = 0;
5696 			}
5697 			strm = &asoc->strmin[sid];
5698 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5699 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5700 			}
5701 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5702 				if ((control->sinfo_stream == sid) &&
5703 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5704 					str_seq = (sid << 16) | (0x0000ffff & mid);
5705 					control->pdapi_aborted = 1;
5706 					sv = stcb->asoc.control_pdapi;
5707 					control->end_added = 1;
5708 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5709 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5710 						if (asoc->size_on_all_streams >= control->length) {
5711 							asoc->size_on_all_streams -= control->length;
5712 						} else {
5713 #ifdef INVARIANTS
5714 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5715 #else
5716 							asoc->size_on_all_streams = 0;
5717 #endif
5718 						}
5719 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5720 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5721 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5722 #ifdef INVARIANTS
5723 					} else if (control->on_strm_q) {
5724 						panic("strm: %p ctl: %p unknown %d",
5725 						    strm, control, control->on_strm_q);
5726 #endif
5727 					}
5728 					control->on_strm_q = 0;
5729 					stcb->asoc.control_pdapi = control;
5730 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5731 					    stcb,
5732 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5733 					    (void *)&str_seq,
5734 					    SCTP_SO_NOT_LOCKED);
5735 					stcb->asoc.control_pdapi = sv;
5736 					break;
5737 				} else if ((control->sinfo_stream == sid) &&
5738 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5739 					/* We are past our victim SSN */
5740 					break;
5741 				}
5742 			}
5743 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5744 				/* Update the sequence number */
5745 				strm->last_mid_delivered = mid;
5746 			}
5747 			/* now kick the stream the new way */
5748 			/* sa_ignore NO_NULL_CHK */
5749 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5750 		}
5751 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5752 	}
5753 	/*
5754 	 * Now slide things forward.
5755 	 */
5756 	sctp_slide_mapping_arrays(stcb);
5757 }
5758