xref: /freebsd/sys/netinet/sctp_indata.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send that is and will be sending it .. for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static uint32_t
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-2-m socket. Since
84 	 * the sb_cc is the count that everyone as put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * associations data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
92 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
93 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
94 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
95 	if (stcb->asoc.sb_cc == 0 &&
96 	    asoc->cnt_on_reasm_queue == 0 &&
97 	    asoc->cnt_on_all_streams == 0) {
98 		/* Full rwnd granted */
99 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
100 		return (calc);
101 	}
102 	/* get actual space */
103 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
104 	/*
105 	 * take out what has NOT been put on socket queue and we yet hold
106 	 * for putting up.
107 	 */
108 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
109 	    asoc->cnt_on_reasm_queue * MSIZE));
110 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
111 	    asoc->cnt_on_all_streams * MSIZE));
112 	if (calc == 0) {
113 		/* out of space */
114 		return (calc);
115 	}
116 	/* what is the overhead of all these rwnd's */
117 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
118 	/*
119 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
120 	 * even it is 0. SWS engaged
121 	 */
122 	if (calc < stcb->asoc.my_rwnd_control_len) {
123 		calc = 1;
124 	}
125 	return (calc);
126 }
127 
128 
129 
130 /*
131  * Build out our readq entry based on the incoming packet.
132  */
133 struct sctp_queued_to_read *
134 sctp_build_readq_entry(struct sctp_tcb *stcb,
135     struct sctp_nets *net,
136     uint32_t tsn, uint32_t ppid,
137     uint32_t context, uint16_t sid,
138     uint32_t mid, uint8_t flags,
139     struct mbuf *dm)
140 {
141 	struct sctp_queued_to_read *read_queue_e = NULL;
142 
143 	sctp_alloc_a_readq(stcb, read_queue_e);
144 	if (read_queue_e == NULL) {
145 		goto failed_build;
146 	}
147 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
148 	read_queue_e->sinfo_stream = sid;
149 	read_queue_e->sinfo_flags = (flags << 8);
150 	read_queue_e->sinfo_ppid = ppid;
151 	read_queue_e->sinfo_context = context;
152 	read_queue_e->sinfo_tsn = tsn;
153 	read_queue_e->sinfo_cumtsn = tsn;
154 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
155 	read_queue_e->mid = mid;
156 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
157 	TAILQ_INIT(&read_queue_e->reasm);
158 	read_queue_e->whoFrom = net;
159 	atomic_add_int(&net->ref_count, 1);
160 	read_queue_e->data = dm;
161 	read_queue_e->stcb = stcb;
162 	read_queue_e->port_from = stcb->rport;
163 failed_build:
164 	return (read_queue_e);
165 }
166 
167 struct mbuf *
168 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
169 {
170 	struct sctp_extrcvinfo *seinfo;
171 	struct sctp_sndrcvinfo *outinfo;
172 	struct sctp_rcvinfo *rcvinfo;
173 	struct sctp_nxtinfo *nxtinfo;
174 	struct cmsghdr *cmh;
175 	struct mbuf *ret;
176 	int len;
177 	int use_extended;
178 	int provide_nxt;
179 
180 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
181 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
182 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
183 		/* user does not want any ancillary data */
184 		return (NULL);
185 	}
186 	len = 0;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
188 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
189 	}
190 	seinfo = (struct sctp_extrcvinfo *)sinfo;
191 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
192 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
193 		provide_nxt = 1;
194 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
195 	} else {
196 		provide_nxt = 0;
197 	}
198 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
199 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
200 			use_extended = 1;
201 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
202 		} else {
203 			use_extended = 0;
204 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
205 		}
206 	} else {
207 		use_extended = 0;
208 	}
209 
210 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
211 	if (ret == NULL) {
212 		/* No space */
213 		return (ret);
214 	}
215 	SCTP_BUF_LEN(ret) = 0;
216 
217 	/* We need a CMSG header followed by the struct */
218 	cmh = mtod(ret, struct cmsghdr *);
219 	/*
220 	 * Make sure that there is no un-initialized padding between the
221 	 * cmsg header and cmsg data and after the cmsg data.
222 	 */
223 	memset(cmh, 0, len);
224 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
225 		cmh->cmsg_level = IPPROTO_SCTP;
226 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
227 		cmh->cmsg_type = SCTP_RCVINFO;
228 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
229 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
230 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
231 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
232 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
233 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
234 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
235 		rcvinfo->rcv_context = sinfo->sinfo_context;
236 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
237 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
238 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
239 	}
240 	if (provide_nxt) {
241 		cmh->cmsg_level = IPPROTO_SCTP;
242 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
243 		cmh->cmsg_type = SCTP_NXTINFO;
244 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
245 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
246 		nxtinfo->nxt_flags = 0;
247 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
248 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
249 		}
250 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
251 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
252 		}
253 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
254 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
255 		}
256 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
257 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
258 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
259 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
260 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
261 	}
262 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
263 		cmh->cmsg_level = IPPROTO_SCTP;
264 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
265 		if (use_extended) {
266 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
267 			cmh->cmsg_type = SCTP_EXTRCV;
268 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
269 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
270 		} else {
271 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
272 			cmh->cmsg_type = SCTP_SNDRCV;
273 			*outinfo = *sinfo;
274 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
275 		}
276 	}
277 	return (ret);
278 }
279 
280 
281 static void
282 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
283 {
284 	uint32_t gap, i, cumackp1;
285 	int fnd = 0;
286 	int in_r = 0, in_nr = 0;
287 
288 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
289 		return;
290 	}
291 	cumackp1 = asoc->cumulative_tsn + 1;
292 	if (SCTP_TSN_GT(cumackp1, tsn)) {
293 		/*
294 		 * this tsn is behind the cum ack and thus we don't need to
295 		 * worry about it being moved from one to the other.
296 		 */
297 		return;
298 	}
299 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
300 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
301 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
302 	if ((in_r == 0) && (in_nr == 0)) {
303 #ifdef INVARIANTS
304 		panic("Things are really messed up now");
305 #else
306 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
307 		sctp_print_mapping_array(asoc);
308 #endif
309 	}
310 	if (in_nr == 0)
311 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
312 	if (in_r)
313 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
314 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
315 		asoc->highest_tsn_inside_nr_map = tsn;
316 	}
317 	if (tsn == asoc->highest_tsn_inside_map) {
318 		/* We must back down to see what the new highest is */
319 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
320 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
321 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
322 				asoc->highest_tsn_inside_map = i;
323 				fnd = 1;
324 				break;
325 			}
326 		}
327 		if (!fnd) {
328 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
329 		}
330 	}
331 }
332 
333 static int
334 sctp_place_control_in_stream(struct sctp_stream_in *strm,
335     struct sctp_association *asoc,
336     struct sctp_queued_to_read *control)
337 {
338 	struct sctp_queued_to_read *at;
339 	struct sctp_readhead *q;
340 	uint8_t flags, unordered;
341 
342 	flags = (control->sinfo_flags >> 8);
343 	unordered = flags & SCTP_DATA_UNORDERED;
344 	if (unordered) {
345 		q = &strm->uno_inqueue;
346 		if (asoc->idata_supported == 0) {
347 			if (!TAILQ_EMPTY(q)) {
348 				/*
349 				 * Only one stream can be here in old style
350 				 * -- abort
351 				 */
352 				return (-1);
353 			}
354 			TAILQ_INSERT_TAIL(q, control, next_instrm);
355 			control->on_strm_q = SCTP_ON_UNORDERED;
356 			return (0);
357 		}
358 	} else {
359 		q = &strm->inqueue;
360 	}
361 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
362 		control->end_added = 1;
363 		control->first_frag_seen = 1;
364 		control->last_frag_seen = 1;
365 	}
366 	if (TAILQ_EMPTY(q)) {
367 		/* Empty queue */
368 		TAILQ_INSERT_HEAD(q, control, next_instrm);
369 		if (unordered) {
370 			control->on_strm_q = SCTP_ON_UNORDERED;
371 		} else {
372 			control->on_strm_q = SCTP_ON_ORDERED;
373 		}
374 		return (0);
375 	} else {
376 		TAILQ_FOREACH(at, q, next_instrm) {
377 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
378 				/*
379 				 * one in queue is bigger than the new one,
380 				 * insert before this one
381 				 */
382 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
383 				if (unordered) {
384 					control->on_strm_q = SCTP_ON_UNORDERED;
385 				} else {
386 					control->on_strm_q = SCTP_ON_ORDERED;
387 				}
388 				break;
389 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
390 				/*
391 				 * Gak, He sent me a duplicate msg id
392 				 * number?? return -1 to abort.
393 				 */
394 				return (-1);
395 			} else {
396 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
397 					/*
398 					 * We are at the end, insert it
399 					 * after this one
400 					 */
401 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
402 						sctp_log_strm_del(control, at,
403 						    SCTP_STR_LOG_FROM_INSERT_TL);
404 					}
405 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
406 					if (unordered) {
407 						control->on_strm_q = SCTP_ON_UNORDERED;
408 					} else {
409 						control->on_strm_q = SCTP_ON_ORDERED;
410 					}
411 					break;
412 				}
413 			}
414 		}
415 	}
416 	return (0);
417 }
418 
419 static void
420 sctp_abort_in_reasm(struct sctp_tcb *stcb,
421     struct sctp_queued_to_read *control,
422     struct sctp_tmit_chunk *chk,
423     int *abort_flag, int opspot)
424 {
425 	char msg[SCTP_DIAG_INFO_LEN];
426 	struct mbuf *oper;
427 
428 	if (stcb->asoc.idata_supported) {
429 		snprintf(msg, sizeof(msg),
430 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
431 		    opspot,
432 		    control->fsn_included,
433 		    chk->rec.data.tsn,
434 		    chk->rec.data.sid,
435 		    chk->rec.data.fsn, chk->rec.data.mid);
436 	} else {
437 		snprintf(msg, sizeof(msg),
438 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
439 		    opspot,
440 		    control->fsn_included,
441 		    chk->rec.data.tsn,
442 		    chk->rec.data.sid,
443 		    chk->rec.data.fsn,
444 		    (uint16_t)chk->rec.data.mid);
445 	}
446 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
447 	sctp_m_freem(chk->data);
448 	chk->data = NULL;
449 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
450 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
451 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
452 	*abort_flag = 1;
453 }
454 
455 static void
456 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
457 {
458 	/*
459 	 * The control could not be placed and must be cleaned.
460 	 */
461 	struct sctp_tmit_chunk *chk, *nchk;
462 
463 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
464 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
465 		if (chk->data)
466 			sctp_m_freem(chk->data);
467 		chk->data = NULL;
468 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
469 	}
470 	sctp_free_a_readq(stcb, control);
471 }
472 
473 /*
474  * Queue the chunk either right into the socket buffer if it is the next one
475  * to go OR put it in the correct place in the delivery queue.  If we do
476  * append to the so_buf, keep doing so until we are out of order as
477  * long as the control's entered are non-fragmented.
478  */
479 static void
480 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
481     struct sctp_association *asoc,
482     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
483 {
484 	/*
485 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
486 	 * all the data in one stream this could happen quite rapidly. One
487 	 * could use the TSN to keep track of things, but this scheme breaks
488 	 * down in the other type of stream usage that could occur. Send a
489 	 * single msg to stream 0, send 4Billion messages to stream 1, now
490 	 * send a message to stream 0. You have a situation where the TSN
491 	 * has wrapped but not in the stream. Is this worth worrying about
492 	 * or should we just change our queue sort at the bottom to be by
493 	 * TSN.
494 	 *
495 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
496 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
497 	 * assignment this could happen... and I don't see how this would be
498 	 * a violation. So for now I am undecided an will leave the sort by
499 	 * SSN alone. Maybe a hybred approach is the answer
500 	 *
501 	 */
502 	struct sctp_queued_to_read *at;
503 	int queue_needed;
504 	uint32_t nxt_todel;
505 	struct mbuf *op_err;
506 	struct sctp_stream_in *strm;
507 	char msg[SCTP_DIAG_INFO_LEN];
508 
509 	strm = &asoc->strmin[control->sinfo_stream];
510 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
511 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
512 	}
513 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
514 		/* The incoming sseq is behind where we last delivered? */
515 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
516 		    strm->last_mid_delivered, control->mid);
517 		/*
518 		 * throw it in the stream so it gets cleaned up in
519 		 * association destruction
520 		 */
521 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
522 		if (asoc->idata_supported) {
523 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
524 			    strm->last_mid_delivered, control->sinfo_tsn,
525 			    control->sinfo_stream, control->mid);
526 		} else {
527 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
528 			    (uint16_t)strm->last_mid_delivered,
529 			    control->sinfo_tsn,
530 			    control->sinfo_stream,
531 			    (uint16_t)control->mid);
532 		}
533 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
534 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
535 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
536 		*abort_flag = 1;
537 		return;
538 
539 	}
540 	queue_needed = 1;
541 	asoc->size_on_all_streams += control->length;
542 	sctp_ucount_incr(asoc->cnt_on_all_streams);
543 	nxt_todel = strm->last_mid_delivered + 1;
544 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
545 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
546 		struct socket *so;
547 
548 		so = SCTP_INP_SO(stcb->sctp_ep);
549 		atomic_add_int(&stcb->asoc.refcnt, 1);
550 		SCTP_TCB_UNLOCK(stcb);
551 		SCTP_SOCKET_LOCK(so, 1);
552 		SCTP_TCB_LOCK(stcb);
553 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
554 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
555 			SCTP_SOCKET_UNLOCK(so, 1);
556 			return;
557 		}
558 #endif
559 		/* can be delivered right away? */
560 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
561 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
562 		}
563 		/* EY it wont be queued if it could be delivered directly */
564 		queue_needed = 0;
565 		if (asoc->size_on_all_streams >= control->length) {
566 			asoc->size_on_all_streams -= control->length;
567 		} else {
568 #ifdef INVARIANTS
569 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
570 #else
571 			asoc->size_on_all_streams = 0;
572 #endif
573 		}
574 		sctp_ucount_decr(asoc->cnt_on_all_streams);
575 		strm->last_mid_delivered++;
576 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
577 		sctp_add_to_readq(stcb->sctp_ep, stcb,
578 		    control,
579 		    &stcb->sctp_socket->so_rcv, 1,
580 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
581 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
582 			/* all delivered */
583 			nxt_todel = strm->last_mid_delivered + 1;
584 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
585 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
586 				if (control->on_strm_q == SCTP_ON_ORDERED) {
587 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
588 					if (asoc->size_on_all_streams >= control->length) {
589 						asoc->size_on_all_streams -= control->length;
590 					} else {
591 #ifdef INVARIANTS
592 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
593 #else
594 						asoc->size_on_all_streams = 0;
595 #endif
596 					}
597 					sctp_ucount_decr(asoc->cnt_on_all_streams);
598 #ifdef INVARIANTS
599 				} else {
600 					panic("Huh control: %p is on_strm_q: %d",
601 					    control, control->on_strm_q);
602 #endif
603 				}
604 				control->on_strm_q = 0;
605 				strm->last_mid_delivered++;
606 				/*
607 				 * We ignore the return of deliver_data here
608 				 * since we always can hold the chunk on the
609 				 * d-queue. And we have a finite number that
610 				 * can be delivered from the strq.
611 				 */
612 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
613 					sctp_log_strm_del(control, NULL,
614 					    SCTP_STR_LOG_FROM_IMMED_DEL);
615 				}
616 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
617 				sctp_add_to_readq(stcb->sctp_ep, stcb,
618 				    control,
619 				    &stcb->sctp_socket->so_rcv, 1,
620 				    SCTP_READ_LOCK_NOT_HELD,
621 				    SCTP_SO_LOCKED);
622 				continue;
623 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
624 				*need_reasm = 1;
625 			}
626 			break;
627 		}
628 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
629 		SCTP_SOCKET_UNLOCK(so, 1);
630 #endif
631 	}
632 	if (queue_needed) {
633 		/*
634 		 * Ok, we did not deliver this guy, find the correct place
635 		 * to put it on the queue.
636 		 */
637 		if (sctp_place_control_in_stream(strm, asoc, control)) {
638 			snprintf(msg, sizeof(msg),
639 			    "Queue to str MID: %u duplicate",
640 			    control->mid);
641 			sctp_clean_up_control(stcb, control);
642 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
643 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
644 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
645 			*abort_flag = 1;
646 		}
647 	}
648 }
649 
650 
651 static void
652 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
653 {
654 	struct mbuf *m, *prev = NULL;
655 	struct sctp_tcb *stcb;
656 
657 	stcb = control->stcb;
658 	control->held_length = 0;
659 	control->length = 0;
660 	m = control->data;
661 	while (m) {
662 		if (SCTP_BUF_LEN(m) == 0) {
663 			/* Skip mbufs with NO length */
664 			if (prev == NULL) {
665 				/* First one */
666 				control->data = sctp_m_free(m);
667 				m = control->data;
668 			} else {
669 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
670 				m = SCTP_BUF_NEXT(prev);
671 			}
672 			if (m == NULL) {
673 				control->tail_mbuf = prev;
674 			}
675 			continue;
676 		}
677 		prev = m;
678 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
679 		if (control->on_read_q) {
680 			/*
681 			 * On read queue so we must increment the SB stuff,
682 			 * we assume caller has done any locks of SB.
683 			 */
684 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
685 		}
686 		m = SCTP_BUF_NEXT(m);
687 	}
688 	if (prev) {
689 		control->tail_mbuf = prev;
690 	}
691 }
692 
693 static void
694 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
695 {
696 	struct mbuf *prev = NULL;
697 	struct sctp_tcb *stcb;
698 
699 	stcb = control->stcb;
700 	if (stcb == NULL) {
701 #ifdef INVARIANTS
702 		panic("Control broken");
703 #else
704 		return;
705 #endif
706 	}
707 	if (control->tail_mbuf == NULL) {
708 		/* TSNH */
709 		control->data = m;
710 		sctp_setup_tail_pointer(control);
711 		return;
712 	}
713 	control->tail_mbuf->m_next = m;
714 	while (m) {
715 		if (SCTP_BUF_LEN(m) == 0) {
716 			/* Skip mbufs with NO length */
717 			if (prev == NULL) {
718 				/* First one */
719 				control->tail_mbuf->m_next = sctp_m_free(m);
720 				m = control->tail_mbuf->m_next;
721 			} else {
722 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
723 				m = SCTP_BUF_NEXT(prev);
724 			}
725 			if (m == NULL) {
726 				control->tail_mbuf = prev;
727 			}
728 			continue;
729 		}
730 		prev = m;
731 		if (control->on_read_q) {
732 			/*
733 			 * On read queue so we must increment the SB stuff,
734 			 * we assume caller has done any locks of SB.
735 			 */
736 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
737 		}
738 		*added += SCTP_BUF_LEN(m);
739 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
740 		m = SCTP_BUF_NEXT(m);
741 	}
742 	if (prev) {
743 		control->tail_mbuf = prev;
744 	}
745 }
746 
747 static void
748 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
749 {
750 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
751 	nc->sinfo_stream = control->sinfo_stream;
752 	nc->mid = control->mid;
753 	TAILQ_INIT(&nc->reasm);
754 	nc->top_fsn = control->top_fsn;
755 	nc->mid = control->mid;
756 	nc->sinfo_flags = control->sinfo_flags;
757 	nc->sinfo_ppid = control->sinfo_ppid;
758 	nc->sinfo_context = control->sinfo_context;
759 	nc->fsn_included = 0xffffffff;
760 	nc->sinfo_tsn = control->sinfo_tsn;
761 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
762 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
763 	nc->whoFrom = control->whoFrom;
764 	atomic_add_int(&nc->whoFrom->ref_count, 1);
765 	nc->stcb = control->stcb;
766 	nc->port_from = control->port_from;
767 }
768 
769 static void
770 sctp_reset_a_control(struct sctp_queued_to_read *control,
771     struct sctp_inpcb *inp, uint32_t tsn)
772 {
773 	control->fsn_included = tsn;
774 	if (control->on_read_q) {
775 		/*
776 		 * We have to purge it from there, hopefully this will work
777 		 * :-)
778 		 */
779 		TAILQ_REMOVE(&inp->read_queue, control, next);
780 		control->on_read_q = 0;
781 	}
782 }
783 
784 static int
785 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
786     struct sctp_association *asoc,
787     struct sctp_stream_in *strm,
788     struct sctp_queued_to_read *control,
789     uint32_t pd_point,
790     int inp_read_lock_held)
791 {
792 	/*
793 	 * Special handling for the old un-ordered data chunk. All the
794 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
795 	 * to see if we have it all. If you return one, no other control
796 	 * entries on the un-ordered queue will be looked at. In theory
797 	 * there should be no others entries in reality, unless the guy is
798 	 * sending both unordered NDATA and unordered DATA...
799 	 */
800 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
801 	uint32_t fsn;
802 	struct sctp_queued_to_read *nc;
803 	int cnt_added;
804 
805 	if (control->first_frag_seen == 0) {
806 		/* Nothing we can do, we have not seen the first piece yet */
807 		return (1);
808 	}
809 	/* Collapse any we can */
810 	cnt_added = 0;
811 restart:
812 	fsn = control->fsn_included + 1;
813 	/* Now what can we add? */
814 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
815 		if (chk->rec.data.fsn == fsn) {
816 			/* Ok lets add it */
817 			sctp_alloc_a_readq(stcb, nc);
818 			if (nc == NULL) {
819 				break;
820 			}
821 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
822 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
823 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
824 			fsn++;
825 			cnt_added++;
826 			chk = NULL;
827 			if (control->end_added) {
828 				/* We are done */
829 				if (!TAILQ_EMPTY(&control->reasm)) {
830 					/*
831 					 * Ok we have to move anything left
832 					 * on the control queue to a new
833 					 * control.
834 					 */
835 					sctp_build_readq_entry_from_ctl(nc, control);
836 					tchk = TAILQ_FIRST(&control->reasm);
837 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
838 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
839 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
840 							asoc->size_on_reasm_queue -= tchk->send_size;
841 						} else {
842 #ifdef INVARIANTS
843 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
844 #else
845 							asoc->size_on_reasm_queue = 0;
846 #endif
847 						}
848 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
849 						nc->first_frag_seen = 1;
850 						nc->fsn_included = tchk->rec.data.fsn;
851 						nc->data = tchk->data;
852 						nc->sinfo_ppid = tchk->rec.data.ppid;
853 						nc->sinfo_tsn = tchk->rec.data.tsn;
854 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
855 						tchk->data = NULL;
856 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
857 						sctp_setup_tail_pointer(nc);
858 						tchk = TAILQ_FIRST(&control->reasm);
859 					}
860 					/* Spin the rest onto the queue */
861 					while (tchk) {
862 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
863 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
864 						tchk = TAILQ_FIRST(&control->reasm);
865 					}
866 					/*
867 					 * Now lets add it to the queue
868 					 * after removing control
869 					 */
870 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
871 					nc->on_strm_q = SCTP_ON_UNORDERED;
872 					if (control->on_strm_q) {
873 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
874 						control->on_strm_q = 0;
875 					}
876 				}
877 				if (control->pdapi_started) {
878 					strm->pd_api_started = 0;
879 					control->pdapi_started = 0;
880 				}
881 				if (control->on_strm_q) {
882 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
883 					control->on_strm_q = 0;
884 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
885 				}
886 				if (control->on_read_q == 0) {
887 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
888 					    &stcb->sctp_socket->so_rcv, control->end_added,
889 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
890 				}
891 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
892 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
893 					/*
894 					 * Switch to the new guy and
895 					 * continue
896 					 */
897 					control = nc;
898 					goto restart;
899 				} else {
900 					if (nc->on_strm_q == 0) {
901 						sctp_free_a_readq(stcb, nc);
902 					}
903 				}
904 				return (1);
905 			} else {
906 				sctp_free_a_readq(stcb, nc);
907 			}
908 		} else {
909 			/* Can't add more */
910 			break;
911 		}
912 	}
913 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
914 		strm->pd_api_started = 1;
915 		control->pdapi_started = 1;
916 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
917 		    &stcb->sctp_socket->so_rcv, control->end_added,
918 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
919 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
920 		return (0);
921 	} else {
922 		return (1);
923 	}
924 }
925 
926 static void
927 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
928     struct sctp_association *asoc,
929     struct sctp_queued_to_read *control,
930     struct sctp_tmit_chunk *chk,
931     int *abort_flag)
932 {
933 	struct sctp_tmit_chunk *at;
934 	int inserted;
935 
936 	/*
937 	 * Here we need to place the chunk into the control structure sorted
938 	 * in the correct order.
939 	 */
940 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
941 		/* Its the very first one. */
942 		SCTPDBG(SCTP_DEBUG_XXX,
943 		    "chunk is a first fsn: %u becomes fsn_included\n",
944 		    chk->rec.data.fsn);
945 		if (control->first_frag_seen) {
946 			/*
947 			 * In old un-ordered we can reassembly on one
948 			 * control multiple messages. As long as the next
949 			 * FIRST is greater then the old first (TSN i.e. FSN
950 			 * wise)
951 			 */
952 			struct mbuf *tdata;
953 			uint32_t tmp;
954 
955 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
956 				/*
957 				 * Easy way the start of a new guy beyond
958 				 * the lowest
959 				 */
960 				goto place_chunk;
961 			}
962 			if ((chk->rec.data.fsn == control->fsn_included) ||
963 			    (control->pdapi_started)) {
964 				/*
965 				 * Ok this should not happen, if it does we
966 				 * started the pd-api on the higher TSN
967 				 * (since the equals part is a TSN failure
968 				 * it must be that).
969 				 *
970 				 * We are completly hosed in that case since
971 				 * I have no way to recover. This really
972 				 * will only happen if we can get more TSN's
973 				 * higher before the pd-api-point.
974 				 */
975 				sctp_abort_in_reasm(stcb, control, chk,
976 				    abort_flag,
977 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
978 
979 				return;
980 			}
981 			/*
982 			 * Ok we have two firsts and the one we just got is
983 			 * smaller than the one we previously placed.. yuck!
984 			 * We must swap them out.
985 			 */
986 			/* swap the mbufs */
987 			tdata = control->data;
988 			control->data = chk->data;
989 			chk->data = tdata;
990 			/* Save the lengths */
991 			chk->send_size = control->length;
992 			/* Recompute length of control and tail pointer */
993 			sctp_setup_tail_pointer(control);
994 			/* Fix the FSN included */
995 			tmp = control->fsn_included;
996 			control->fsn_included = chk->rec.data.fsn;
997 			chk->rec.data.fsn = tmp;
998 			/* Fix the TSN included */
999 			tmp = control->sinfo_tsn;
1000 			control->sinfo_tsn = chk->rec.data.tsn;
1001 			chk->rec.data.tsn = tmp;
1002 			/* Fix the PPID included */
1003 			tmp = control->sinfo_ppid;
1004 			control->sinfo_ppid = chk->rec.data.ppid;
1005 			chk->rec.data.ppid = tmp;
1006 			/* Fix tail pointer */
1007 			goto place_chunk;
1008 		}
1009 		control->first_frag_seen = 1;
1010 		control->fsn_included = chk->rec.data.fsn;
1011 		control->top_fsn = chk->rec.data.fsn;
1012 		control->sinfo_tsn = chk->rec.data.tsn;
1013 		control->sinfo_ppid = chk->rec.data.ppid;
1014 		control->data = chk->data;
1015 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1016 		chk->data = NULL;
1017 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1018 		sctp_setup_tail_pointer(control);
1019 		return;
1020 	}
1021 place_chunk:
1022 	inserted = 0;
1023 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1024 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1025 			/*
1026 			 * This one in queue is bigger than the new one,
1027 			 * insert the new one before at.
1028 			 */
1029 			asoc->size_on_reasm_queue += chk->send_size;
1030 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1031 			inserted = 1;
1032 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1033 			break;
1034 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1035 			/*
1036 			 * They sent a duplicate fsn number. This really
1037 			 * should not happen since the FSN is a TSN and it
1038 			 * should have been dropped earlier.
1039 			 */
1040 			sctp_abort_in_reasm(stcb, control, chk,
1041 			    abort_flag,
1042 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1043 			return;
1044 		}
1045 	}
1046 	if (inserted == 0) {
1047 		/* Its at the end */
1048 		asoc->size_on_reasm_queue += chk->send_size;
1049 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1050 		control->top_fsn = chk->rec.data.fsn;
1051 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1052 	}
1053 }
1054 
1055 static int
1056 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1057     struct sctp_stream_in *strm, int inp_read_lock_held)
1058 {
1059 	/*
1060 	 * Given a stream, strm, see if any of the SSN's on it that are
1061 	 * fragmented are ready to deliver. If so go ahead and place them on
1062 	 * the read queue. In so placing if we have hit the end, then we
1063 	 * need to remove them from the stream's queue.
1064 	 */
1065 	struct sctp_queued_to_read *control, *nctl = NULL;
1066 	uint32_t next_to_del;
1067 	uint32_t pd_point;
1068 	int ret = 0;
1069 
1070 	if (stcb->sctp_socket) {
1071 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1072 		    stcb->sctp_ep->partial_delivery_point);
1073 	} else {
1074 		pd_point = stcb->sctp_ep->partial_delivery_point;
1075 	}
1076 	control = TAILQ_FIRST(&strm->uno_inqueue);
1077 
1078 	if ((control != NULL) &&
1079 	    (asoc->idata_supported == 0)) {
1080 		/* Special handling needed for "old" data format */
1081 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1082 			goto done_un;
1083 		}
1084 	}
1085 	if (strm->pd_api_started) {
1086 		/* Can't add more */
1087 		return (0);
1088 	}
1089 	while (control) {
1090 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1091 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1092 		nctl = TAILQ_NEXT(control, next_instrm);
1093 		if (control->end_added) {
1094 			/* We just put the last bit on */
1095 			if (control->on_strm_q) {
1096 #ifdef INVARIANTS
1097 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1098 					panic("Huh control: %p on_q: %d -- not unordered?",
1099 					    control, control->on_strm_q);
1100 				}
1101 #endif
1102 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1103 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1104 				control->on_strm_q = 0;
1105 			}
1106 			if (control->on_read_q == 0) {
1107 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1108 				    control,
1109 				    &stcb->sctp_socket->so_rcv, control->end_added,
1110 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1111 			}
1112 		} else {
1113 			/* Can we do a PD-API for this un-ordered guy? */
1114 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1115 				strm->pd_api_started = 1;
1116 				control->pdapi_started = 1;
1117 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1118 				    control,
1119 				    &stcb->sctp_socket->so_rcv, control->end_added,
1120 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1121 
1122 				break;
1123 			}
1124 		}
1125 		control = nctl;
1126 	}
1127 done_un:
1128 	control = TAILQ_FIRST(&strm->inqueue);
1129 	if (strm->pd_api_started) {
1130 		/* Can't add more */
1131 		return (0);
1132 	}
1133 	if (control == NULL) {
1134 		return (ret);
1135 	}
1136 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1137 		/*
1138 		 * Ok the guy at the top was being partially delivered
1139 		 * completed, so we remove it. Note the pd_api flag was
1140 		 * taken off when the chunk was merged on in
1141 		 * sctp_queue_data_for_reasm below.
1142 		 */
1143 		nctl = TAILQ_NEXT(control, next_instrm);
1144 		SCTPDBG(SCTP_DEBUG_XXX,
1145 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1146 		    control, control->end_added, control->mid,
1147 		    control->top_fsn, control->fsn_included,
1148 		    strm->last_mid_delivered);
1149 		if (control->end_added) {
1150 			if (control->on_strm_q) {
1151 #ifdef INVARIANTS
1152 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1153 					panic("Huh control: %p on_q: %d -- not ordered?",
1154 					    control, control->on_strm_q);
1155 				}
1156 #endif
1157 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1158 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1159 				if (asoc->size_on_all_streams >= control->length) {
1160 					asoc->size_on_all_streams -= control->length;
1161 				} else {
1162 #ifdef INVARIANTS
1163 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1164 #else
1165 					asoc->size_on_all_streams = 0;
1166 #endif
1167 				}
1168 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1169 				control->on_strm_q = 0;
1170 			}
1171 			if (strm->pd_api_started && control->pdapi_started) {
1172 				control->pdapi_started = 0;
1173 				strm->pd_api_started = 0;
1174 			}
1175 			if (control->on_read_q == 0) {
1176 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1177 				    control,
1178 				    &stcb->sctp_socket->so_rcv, control->end_added,
1179 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1180 			}
1181 			control = nctl;
1182 		}
1183 	}
1184 	if (strm->pd_api_started) {
1185 		/*
1186 		 * Can't add more must have gotten an un-ordered above being
1187 		 * partially delivered.
1188 		 */
1189 		return (0);
1190 	}
1191 deliver_more:
1192 	next_to_del = strm->last_mid_delivered + 1;
1193 	if (control) {
1194 		SCTPDBG(SCTP_DEBUG_XXX,
1195 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1196 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1197 		    next_to_del);
1198 		nctl = TAILQ_NEXT(control, next_instrm);
1199 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1200 		    (control->first_frag_seen)) {
1201 			int done;
1202 
1203 			/* Ok we can deliver it onto the stream. */
1204 			if (control->end_added) {
1205 				/* We are done with it afterwards */
1206 				if (control->on_strm_q) {
1207 #ifdef INVARIANTS
1208 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1209 						panic("Huh control: %p on_q: %d -- not ordered?",
1210 						    control, control->on_strm_q);
1211 					}
1212 #endif
1213 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1214 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1215 					if (asoc->size_on_all_streams >= control->length) {
1216 						asoc->size_on_all_streams -= control->length;
1217 					} else {
1218 #ifdef INVARIANTS
1219 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1220 #else
1221 						asoc->size_on_all_streams = 0;
1222 #endif
1223 					}
1224 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1225 					control->on_strm_q = 0;
1226 				}
1227 				ret++;
1228 			}
1229 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1230 				/*
1231 				 * A singleton now slipping through - mark
1232 				 * it non-revokable too
1233 				 */
1234 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1235 			} else if (control->end_added == 0) {
1236 				/*
1237 				 * Check if we can defer adding until its
1238 				 * all there
1239 				 */
1240 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1241 					/*
1242 					 * Don't need it or cannot add more
1243 					 * (one being delivered that way)
1244 					 */
1245 					goto out;
1246 				}
1247 			}
1248 			done = (control->end_added) && (control->last_frag_seen);
1249 			if (control->on_read_q == 0) {
1250 				if (!done) {
1251 					if (asoc->size_on_all_streams >= control->length) {
1252 						asoc->size_on_all_streams -= control->length;
1253 					} else {
1254 #ifdef INVARIANTS
1255 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1256 #else
1257 						asoc->size_on_all_streams = 0;
1258 #endif
1259 					}
1260 					strm->pd_api_started = 1;
1261 					control->pdapi_started = 1;
1262 				}
1263 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1264 				    control,
1265 				    &stcb->sctp_socket->so_rcv, control->end_added,
1266 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1267 			}
1268 			strm->last_mid_delivered = next_to_del;
1269 			if (done) {
1270 				control = nctl;
1271 				goto deliver_more;
1272 			}
1273 		}
1274 	}
1275 out:
1276 	return (ret);
1277 }
1278 
1279 
1280 uint32_t
1281 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1282     struct sctp_stream_in *strm,
1283     struct sctp_tcb *stcb, struct sctp_association *asoc,
1284     struct sctp_tmit_chunk *chk, int hold_rlock)
1285 {
1286 	/*
1287 	 * Given a control and a chunk, merge the data from the chk onto the
1288 	 * control and free up the chunk resources.
1289 	 */
1290 	uint32_t added = 0;
1291 	int i_locked = 0;
1292 
1293 	if (control->on_read_q && (hold_rlock == 0)) {
1294 		/*
1295 		 * Its being pd-api'd so we must do some locks.
1296 		 */
1297 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1298 		i_locked = 1;
1299 	}
1300 	if (control->data == NULL) {
1301 		control->data = chk->data;
1302 		sctp_setup_tail_pointer(control);
1303 	} else {
1304 		sctp_add_to_tail_pointer(control, chk->data, &added);
1305 	}
1306 	control->fsn_included = chk->rec.data.fsn;
1307 	asoc->size_on_reasm_queue -= chk->send_size;
1308 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1309 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1310 	chk->data = NULL;
1311 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1312 		control->first_frag_seen = 1;
1313 		control->sinfo_tsn = chk->rec.data.tsn;
1314 		control->sinfo_ppid = chk->rec.data.ppid;
1315 	}
1316 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1317 		/* Its complete */
1318 		if ((control->on_strm_q) && (control->on_read_q)) {
1319 			if (control->pdapi_started) {
1320 				control->pdapi_started = 0;
1321 				strm->pd_api_started = 0;
1322 			}
1323 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1324 				/* Unordered */
1325 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1326 				control->on_strm_q = 0;
1327 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1328 				/* Ordered */
1329 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1330 				/*
1331 				 * Don't need to decrement
1332 				 * size_on_all_streams, since control is on
1333 				 * the read queue.
1334 				 */
1335 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1336 				control->on_strm_q = 0;
1337 #ifdef INVARIANTS
1338 			} else if (control->on_strm_q) {
1339 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1340 				    control->on_strm_q);
1341 #endif
1342 			}
1343 		}
1344 		control->end_added = 1;
1345 		control->last_frag_seen = 1;
1346 	}
1347 	if (i_locked) {
1348 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1349 	}
1350 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1351 	return (added);
1352 }
1353 
1354 /*
1355  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1356  * queue, see if anthing can be delivered. If so pull it off (or as much as
1357  * we can. If we run out of space then we must dump what we can and set the
1358  * appropriate flag to say we queued what we could.
1359  */
1360 static void
1361 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1362     struct sctp_queued_to_read *control,
1363     struct sctp_tmit_chunk *chk,
1364     int created_control,
1365     int *abort_flag, uint32_t tsn)
1366 {
1367 	uint32_t next_fsn;
1368 	struct sctp_tmit_chunk *at, *nat;
1369 	struct sctp_stream_in *strm;
1370 	int do_wakeup, unordered;
1371 	uint32_t lenadded;
1372 
1373 	strm = &asoc->strmin[control->sinfo_stream];
1374 	/*
1375 	 * For old un-ordered data chunks.
1376 	 */
1377 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1378 		unordered = 1;
1379 	} else {
1380 		unordered = 0;
1381 	}
1382 	/* Must be added to the stream-in queue */
1383 	if (created_control) {
1384 		if (unordered == 0) {
1385 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1386 		}
1387 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1388 			/* Duplicate SSN? */
1389 			sctp_abort_in_reasm(stcb, control, chk,
1390 			    abort_flag,
1391 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1392 			sctp_clean_up_control(stcb, control);
1393 			return;
1394 		}
1395 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1396 			/*
1397 			 * Ok we created this control and now lets validate
1398 			 * that its legal i.e. there is a B bit set, if not
1399 			 * and we have up to the cum-ack then its invalid.
1400 			 */
1401 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1402 				sctp_abort_in_reasm(stcb, control, chk,
1403 				    abort_flag,
1404 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1405 				return;
1406 			}
1407 		}
1408 	}
1409 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1410 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1411 		return;
1412 	}
1413 	/*
1414 	 * Ok we must queue the chunk into the reasembly portion: o if its
1415 	 * the first it goes to the control mbuf. o if its not first but the
1416 	 * next in sequence it goes to the control, and each succeeding one
1417 	 * in order also goes. o if its not in order we place it on the list
1418 	 * in its place.
1419 	 */
1420 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1421 		/* Its the very first one. */
1422 		SCTPDBG(SCTP_DEBUG_XXX,
1423 		    "chunk is a first fsn: %u becomes fsn_included\n",
1424 		    chk->rec.data.fsn);
1425 		if (control->first_frag_seen) {
1426 			/*
1427 			 * Error on senders part, they either sent us two
1428 			 * data chunks with FIRST, or they sent two
1429 			 * un-ordered chunks that were fragmented at the
1430 			 * same time in the same stream.
1431 			 */
1432 			sctp_abort_in_reasm(stcb, control, chk,
1433 			    abort_flag,
1434 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1435 			return;
1436 		}
1437 		control->first_frag_seen = 1;
1438 		control->sinfo_ppid = chk->rec.data.ppid;
1439 		control->sinfo_tsn = chk->rec.data.tsn;
1440 		control->fsn_included = chk->rec.data.fsn;
1441 		control->data = chk->data;
1442 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1443 		chk->data = NULL;
1444 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1445 		sctp_setup_tail_pointer(control);
1446 		asoc->size_on_all_streams += control->length;
1447 	} else {
1448 		/* Place the chunk in our list */
1449 		int inserted = 0;
1450 
1451 		if (control->last_frag_seen == 0) {
1452 			/* Still willing to raise highest FSN seen */
1453 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1454 				SCTPDBG(SCTP_DEBUG_XXX,
1455 				    "We have a new top_fsn: %u\n",
1456 				    chk->rec.data.fsn);
1457 				control->top_fsn = chk->rec.data.fsn;
1458 			}
1459 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1460 				SCTPDBG(SCTP_DEBUG_XXX,
1461 				    "The last fsn is now in place fsn: %u\n",
1462 				    chk->rec.data.fsn);
1463 				control->last_frag_seen = 1;
1464 			}
1465 			if (asoc->idata_supported || control->first_frag_seen) {
1466 				/*
1467 				 * For IDATA we always check since we know
1468 				 * that the first fragment is 0. For old
1469 				 * DATA we have to receive the first before
1470 				 * we know the first FSN (which is the TSN).
1471 				 */
1472 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1473 					/*
1474 					 * We have already delivered up to
1475 					 * this so its a dup
1476 					 */
1477 					sctp_abort_in_reasm(stcb, control, chk,
1478 					    abort_flag,
1479 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1480 					return;
1481 				}
1482 			}
1483 		} else {
1484 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1485 				/* Second last? huh? */
1486 				SCTPDBG(SCTP_DEBUG_XXX,
1487 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1488 				    chk->rec.data.fsn, control->top_fsn);
1489 				sctp_abort_in_reasm(stcb, control,
1490 				    chk, abort_flag,
1491 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1492 				return;
1493 			}
1494 			if (asoc->idata_supported || control->first_frag_seen) {
1495 				/*
1496 				 * For IDATA we always check since we know
1497 				 * that the first fragment is 0. For old
1498 				 * DATA we have to receive the first before
1499 				 * we know the first FSN (which is the TSN).
1500 				 */
1501 
1502 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1503 					/*
1504 					 * We have already delivered up to
1505 					 * this so its a dup
1506 					 */
1507 					SCTPDBG(SCTP_DEBUG_XXX,
1508 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1509 					    chk->rec.data.fsn, control->fsn_included);
1510 					sctp_abort_in_reasm(stcb, control, chk,
1511 					    abort_flag,
1512 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1513 					return;
1514 				}
1515 			}
1516 			/*
1517 			 * validate not beyond top FSN if we have seen last
1518 			 * one
1519 			 */
1520 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1521 				SCTPDBG(SCTP_DEBUG_XXX,
1522 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1523 				    chk->rec.data.fsn,
1524 				    control->top_fsn);
1525 				sctp_abort_in_reasm(stcb, control, chk,
1526 				    abort_flag,
1527 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1528 				return;
1529 			}
1530 		}
1531 		/*
1532 		 * If we reach here, we need to place the new chunk in the
1533 		 * reassembly for this control.
1534 		 */
1535 		SCTPDBG(SCTP_DEBUG_XXX,
1536 		    "chunk is a not first fsn: %u needs to be inserted\n",
1537 		    chk->rec.data.fsn);
1538 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1539 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1540 				/*
1541 				 * This one in queue is bigger than the new
1542 				 * one, insert the new one before at.
1543 				 */
1544 				SCTPDBG(SCTP_DEBUG_XXX,
1545 				    "Insert it before fsn: %u\n",
1546 				    at->rec.data.fsn);
1547 				asoc->size_on_reasm_queue += chk->send_size;
1548 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1549 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1550 				inserted = 1;
1551 				break;
1552 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1553 				/*
1554 				 * Gak, He sent me a duplicate str seq
1555 				 * number
1556 				 */
1557 				/*
1558 				 * foo bar, I guess I will just free this
1559 				 * new guy, should we abort too? FIX ME
1560 				 * MAYBE? Or it COULD be that the SSN's have
1561 				 * wrapped. Maybe I should compare to TSN
1562 				 * somehow... sigh for now just blow away
1563 				 * the chunk!
1564 				 */
1565 				SCTPDBG(SCTP_DEBUG_XXX,
1566 				    "Duplicate to fsn: %u -- abort\n",
1567 				    at->rec.data.fsn);
1568 				sctp_abort_in_reasm(stcb, control,
1569 				    chk, abort_flag,
1570 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1571 				return;
1572 			}
1573 		}
1574 		if (inserted == 0) {
1575 			/* Goes on the end */
1576 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1577 			    chk->rec.data.fsn);
1578 			asoc->size_on_reasm_queue += chk->send_size;
1579 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1580 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1581 		}
1582 	}
1583 	/*
1584 	 * Ok lets see if we can suck any up into the control structure that
1585 	 * are in seq if it makes sense.
1586 	 */
1587 	do_wakeup = 0;
1588 	/*
1589 	 * If the first fragment has not been seen there is no sense in
1590 	 * looking.
1591 	 */
1592 	if (control->first_frag_seen) {
1593 		next_fsn = control->fsn_included + 1;
1594 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1595 			if (at->rec.data.fsn == next_fsn) {
1596 				/* We can add this one now to the control */
1597 				SCTPDBG(SCTP_DEBUG_XXX,
1598 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1599 				    control, at,
1600 				    at->rec.data.fsn,
1601 				    next_fsn, control->fsn_included);
1602 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1603 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1604 				if (control->on_read_q) {
1605 					do_wakeup = 1;
1606 				} else {
1607 					/*
1608 					 * We only add to the
1609 					 * size-on-all-streams if its not on
1610 					 * the read q. The read q flag will
1611 					 * cause a sballoc so its accounted
1612 					 * for there.
1613 					 */
1614 					asoc->size_on_all_streams += lenadded;
1615 				}
1616 				next_fsn++;
1617 				if (control->end_added && control->pdapi_started) {
1618 					if (strm->pd_api_started) {
1619 						strm->pd_api_started = 0;
1620 						control->pdapi_started = 0;
1621 					}
1622 					if (control->on_read_q == 0) {
1623 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1624 						    control,
1625 						    &stcb->sctp_socket->so_rcv, control->end_added,
1626 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1627 					}
1628 					break;
1629 				}
1630 			} else {
1631 				break;
1632 			}
1633 		}
1634 	}
1635 	if (do_wakeup) {
1636 		/* Need to wakeup the reader */
1637 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1638 	}
1639 }
1640 
1641 static struct sctp_queued_to_read *
1642 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1643 {
1644 	struct sctp_queued_to_read *control;
1645 
1646 	if (ordered) {
1647 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1648 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1649 				break;
1650 			}
1651 		}
1652 	} else {
1653 		if (idata_supported) {
1654 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1655 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1656 					break;
1657 				}
1658 			}
1659 		} else {
1660 			control = TAILQ_FIRST(&strm->uno_inqueue);
1661 		}
1662 	}
1663 	return (control);
1664 }
1665 
1666 static int
1667 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1668     struct mbuf **m, int offset, int chk_length,
1669     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1670     int *break_flag, int last_chunk, uint8_t chk_type)
1671 {
1672 	/* Process a data chunk */
1673 	/* struct sctp_tmit_chunk *chk; */
1674 	struct sctp_tmit_chunk *chk;
1675 	uint32_t tsn, fsn, gap, mid;
1676 	struct mbuf *dmbuf;
1677 	int the_len;
1678 	int need_reasm_check = 0;
1679 	uint16_t sid;
1680 	struct mbuf *op_err;
1681 	char msg[SCTP_DIAG_INFO_LEN];
1682 	struct sctp_queued_to_read *control, *ncontrol;
1683 	uint32_t ppid;
1684 	uint8_t chk_flags;
1685 	struct sctp_stream_reset_list *liste;
1686 	int ordered;
1687 	size_t clen;
1688 	int created_control = 0;
1689 
1690 	if (chk_type == SCTP_IDATA) {
1691 		struct sctp_idata_chunk *chunk, chunk_buf;
1692 
1693 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1694 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1695 		chk_flags = chunk->ch.chunk_flags;
1696 		clen = sizeof(struct sctp_idata_chunk);
1697 		tsn = ntohl(chunk->dp.tsn);
1698 		sid = ntohs(chunk->dp.sid);
1699 		mid = ntohl(chunk->dp.mid);
1700 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1701 			fsn = 0;
1702 			ppid = chunk->dp.ppid_fsn.ppid;
1703 		} else {
1704 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1705 			ppid = 0xffffffff;	/* Use as an invalid value. */
1706 		}
1707 	} else {
1708 		struct sctp_data_chunk *chunk, chunk_buf;
1709 
1710 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1711 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1712 		chk_flags = chunk->ch.chunk_flags;
1713 		clen = sizeof(struct sctp_data_chunk);
1714 		tsn = ntohl(chunk->dp.tsn);
1715 		sid = ntohs(chunk->dp.sid);
1716 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1717 		fsn = tsn;
1718 		ppid = chunk->dp.ppid;
1719 	}
1720 	if ((size_t)chk_length == clen) {
1721 		/*
1722 		 * Need to send an abort since we had a empty data chunk.
1723 		 */
1724 		op_err = sctp_generate_no_user_data_cause(tsn);
1725 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1726 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1727 		*abort_flag = 1;
1728 		return (0);
1729 	}
1730 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1731 		asoc->send_sack = 1;
1732 	}
1733 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1734 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1735 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1736 	}
1737 	if (stcb == NULL) {
1738 		return (0);
1739 	}
1740 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1741 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1742 		/* It is a duplicate */
1743 		SCTP_STAT_INCR(sctps_recvdupdata);
1744 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1745 			/* Record a dup for the next outbound sack */
1746 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1747 			asoc->numduptsns++;
1748 		}
1749 		asoc->send_sack = 1;
1750 		return (0);
1751 	}
1752 	/* Calculate the number of TSN's between the base and this TSN */
1753 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1754 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1755 		/* Can't hold the bit in the mapping at max array, toss it */
1756 		return (0);
1757 	}
1758 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1759 		SCTP_TCB_LOCK_ASSERT(stcb);
1760 		if (sctp_expand_mapping_array(asoc, gap)) {
1761 			/* Can't expand, drop it */
1762 			return (0);
1763 		}
1764 	}
1765 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1766 		*high_tsn = tsn;
1767 	}
1768 	/* See if we have received this one already */
1769 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1770 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1771 		SCTP_STAT_INCR(sctps_recvdupdata);
1772 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1773 			/* Record a dup for the next outbound sack */
1774 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1775 			asoc->numduptsns++;
1776 		}
1777 		asoc->send_sack = 1;
1778 		return (0);
1779 	}
1780 	/*
1781 	 * Check to see about the GONE flag, duplicates would cause a sack
1782 	 * to be sent up above
1783 	 */
1784 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1785 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1786 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1787 		/*
1788 		 * wait a minute, this guy is gone, there is no longer a
1789 		 * receiver. Send peer an ABORT!
1790 		 */
1791 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1792 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1793 		*abort_flag = 1;
1794 		return (0);
1795 	}
1796 	/*
1797 	 * Now before going further we see if there is room. If NOT then we
1798 	 * MAY let one through only IF this TSN is the one we are waiting
1799 	 * for on a partial delivery API.
1800 	 */
1801 
1802 	/* Is the stream valid? */
1803 	if (sid >= asoc->streamincnt) {
1804 		struct sctp_error_invalid_stream *cause;
1805 
1806 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1807 		    0, M_NOWAIT, 1, MT_DATA);
1808 		if (op_err != NULL) {
1809 			/* add some space up front so prepend will work well */
1810 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1811 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1812 			/*
1813 			 * Error causes are just param's and this one has
1814 			 * two back to back phdr, one with the error type
1815 			 * and size, the other with the streamid and a rsvd
1816 			 */
1817 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1818 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1819 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1820 			cause->stream_id = htons(sid);
1821 			cause->reserved = htons(0);
1822 			sctp_queue_op_err(stcb, op_err);
1823 		}
1824 		SCTP_STAT_INCR(sctps_badsid);
1825 		SCTP_TCB_LOCK_ASSERT(stcb);
1826 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1827 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1828 			asoc->highest_tsn_inside_nr_map = tsn;
1829 		}
1830 		if (tsn == (asoc->cumulative_tsn + 1)) {
1831 			/* Update cum-ack */
1832 			asoc->cumulative_tsn = tsn;
1833 		}
1834 		return (0);
1835 	}
1836 	/*
1837 	 * If its a fragmented message, lets see if we can find the control
1838 	 * on the reassembly queues.
1839 	 */
1840 	if ((chk_type == SCTP_IDATA) &&
1841 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1842 	    (fsn == 0)) {
1843 		/*
1844 		 * The first *must* be fsn 0, and other (middle/end) pieces
1845 		 * can *not* be fsn 0. XXX: This can happen in case of a
1846 		 * wrap around. Ignore is for now.
1847 		 */
1848 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1849 		    mid, chk_flags);
1850 		goto err_out;
1851 	}
1852 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1853 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1854 	    chk_flags, control);
1855 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1856 		/* See if we can find the re-assembly entity */
1857 		if (control != NULL) {
1858 			/* We found something, does it belong? */
1859 			if (ordered && (mid != control->mid)) {
1860 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1861 		err_out:
1862 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1863 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1864 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1865 				*abort_flag = 1;
1866 				return (0);
1867 			}
1868 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1869 				/*
1870 				 * We can't have a switched order with an
1871 				 * unordered chunk
1872 				 */
1873 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1874 				    tsn);
1875 				goto err_out;
1876 			}
1877 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1878 				/*
1879 				 * We can't have a switched unordered with a
1880 				 * ordered chunk
1881 				 */
1882 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1883 				    tsn);
1884 				goto err_out;
1885 			}
1886 		}
1887 	} else {
1888 		/*
1889 		 * Its a complete segment. Lets validate we don't have a
1890 		 * re-assembly going on with the same Stream/Seq (for
1891 		 * ordered) or in the same Stream for unordered.
1892 		 */
1893 		if (control != NULL) {
1894 			if (ordered || asoc->idata_supported) {
1895 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1896 				    chk_flags, mid);
1897 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1898 				goto err_out;
1899 			} else {
1900 				if ((tsn == control->fsn_included + 1) &&
1901 				    (control->end_added == 0)) {
1902 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1903 					goto err_out;
1904 				} else {
1905 					control = NULL;
1906 				}
1907 			}
1908 		}
1909 	}
1910 	/* now do the tests */
1911 	if (((asoc->cnt_on_all_streams +
1912 	    asoc->cnt_on_reasm_queue +
1913 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1914 	    (((int)asoc->my_rwnd) <= 0)) {
1915 		/*
1916 		 * When we have NO room in the rwnd we check to make sure
1917 		 * the reader is doing its job...
1918 		 */
1919 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1920 			/* some to read, wake-up */
1921 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1922 			struct socket *so;
1923 
1924 			so = SCTP_INP_SO(stcb->sctp_ep);
1925 			atomic_add_int(&stcb->asoc.refcnt, 1);
1926 			SCTP_TCB_UNLOCK(stcb);
1927 			SCTP_SOCKET_LOCK(so, 1);
1928 			SCTP_TCB_LOCK(stcb);
1929 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1930 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1931 				/* assoc was freed while we were unlocked */
1932 				SCTP_SOCKET_UNLOCK(so, 1);
1933 				return (0);
1934 			}
1935 #endif
1936 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1937 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1938 			SCTP_SOCKET_UNLOCK(so, 1);
1939 #endif
1940 		}
1941 		/* now is it in the mapping array of what we have accepted? */
1942 		if (chk_type == SCTP_DATA) {
1943 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1944 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1945 				/* Nope not in the valid range dump it */
1946 		dump_packet:
1947 				sctp_set_rwnd(stcb, asoc);
1948 				if ((asoc->cnt_on_all_streams +
1949 				    asoc->cnt_on_reasm_queue +
1950 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1951 					SCTP_STAT_INCR(sctps_datadropchklmt);
1952 				} else {
1953 					SCTP_STAT_INCR(sctps_datadroprwnd);
1954 				}
1955 				*break_flag = 1;
1956 				return (0);
1957 			}
1958 		} else {
1959 			if (control == NULL) {
1960 				goto dump_packet;
1961 			}
1962 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1963 				goto dump_packet;
1964 			}
1965 		}
1966 	}
1967 #ifdef SCTP_ASOCLOG_OF_TSNS
1968 	SCTP_TCB_LOCK_ASSERT(stcb);
1969 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1970 		asoc->tsn_in_at = 0;
1971 		asoc->tsn_in_wrapped = 1;
1972 	}
1973 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1974 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1975 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1976 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1977 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1978 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1979 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1980 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1981 	asoc->tsn_in_at++;
1982 #endif
1983 	/*
1984 	 * Before we continue lets validate that we are not being fooled by
1985 	 * an evil attacker. We can only have Nk chunks based on our TSN
1986 	 * spread allowed by the mapping array N * 8 bits, so there is no
1987 	 * way our stream sequence numbers could have wrapped. We of course
1988 	 * only validate the FIRST fragment so the bit must be set.
1989 	 */
1990 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1991 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1992 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1993 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1994 		/* The incoming sseq is behind where we last delivered? */
1995 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1996 		    mid, asoc->strmin[sid].last_mid_delivered);
1997 
1998 		if (asoc->idata_supported) {
1999 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2000 			    asoc->strmin[sid].last_mid_delivered,
2001 			    tsn,
2002 			    sid,
2003 			    mid);
2004 		} else {
2005 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2006 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2007 			    tsn,
2008 			    sid,
2009 			    (uint16_t)mid);
2010 		}
2011 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2012 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2013 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2014 		*abort_flag = 1;
2015 		return (0);
2016 	}
2017 	if (chk_type == SCTP_IDATA) {
2018 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2019 	} else {
2020 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2021 	}
2022 	if (last_chunk == 0) {
2023 		if (chk_type == SCTP_IDATA) {
2024 			dmbuf = SCTP_M_COPYM(*m,
2025 			    (offset + sizeof(struct sctp_idata_chunk)),
2026 			    the_len, M_NOWAIT);
2027 		} else {
2028 			dmbuf = SCTP_M_COPYM(*m,
2029 			    (offset + sizeof(struct sctp_data_chunk)),
2030 			    the_len, M_NOWAIT);
2031 		}
2032 #ifdef SCTP_MBUF_LOGGING
2033 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2034 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2035 		}
2036 #endif
2037 	} else {
2038 		/* We can steal the last chunk */
2039 		int l_len;
2040 
2041 		dmbuf = *m;
2042 		/* lop off the top part */
2043 		if (chk_type == SCTP_IDATA) {
2044 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2045 		} else {
2046 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2047 		}
2048 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2049 			l_len = SCTP_BUF_LEN(dmbuf);
2050 		} else {
2051 			/*
2052 			 * need to count up the size hopefully does not hit
2053 			 * this to often :-0
2054 			 */
2055 			struct mbuf *lat;
2056 
2057 			l_len = 0;
2058 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2059 				l_len += SCTP_BUF_LEN(lat);
2060 			}
2061 		}
2062 		if (l_len > the_len) {
2063 			/* Trim the end round bytes off  too */
2064 			m_adj(dmbuf, -(l_len - the_len));
2065 		}
2066 	}
2067 	if (dmbuf == NULL) {
2068 		SCTP_STAT_INCR(sctps_nomem);
2069 		return (0);
2070 	}
2071 	/*
2072 	 * Now no matter what, we need a control, get one if we don't have
2073 	 * one (we may have gotten it above when we found the message was
2074 	 * fragmented
2075 	 */
2076 	if (control == NULL) {
2077 		sctp_alloc_a_readq(stcb, control);
2078 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2079 		    ppid,
2080 		    sid,
2081 		    chk_flags,
2082 		    NULL, fsn, mid);
2083 		if (control == NULL) {
2084 			SCTP_STAT_INCR(sctps_nomem);
2085 			return (0);
2086 		}
2087 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2088 			struct mbuf *mm;
2089 
2090 			control->data = dmbuf;
2091 			for (mm = control->data; mm; mm = mm->m_next) {
2092 				control->length += SCTP_BUF_LEN(mm);
2093 			}
2094 			control->tail_mbuf = NULL;
2095 			control->end_added = 1;
2096 			control->last_frag_seen = 1;
2097 			control->first_frag_seen = 1;
2098 			control->fsn_included = fsn;
2099 			control->top_fsn = fsn;
2100 		}
2101 		created_control = 1;
2102 	}
2103 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2104 	    chk_flags, ordered, mid, control);
2105 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2106 	    TAILQ_EMPTY(&asoc->resetHead) &&
2107 	    ((ordered == 0) ||
2108 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2109 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2110 		/* Candidate for express delivery */
2111 		/*
2112 		 * Its not fragmented, No PD-API is up, Nothing in the
2113 		 * delivery queue, Its un-ordered OR ordered and the next to
2114 		 * deliver AND nothing else is stuck on the stream queue,
2115 		 * And there is room for it in the socket buffer. Lets just
2116 		 * stuff it up the buffer....
2117 		 */
2118 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2119 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2120 			asoc->highest_tsn_inside_nr_map = tsn;
2121 		}
2122 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2123 		    control, mid);
2124 
2125 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2126 		    control, &stcb->sctp_socket->so_rcv,
2127 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2128 
2129 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2130 			/* for ordered, bump what we delivered */
2131 			asoc->strmin[sid].last_mid_delivered++;
2132 		}
2133 		SCTP_STAT_INCR(sctps_recvexpress);
2134 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2135 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2136 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2137 		}
2138 		control = NULL;
2139 		goto finish_express_del;
2140 	}
2141 	/* Now will we need a chunk too? */
2142 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2143 		sctp_alloc_a_chunk(stcb, chk);
2144 		if (chk == NULL) {
2145 			/* No memory so we drop the chunk */
2146 			SCTP_STAT_INCR(sctps_nomem);
2147 			if (last_chunk == 0) {
2148 				/* we copied it, free the copy */
2149 				sctp_m_freem(dmbuf);
2150 			}
2151 			return (0);
2152 		}
2153 		chk->rec.data.tsn = tsn;
2154 		chk->no_fr_allowed = 0;
2155 		chk->rec.data.fsn = fsn;
2156 		chk->rec.data.mid = mid;
2157 		chk->rec.data.sid = sid;
2158 		chk->rec.data.ppid = ppid;
2159 		chk->rec.data.context = stcb->asoc.context;
2160 		chk->rec.data.doing_fast_retransmit = 0;
2161 		chk->rec.data.rcv_flags = chk_flags;
2162 		chk->asoc = asoc;
2163 		chk->send_size = the_len;
2164 		chk->whoTo = net;
2165 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2166 		    chk,
2167 		    control, mid);
2168 		atomic_add_int(&net->ref_count, 1);
2169 		chk->data = dmbuf;
2170 	}
2171 	/* Set the appropriate TSN mark */
2172 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2173 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2174 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2175 			asoc->highest_tsn_inside_nr_map = tsn;
2176 		}
2177 	} else {
2178 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2179 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2180 			asoc->highest_tsn_inside_map = tsn;
2181 		}
2182 	}
2183 	/* Now is it complete (i.e. not fragmented)? */
2184 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2185 		/*
2186 		 * Special check for when streams are resetting. We could be
2187 		 * more smart about this and check the actual stream to see
2188 		 * if it is not being reset.. that way we would not create a
2189 		 * HOLB when amongst streams being reset and those not being
2190 		 * reset.
2191 		 *
2192 		 */
2193 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2194 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2195 			/*
2196 			 * yep its past where we need to reset... go ahead
2197 			 * and queue it.
2198 			 */
2199 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2200 				/* first one on */
2201 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2202 			} else {
2203 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2204 				unsigned char inserted = 0;
2205 
2206 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2207 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2208 
2209 						continue;
2210 					} else {
2211 						/* found it */
2212 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2213 						inserted = 1;
2214 						break;
2215 					}
2216 				}
2217 				if (inserted == 0) {
2218 					/*
2219 					 * must be put at end, use prevP
2220 					 * (all setup from loop) to setup
2221 					 * nextP.
2222 					 */
2223 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2224 				}
2225 			}
2226 			goto finish_express_del;
2227 		}
2228 		if (chk_flags & SCTP_DATA_UNORDERED) {
2229 			/* queue directly into socket buffer */
2230 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2231 			    control, mid);
2232 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2233 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2234 			    control,
2235 			    &stcb->sctp_socket->so_rcv, 1,
2236 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2237 
2238 		} else {
2239 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2240 			    mid);
2241 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2242 			if (*abort_flag) {
2243 				if (last_chunk) {
2244 					*m = NULL;
2245 				}
2246 				return (0);
2247 			}
2248 		}
2249 		goto finish_express_del;
2250 	}
2251 	/* If we reach here its a reassembly */
2252 	need_reasm_check = 1;
2253 	SCTPDBG(SCTP_DEBUG_XXX,
2254 	    "Queue data to stream for reasm control: %p MID: %u\n",
2255 	    control, mid);
2256 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2257 	if (*abort_flag) {
2258 		/*
2259 		 * the assoc is now gone and chk was put onto the reasm
2260 		 * queue, which has all been freed.
2261 		 */
2262 		if (last_chunk) {
2263 			*m = NULL;
2264 		}
2265 		return (0);
2266 	}
2267 finish_express_del:
2268 	/* Here we tidy up things */
2269 	if (tsn == (asoc->cumulative_tsn + 1)) {
2270 		/* Update cum-ack */
2271 		asoc->cumulative_tsn = tsn;
2272 	}
2273 	if (last_chunk) {
2274 		*m = NULL;
2275 	}
2276 	if (ordered) {
2277 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2278 	} else {
2279 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2280 	}
2281 	SCTP_STAT_INCR(sctps_recvdata);
2282 	/* Set it present please */
2283 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2284 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2285 	}
2286 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2287 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2288 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2289 	}
2290 	if (need_reasm_check) {
2291 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2292 		need_reasm_check = 0;
2293 	}
2294 	/* check the special flag for stream resets */
2295 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2296 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2297 		/*
2298 		 * we have finished working through the backlogged TSN's now
2299 		 * time to reset streams. 1: call reset function. 2: free
2300 		 * pending_reply space 3: distribute any chunks in
2301 		 * pending_reply_queue.
2302 		 */
2303 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2304 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2305 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2306 		SCTP_FREE(liste, SCTP_M_STRESET);
2307 		/* sa_ignore FREED_MEMORY */
2308 		liste = TAILQ_FIRST(&asoc->resetHead);
2309 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2310 			/* All can be removed */
2311 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2312 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2313 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2314 				if (*abort_flag) {
2315 					return (0);
2316 				}
2317 				if (need_reasm_check) {
2318 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2319 					need_reasm_check = 0;
2320 				}
2321 			}
2322 		} else {
2323 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2324 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2325 					break;
2326 				}
2327 				/*
2328 				 * if control->sinfo_tsn is <= liste->tsn we
2329 				 * can process it which is the NOT of
2330 				 * control->sinfo_tsn > liste->tsn
2331 				 */
2332 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2333 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2334 				if (*abort_flag) {
2335 					return (0);
2336 				}
2337 				if (need_reasm_check) {
2338 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2339 					need_reasm_check = 0;
2340 				}
2341 			}
2342 		}
2343 	}
2344 	return (1);
2345 }
2346 
2347 static const int8_t sctp_map_lookup_tab[256] = {
2348 	0, 1, 0, 2, 0, 1, 0, 3,
2349 	0, 1, 0, 2, 0, 1, 0, 4,
2350 	0, 1, 0, 2, 0, 1, 0, 3,
2351 	0, 1, 0, 2, 0, 1, 0, 5,
2352 	0, 1, 0, 2, 0, 1, 0, 3,
2353 	0, 1, 0, 2, 0, 1, 0, 4,
2354 	0, 1, 0, 2, 0, 1, 0, 3,
2355 	0, 1, 0, 2, 0, 1, 0, 6,
2356 	0, 1, 0, 2, 0, 1, 0, 3,
2357 	0, 1, 0, 2, 0, 1, 0, 4,
2358 	0, 1, 0, 2, 0, 1, 0, 3,
2359 	0, 1, 0, 2, 0, 1, 0, 5,
2360 	0, 1, 0, 2, 0, 1, 0, 3,
2361 	0, 1, 0, 2, 0, 1, 0, 4,
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 7,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 4,
2366 	0, 1, 0, 2, 0, 1, 0, 3,
2367 	0, 1, 0, 2, 0, 1, 0, 5,
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 4,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 6,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 4,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 5,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 4,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 8
2380 };
2381 
2382 
2383 void
2384 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2385 {
2386 	/*
2387 	 * Now we also need to check the mapping array in a couple of ways.
2388 	 * 1) Did we move the cum-ack point?
2389 	 *
2390 	 * When you first glance at this you might think that all entries
2391 	 * that make up the position of the cum-ack would be in the
2392 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2393 	 * deliverable. Thats true with one exception, when its a fragmented
2394 	 * message we may not deliver the data until some threshold (or all
2395 	 * of it) is in place. So we must OR the nr_mapping_array and
2396 	 * mapping_array to get a true picture of the cum-ack.
2397 	 */
2398 	struct sctp_association *asoc;
2399 	int at;
2400 	uint8_t val;
2401 	int slide_from, slide_end, lgap, distance;
2402 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2403 
2404 	asoc = &stcb->asoc;
2405 
2406 	old_cumack = asoc->cumulative_tsn;
2407 	old_base = asoc->mapping_array_base_tsn;
2408 	old_highest = asoc->highest_tsn_inside_map;
2409 	/*
2410 	 * We could probably improve this a small bit by calculating the
2411 	 * offset of the current cum-ack as the starting point.
2412 	 */
2413 	at = 0;
2414 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2415 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2416 		if (val == 0xff) {
2417 			at += 8;
2418 		} else {
2419 			/* there is a 0 bit */
2420 			at += sctp_map_lookup_tab[val];
2421 			break;
2422 		}
2423 	}
2424 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2425 
2426 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2427 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2428 #ifdef INVARIANTS
2429 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2430 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2431 #else
2432 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2433 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2434 		sctp_print_mapping_array(asoc);
2435 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2436 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2437 		}
2438 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2439 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2440 #endif
2441 	}
2442 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2443 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2444 	} else {
2445 		highest_tsn = asoc->highest_tsn_inside_map;
2446 	}
2447 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2448 		/* The complete array was completed by a single FR */
2449 		/* highest becomes the cum-ack */
2450 		int clr;
2451 #ifdef INVARIANTS
2452 		unsigned int i;
2453 #endif
2454 
2455 		/* clear the array */
2456 		clr = ((at + 7) >> 3);
2457 		if (clr > asoc->mapping_array_size) {
2458 			clr = asoc->mapping_array_size;
2459 		}
2460 		memset(asoc->mapping_array, 0, clr);
2461 		memset(asoc->nr_mapping_array, 0, clr);
2462 #ifdef INVARIANTS
2463 		for (i = 0; i < asoc->mapping_array_size; i++) {
2464 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2465 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2466 				sctp_print_mapping_array(asoc);
2467 			}
2468 		}
2469 #endif
2470 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2471 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2472 	} else if (at >= 8) {
2473 		/* we can slide the mapping array down */
2474 		/* slide_from holds where we hit the first NON 0xff byte */
2475 
2476 		/*
2477 		 * now calculate the ceiling of the move using our highest
2478 		 * TSN value
2479 		 */
2480 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2481 		slide_end = (lgap >> 3);
2482 		if (slide_end < slide_from) {
2483 			sctp_print_mapping_array(asoc);
2484 #ifdef INVARIANTS
2485 			panic("impossible slide");
2486 #else
2487 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2488 			    lgap, slide_end, slide_from, at);
2489 			return;
2490 #endif
2491 		}
2492 		if (slide_end > asoc->mapping_array_size) {
2493 #ifdef INVARIANTS
2494 			panic("would overrun buffer");
2495 #else
2496 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2497 			    asoc->mapping_array_size, slide_end);
2498 			slide_end = asoc->mapping_array_size;
2499 #endif
2500 		}
2501 		distance = (slide_end - slide_from) + 1;
2502 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2503 			sctp_log_map(old_base, old_cumack, old_highest,
2504 			    SCTP_MAP_PREPARE_SLIDE);
2505 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2506 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2507 		}
2508 		if (distance + slide_from > asoc->mapping_array_size ||
2509 		    distance < 0) {
2510 			/*
2511 			 * Here we do NOT slide forward the array so that
2512 			 * hopefully when more data comes in to fill it up
2513 			 * we will be able to slide it forward. Really I
2514 			 * don't think this should happen :-0
2515 			 */
2516 
2517 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2518 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2519 				    (uint32_t)asoc->mapping_array_size,
2520 				    SCTP_MAP_SLIDE_NONE);
2521 			}
2522 		} else {
2523 			int ii;
2524 
2525 			for (ii = 0; ii < distance; ii++) {
2526 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2527 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2528 
2529 			}
2530 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2531 				asoc->mapping_array[ii] = 0;
2532 				asoc->nr_mapping_array[ii] = 0;
2533 			}
2534 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2535 				asoc->highest_tsn_inside_map += (slide_from << 3);
2536 			}
2537 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2538 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2539 			}
2540 			asoc->mapping_array_base_tsn += (slide_from << 3);
2541 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2542 				sctp_log_map(asoc->mapping_array_base_tsn,
2543 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2544 				    SCTP_MAP_SLIDE_RESULT);
2545 			}
2546 		}
2547 	}
2548 }
2549 
2550 void
2551 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2552 {
2553 	struct sctp_association *asoc;
2554 	uint32_t highest_tsn;
2555 	int is_a_gap;
2556 
2557 	sctp_slide_mapping_arrays(stcb);
2558 	asoc = &stcb->asoc;
2559 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2560 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2561 	} else {
2562 		highest_tsn = asoc->highest_tsn_inside_map;
2563 	}
2564 	/* Is there a gap now? */
2565 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2566 
2567 	/*
2568 	 * Now we need to see if we need to queue a sack or just start the
2569 	 * timer (if allowed).
2570 	 */
2571 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2572 		/*
2573 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2574 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2575 		 * SACK
2576 		 */
2577 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2578 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2579 			    stcb->sctp_ep, stcb, NULL,
2580 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2581 		}
2582 		sctp_send_shutdown(stcb,
2583 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2584 		if (is_a_gap) {
2585 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2586 		}
2587 	} else {
2588 		/*
2589 		 * CMT DAC algorithm: increase number of packets received
2590 		 * since last ack
2591 		 */
2592 		stcb->asoc.cmt_dac_pkts_rcvd++;
2593 
2594 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2595 							 * SACK */
2596 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2597 							 * longer is one */
2598 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2599 		    (is_a_gap) ||	/* is still a gap */
2600 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2601 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2602 		    ) {
2603 
2604 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2605 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2606 			    (stcb->asoc.send_sack == 0) &&
2607 			    (stcb->asoc.numduptsns == 0) &&
2608 			    (stcb->asoc.delayed_ack) &&
2609 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2610 
2611 				/*
2612 				 * CMT DAC algorithm: With CMT, delay acks
2613 				 * even in the face of
2614 				 *
2615 				 * reordering. Therefore, if acks that do
2616 				 * not have to be sent because of the above
2617 				 * reasons, will be delayed. That is, acks
2618 				 * that would have been sent due to gap
2619 				 * reports will be delayed with DAC. Start
2620 				 * the delayed ack timer.
2621 				 */
2622 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2623 				    stcb->sctp_ep, stcb, NULL);
2624 			} else {
2625 				/*
2626 				 * Ok we must build a SACK since the timer
2627 				 * is pending, we got our first packet OR
2628 				 * there are gaps or duplicates.
2629 				 */
2630 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2631 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2632 			}
2633 		} else {
2634 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2635 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2636 				    stcb->sctp_ep, stcb, NULL);
2637 			}
2638 		}
2639 	}
2640 }
2641 
2642 int
2643 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2644     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2645     struct sctp_nets *net, uint32_t *high_tsn)
2646 {
2647 	struct sctp_chunkhdr *ch, chunk_buf;
2648 	struct sctp_association *asoc;
2649 	int num_chunks = 0;	/* number of control chunks processed */
2650 	int stop_proc = 0;
2651 	int break_flag, last_chunk;
2652 	int abort_flag = 0, was_a_gap;
2653 	struct mbuf *m;
2654 	uint32_t highest_tsn;
2655 	uint16_t chk_length;
2656 
2657 	/* set the rwnd */
2658 	sctp_set_rwnd(stcb, &stcb->asoc);
2659 
2660 	m = *mm;
2661 	SCTP_TCB_LOCK_ASSERT(stcb);
2662 	asoc = &stcb->asoc;
2663 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2664 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2665 	} else {
2666 		highest_tsn = asoc->highest_tsn_inside_map;
2667 	}
2668 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2669 	/*
2670 	 * setup where we got the last DATA packet from for any SACK that
2671 	 * may need to go out. Don't bump the net. This is done ONLY when a
2672 	 * chunk is assigned.
2673 	 */
2674 	asoc->last_data_chunk_from = net;
2675 
2676 	/*-
2677 	 * Now before we proceed we must figure out if this is a wasted
2678 	 * cluster... i.e. it is a small packet sent in and yet the driver
2679 	 * underneath allocated a full cluster for it. If so we must copy it
2680 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2681 	 * with cluster starvation. Note for __Panda__ we don't do this
2682 	 * since it has clusters all the way down to 64 bytes.
2683 	 */
2684 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2685 		/* we only handle mbufs that are singletons.. not chains */
2686 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2687 		if (m) {
2688 			/* ok lets see if we can copy the data up */
2689 			caddr_t *from, *to;
2690 
2691 			/* get the pointers and copy */
2692 			to = mtod(m, caddr_t *);
2693 			from = mtod((*mm), caddr_t *);
2694 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2695 			/* copy the length and free up the old */
2696 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2697 			sctp_m_freem(*mm);
2698 			/* success, back copy */
2699 			*mm = m;
2700 		} else {
2701 			/* We are in trouble in the mbuf world .. yikes */
2702 			m = *mm;
2703 		}
2704 	}
2705 	/* get pointer to the first chunk header */
2706 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2707 	    sizeof(struct sctp_chunkhdr),
2708 	    (uint8_t *)&chunk_buf);
2709 	if (ch == NULL) {
2710 		return (1);
2711 	}
2712 	/*
2713 	 * process all DATA chunks...
2714 	 */
2715 	*high_tsn = asoc->cumulative_tsn;
2716 	break_flag = 0;
2717 	asoc->data_pkts_seen++;
2718 	while (stop_proc == 0) {
2719 		/* validate chunk length */
2720 		chk_length = ntohs(ch->chunk_length);
2721 		if (length - *offset < chk_length) {
2722 			/* all done, mutulated chunk */
2723 			stop_proc = 1;
2724 			continue;
2725 		}
2726 		if ((asoc->idata_supported == 1) &&
2727 		    (ch->chunk_type == SCTP_DATA)) {
2728 			struct mbuf *op_err;
2729 			char msg[SCTP_DIAG_INFO_LEN];
2730 
2731 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2732 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2733 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2734 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2735 			return (2);
2736 		}
2737 		if ((asoc->idata_supported == 0) &&
2738 		    (ch->chunk_type == SCTP_IDATA)) {
2739 			struct mbuf *op_err;
2740 			char msg[SCTP_DIAG_INFO_LEN];
2741 
2742 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2743 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2744 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2745 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2746 			return (2);
2747 		}
2748 		if ((ch->chunk_type == SCTP_DATA) ||
2749 		    (ch->chunk_type == SCTP_IDATA)) {
2750 			uint16_t clen;
2751 
2752 			if (ch->chunk_type == SCTP_DATA) {
2753 				clen = sizeof(struct sctp_data_chunk);
2754 			} else {
2755 				clen = sizeof(struct sctp_idata_chunk);
2756 			}
2757 			if (chk_length < clen) {
2758 				/*
2759 				 * Need to send an abort since we had a
2760 				 * invalid data chunk.
2761 				 */
2762 				struct mbuf *op_err;
2763 				char msg[SCTP_DIAG_INFO_LEN];
2764 
2765 				snprintf(msg, sizeof(msg), "%s chunk of length %u",
2766 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2767 				    chk_length);
2768 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2769 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2770 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2771 				return (2);
2772 			}
2773 #ifdef SCTP_AUDITING_ENABLED
2774 			sctp_audit_log(0xB1, 0);
2775 #endif
2776 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2777 				last_chunk = 1;
2778 			} else {
2779 				last_chunk = 0;
2780 			}
2781 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2782 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2783 			    last_chunk, ch->chunk_type)) {
2784 				num_chunks++;
2785 			}
2786 			if (abort_flag)
2787 				return (2);
2788 
2789 			if (break_flag) {
2790 				/*
2791 				 * Set because of out of rwnd space and no
2792 				 * drop rep space left.
2793 				 */
2794 				stop_proc = 1;
2795 				continue;
2796 			}
2797 		} else {
2798 			/* not a data chunk in the data region */
2799 			switch (ch->chunk_type) {
2800 			case SCTP_INITIATION:
2801 			case SCTP_INITIATION_ACK:
2802 			case SCTP_SELECTIVE_ACK:
2803 			case SCTP_NR_SELECTIVE_ACK:
2804 			case SCTP_HEARTBEAT_REQUEST:
2805 			case SCTP_HEARTBEAT_ACK:
2806 			case SCTP_ABORT_ASSOCIATION:
2807 			case SCTP_SHUTDOWN:
2808 			case SCTP_SHUTDOWN_ACK:
2809 			case SCTP_OPERATION_ERROR:
2810 			case SCTP_COOKIE_ECHO:
2811 			case SCTP_COOKIE_ACK:
2812 			case SCTP_ECN_ECHO:
2813 			case SCTP_ECN_CWR:
2814 			case SCTP_SHUTDOWN_COMPLETE:
2815 			case SCTP_AUTHENTICATION:
2816 			case SCTP_ASCONF_ACK:
2817 			case SCTP_PACKET_DROPPED:
2818 			case SCTP_STREAM_RESET:
2819 			case SCTP_FORWARD_CUM_TSN:
2820 			case SCTP_ASCONF:
2821 				{
2822 					/*
2823 					 * Now, what do we do with KNOWN
2824 					 * chunks that are NOT in the right
2825 					 * place?
2826 					 *
2827 					 * For now, I do nothing but ignore
2828 					 * them. We may later want to add
2829 					 * sysctl stuff to switch out and do
2830 					 * either an ABORT() or possibly
2831 					 * process them.
2832 					 */
2833 					struct mbuf *op_err;
2834 					char msg[SCTP_DIAG_INFO_LEN];
2835 
2836 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2837 					    ch->chunk_type);
2838 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2839 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2840 					return (2);
2841 				}
2842 			default:
2843 				/*
2844 				 * Unknown chunk type: use bit rules after
2845 				 * checking length
2846 				 */
2847 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2848 					/*
2849 					 * Need to send an abort since we
2850 					 * had a invalid chunk.
2851 					 */
2852 					struct mbuf *op_err;
2853 					char msg[SCTP_DIAG_INFO_LEN];
2854 
2855 					snprintf(msg, sizeof(msg), "Chunk of length %u",
2856 					    chk_length);
2857 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2858 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2859 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2860 					return (2);
2861 				}
2862 				if (ch->chunk_type & 0x40) {
2863 					/* Add a error report to the queue */
2864 					struct mbuf *op_err;
2865 					struct sctp_gen_error_cause *cause;
2866 
2867 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2868 					    0, M_NOWAIT, 1, MT_DATA);
2869 					if (op_err != NULL) {
2870 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2871 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2872 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2873 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2874 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2875 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2876 							sctp_queue_op_err(stcb, op_err);
2877 						} else {
2878 							sctp_m_freem(op_err);
2879 						}
2880 					}
2881 				}
2882 				if ((ch->chunk_type & 0x80) == 0) {
2883 					/* discard the rest of this packet */
2884 					stop_proc = 1;
2885 				}	/* else skip this bad chunk and
2886 					 * continue... */
2887 				break;
2888 			}	/* switch of chunk type */
2889 		}
2890 		*offset += SCTP_SIZE32(chk_length);
2891 		if ((*offset >= length) || stop_proc) {
2892 			/* no more data left in the mbuf chain */
2893 			stop_proc = 1;
2894 			continue;
2895 		}
2896 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2897 		    sizeof(struct sctp_chunkhdr),
2898 		    (uint8_t *)&chunk_buf);
2899 		if (ch == NULL) {
2900 			*offset = length;
2901 			stop_proc = 1;
2902 			continue;
2903 		}
2904 	}
2905 	if (break_flag) {
2906 		/*
2907 		 * we need to report rwnd overrun drops.
2908 		 */
2909 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2910 	}
2911 	if (num_chunks) {
2912 		/*
2913 		 * Did we get data, if so update the time for auto-close and
2914 		 * give peer credit for being alive.
2915 		 */
2916 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2917 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2918 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2919 			    stcb->asoc.overall_error_count,
2920 			    0,
2921 			    SCTP_FROM_SCTP_INDATA,
2922 			    __LINE__);
2923 		}
2924 		stcb->asoc.overall_error_count = 0;
2925 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2926 	}
2927 	/* now service all of the reassm queue if needed */
2928 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2929 		/* Assure that we ack right away */
2930 		stcb->asoc.send_sack = 1;
2931 	}
2932 	/* Start a sack timer or QUEUE a SACK for sending */
2933 	sctp_sack_check(stcb, was_a_gap);
2934 	return (0);
2935 }
2936 
2937 static int
2938 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2939     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2940     int *num_frs,
2941     uint32_t *biggest_newly_acked_tsn,
2942     uint32_t *this_sack_lowest_newack,
2943     int *rto_ok)
2944 {
2945 	struct sctp_tmit_chunk *tp1;
2946 	unsigned int theTSN;
2947 	int j, wake_him = 0, circled = 0;
2948 
2949 	/* Recover the tp1 we last saw */
2950 	tp1 = *p_tp1;
2951 	if (tp1 == NULL) {
2952 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2953 	}
2954 	for (j = frag_strt; j <= frag_end; j++) {
2955 		theTSN = j + last_tsn;
2956 		while (tp1) {
2957 			if (tp1->rec.data.doing_fast_retransmit)
2958 				(*num_frs) += 1;
2959 
2960 			/*-
2961 			 * CMT: CUCv2 algorithm. For each TSN being
2962 			 * processed from the sent queue, track the
2963 			 * next expected pseudo-cumack, or
2964 			 * rtx_pseudo_cumack, if required. Separate
2965 			 * cumack trackers for first transmissions,
2966 			 * and retransmissions.
2967 			 */
2968 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2969 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2970 			    (tp1->snd_count == 1)) {
2971 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2972 				tp1->whoTo->find_pseudo_cumack = 0;
2973 			}
2974 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2975 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2976 			    (tp1->snd_count > 1)) {
2977 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2978 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2979 			}
2980 			if (tp1->rec.data.tsn == theTSN) {
2981 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2982 					/*-
2983 					 * must be held until
2984 					 * cum-ack passes
2985 					 */
2986 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2987 						/*-
2988 						 * If it is less than RESEND, it is
2989 						 * now no-longer in flight.
2990 						 * Higher values may already be set
2991 						 * via previous Gap Ack Blocks...
2992 						 * i.e. ACKED or RESEND.
2993 						 */
2994 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2995 						    *biggest_newly_acked_tsn)) {
2996 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2997 						}
2998 						/*-
2999 						 * CMT: SFR algo (and HTNA) - set
3000 						 * saw_newack to 1 for dest being
3001 						 * newly acked. update
3002 						 * this_sack_highest_newack if
3003 						 * appropriate.
3004 						 */
3005 						if (tp1->rec.data.chunk_was_revoked == 0)
3006 							tp1->whoTo->saw_newack = 1;
3007 
3008 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3009 						    tp1->whoTo->this_sack_highest_newack)) {
3010 							tp1->whoTo->this_sack_highest_newack =
3011 							    tp1->rec.data.tsn;
3012 						}
3013 						/*-
3014 						 * CMT DAC algo: also update
3015 						 * this_sack_lowest_newack
3016 						 */
3017 						if (*this_sack_lowest_newack == 0) {
3018 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3019 								sctp_log_sack(*this_sack_lowest_newack,
3020 								    last_tsn,
3021 								    tp1->rec.data.tsn,
3022 								    0,
3023 								    0,
3024 								    SCTP_LOG_TSN_ACKED);
3025 							}
3026 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3027 						}
3028 						/*-
3029 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3030 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3031 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3032 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3033 						 * Separate pseudo_cumack trackers for first transmissions and
3034 						 * retransmissions.
3035 						 */
3036 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3037 							if (tp1->rec.data.chunk_was_revoked == 0) {
3038 								tp1->whoTo->new_pseudo_cumack = 1;
3039 							}
3040 							tp1->whoTo->find_pseudo_cumack = 1;
3041 						}
3042 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3043 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3044 						}
3045 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3046 							if (tp1->rec.data.chunk_was_revoked == 0) {
3047 								tp1->whoTo->new_pseudo_cumack = 1;
3048 							}
3049 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3050 						}
3051 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3052 							sctp_log_sack(*biggest_newly_acked_tsn,
3053 							    last_tsn,
3054 							    tp1->rec.data.tsn,
3055 							    frag_strt,
3056 							    frag_end,
3057 							    SCTP_LOG_TSN_ACKED);
3058 						}
3059 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3060 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3061 							    tp1->whoTo->flight_size,
3062 							    tp1->book_size,
3063 							    (uint32_t)(uintptr_t)tp1->whoTo,
3064 							    tp1->rec.data.tsn);
3065 						}
3066 						sctp_flight_size_decrease(tp1);
3067 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3068 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3069 							    tp1);
3070 						}
3071 						sctp_total_flight_decrease(stcb, tp1);
3072 
3073 						tp1->whoTo->net_ack += tp1->send_size;
3074 						if (tp1->snd_count < 2) {
3075 							/*-
3076 							 * True non-retransmited chunk
3077 							 */
3078 							tp1->whoTo->net_ack2 += tp1->send_size;
3079 
3080 							/*-
3081 							 * update RTO too ?
3082 							 */
3083 							if (tp1->do_rtt) {
3084 								if (*rto_ok) {
3085 									tp1->whoTo->RTO =
3086 									    sctp_calculate_rto(stcb,
3087 									    &stcb->asoc,
3088 									    tp1->whoTo,
3089 									    &tp1->sent_rcv_time,
3090 									    SCTP_RTT_FROM_DATA);
3091 									*rto_ok = 0;
3092 								}
3093 								if (tp1->whoTo->rto_needed == 0) {
3094 									tp1->whoTo->rto_needed = 1;
3095 								}
3096 								tp1->do_rtt = 0;
3097 							}
3098 						}
3099 					}
3100 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3101 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3102 						    stcb->asoc.this_sack_highest_gap)) {
3103 							stcb->asoc.this_sack_highest_gap =
3104 							    tp1->rec.data.tsn;
3105 						}
3106 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3107 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3108 #ifdef SCTP_AUDITING_ENABLED
3109 							sctp_audit_log(0xB2,
3110 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3111 #endif
3112 						}
3113 					}
3114 					/*-
3115 					 * All chunks NOT UNSENT fall through here and are marked
3116 					 * (leave PR-SCTP ones that are to skip alone though)
3117 					 */
3118 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3119 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3120 						tp1->sent = SCTP_DATAGRAM_MARKED;
3121 					}
3122 					if (tp1->rec.data.chunk_was_revoked) {
3123 						/* deflate the cwnd */
3124 						tp1->whoTo->cwnd -= tp1->book_size;
3125 						tp1->rec.data.chunk_was_revoked = 0;
3126 					}
3127 					/* NR Sack code here */
3128 					if (nr_sacking &&
3129 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3130 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3131 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3132 #ifdef INVARIANTS
3133 						} else {
3134 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3135 #endif
3136 						}
3137 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3138 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3139 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3140 							stcb->asoc.trigger_reset = 1;
3141 						}
3142 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3143 						if (tp1->data) {
3144 							/*
3145 							 * sa_ignore
3146 							 * NO_NULL_CHK
3147 							 */
3148 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3149 							sctp_m_freem(tp1->data);
3150 							tp1->data = NULL;
3151 						}
3152 						wake_him++;
3153 					}
3154 				}
3155 				break;
3156 			}	/* if (tp1->tsn == theTSN) */
3157 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3158 				break;
3159 			}
3160 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3161 			if ((tp1 == NULL) && (circled == 0)) {
3162 				circled++;
3163 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3164 			}
3165 		}		/* end while (tp1) */
3166 		if (tp1 == NULL) {
3167 			circled = 0;
3168 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3169 		}
3170 		/* In case the fragments were not in order we must reset */
3171 	}			/* end for (j = fragStart */
3172 	*p_tp1 = tp1;
3173 	return (wake_him);	/* Return value only used for nr-sack */
3174 }
3175 
3176 
3177 static int
3178 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3179     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3180     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3181     int num_seg, int num_nr_seg, int *rto_ok)
3182 {
3183 	struct sctp_gap_ack_block *frag, block;
3184 	struct sctp_tmit_chunk *tp1;
3185 	int i;
3186 	int num_frs = 0;
3187 	int chunk_freed;
3188 	int non_revocable;
3189 	uint16_t frag_strt, frag_end, prev_frag_end;
3190 
3191 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3192 	prev_frag_end = 0;
3193 	chunk_freed = 0;
3194 
3195 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3196 		if (i == num_seg) {
3197 			prev_frag_end = 0;
3198 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3199 		}
3200 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3201 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3202 		*offset += sizeof(block);
3203 		if (frag == NULL) {
3204 			return (chunk_freed);
3205 		}
3206 		frag_strt = ntohs(frag->start);
3207 		frag_end = ntohs(frag->end);
3208 
3209 		if (frag_strt > frag_end) {
3210 			/* This gap report is malformed, skip it. */
3211 			continue;
3212 		}
3213 		if (frag_strt <= prev_frag_end) {
3214 			/* This gap report is not in order, so restart. */
3215 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3216 		}
3217 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3218 			*biggest_tsn_acked = last_tsn + frag_end;
3219 		}
3220 		if (i < num_seg) {
3221 			non_revocable = 0;
3222 		} else {
3223 			non_revocable = 1;
3224 		}
3225 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3226 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3227 		    this_sack_lowest_newack, rto_ok)) {
3228 			chunk_freed = 1;
3229 		}
3230 		prev_frag_end = frag_end;
3231 	}
3232 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3233 		if (num_frs)
3234 			sctp_log_fr(*biggest_tsn_acked,
3235 			    *biggest_newly_acked_tsn,
3236 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3237 	}
3238 	return (chunk_freed);
3239 }
3240 
3241 static void
3242 sctp_check_for_revoked(struct sctp_tcb *stcb,
3243     struct sctp_association *asoc, uint32_t cumack,
3244     uint32_t biggest_tsn_acked)
3245 {
3246 	struct sctp_tmit_chunk *tp1;
3247 
3248 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3249 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3250 			/*
3251 			 * ok this guy is either ACK or MARKED. If it is
3252 			 * ACKED it has been previously acked but not this
3253 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3254 			 * again.
3255 			 */
3256 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3257 				break;
3258 			}
3259 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3260 				/* it has been revoked */
3261 				tp1->sent = SCTP_DATAGRAM_SENT;
3262 				tp1->rec.data.chunk_was_revoked = 1;
3263 				/*
3264 				 * We must add this stuff back in to assure
3265 				 * timers and such get started.
3266 				 */
3267 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3268 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3269 					    tp1->whoTo->flight_size,
3270 					    tp1->book_size,
3271 					    (uint32_t)(uintptr_t)tp1->whoTo,
3272 					    tp1->rec.data.tsn);
3273 				}
3274 				sctp_flight_size_increase(tp1);
3275 				sctp_total_flight_increase(stcb, tp1);
3276 				/*
3277 				 * We inflate the cwnd to compensate for our
3278 				 * artificial inflation of the flight_size.
3279 				 */
3280 				tp1->whoTo->cwnd += tp1->book_size;
3281 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3282 					sctp_log_sack(asoc->last_acked_seq,
3283 					    cumack,
3284 					    tp1->rec.data.tsn,
3285 					    0,
3286 					    0,
3287 					    SCTP_LOG_TSN_REVOKED);
3288 				}
3289 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3290 				/* it has been re-acked in this SACK */
3291 				tp1->sent = SCTP_DATAGRAM_ACKED;
3292 			}
3293 		}
3294 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3295 			break;
3296 	}
3297 }
3298 
3299 
3300 static void
3301 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3302     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3303 {
3304 	struct sctp_tmit_chunk *tp1;
3305 	int strike_flag = 0;
3306 	struct timeval now;
3307 	int tot_retrans = 0;
3308 	uint32_t sending_seq;
3309 	struct sctp_nets *net;
3310 	int num_dests_sacked = 0;
3311 
3312 	/*
3313 	 * select the sending_seq, this is either the next thing ready to be
3314 	 * sent but not transmitted, OR, the next seq we assign.
3315 	 */
3316 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3317 	if (tp1 == NULL) {
3318 		sending_seq = asoc->sending_seq;
3319 	} else {
3320 		sending_seq = tp1->rec.data.tsn;
3321 	}
3322 
3323 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3324 	if ((asoc->sctp_cmt_on_off > 0) &&
3325 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3326 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3327 			if (net->saw_newack)
3328 				num_dests_sacked++;
3329 		}
3330 	}
3331 	if (stcb->asoc.prsctp_supported) {
3332 		(void)SCTP_GETTIME_TIMEVAL(&now);
3333 	}
3334 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3335 		strike_flag = 0;
3336 		if (tp1->no_fr_allowed) {
3337 			/* this one had a timeout or something */
3338 			continue;
3339 		}
3340 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3341 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3342 				sctp_log_fr(biggest_tsn_newly_acked,
3343 				    tp1->rec.data.tsn,
3344 				    tp1->sent,
3345 				    SCTP_FR_LOG_CHECK_STRIKE);
3346 		}
3347 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3348 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3349 			/* done */
3350 			break;
3351 		}
3352 		if (stcb->asoc.prsctp_supported) {
3353 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3354 				/* Is it expired? */
3355 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3356 					/* Yes so drop it */
3357 					if (tp1->data != NULL) {
3358 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3359 						    SCTP_SO_NOT_LOCKED);
3360 					}
3361 					continue;
3362 				}
3363 			}
3364 		}
3365 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3366 			/* we are beyond the tsn in the sack  */
3367 			break;
3368 		}
3369 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3370 			/* either a RESEND, ACKED, or MARKED */
3371 			/* skip */
3372 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3373 				/* Continue strikin FWD-TSN chunks */
3374 				tp1->rec.data.fwd_tsn_cnt++;
3375 			}
3376 			continue;
3377 		}
3378 		/*
3379 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3380 		 */
3381 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3382 			/*
3383 			 * No new acks were receieved for data sent to this
3384 			 * dest. Therefore, according to the SFR algo for
3385 			 * CMT, no data sent to this dest can be marked for
3386 			 * FR using this SACK.
3387 			 */
3388 			continue;
3389 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3390 		    tp1->whoTo->this_sack_highest_newack)) {
3391 			/*
3392 			 * CMT: New acks were receieved for data sent to
3393 			 * this dest. But no new acks were seen for data
3394 			 * sent after tp1. Therefore, according to the SFR
3395 			 * algo for CMT, tp1 cannot be marked for FR using
3396 			 * this SACK. This step covers part of the DAC algo
3397 			 * and the HTNA algo as well.
3398 			 */
3399 			continue;
3400 		}
3401 		/*
3402 		 * Here we check to see if we were have already done a FR
3403 		 * and if so we see if the biggest TSN we saw in the sack is
3404 		 * smaller than the recovery point. If so we don't strike
3405 		 * the tsn... otherwise we CAN strike the TSN.
3406 		 */
3407 		/*
3408 		 * @@@ JRI: Check for CMT if (accum_moved &&
3409 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3410 		 * 0)) {
3411 		 */
3412 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3413 			/*
3414 			 * Strike the TSN if in fast-recovery and cum-ack
3415 			 * moved.
3416 			 */
3417 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3418 				sctp_log_fr(biggest_tsn_newly_acked,
3419 				    tp1->rec.data.tsn,
3420 				    tp1->sent,
3421 				    SCTP_FR_LOG_STRIKE_CHUNK);
3422 			}
3423 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3424 				tp1->sent++;
3425 			}
3426 			if ((asoc->sctp_cmt_on_off > 0) &&
3427 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3428 				/*
3429 				 * CMT DAC algorithm: If SACK flag is set to
3430 				 * 0, then lowest_newack test will not pass
3431 				 * because it would have been set to the
3432 				 * cumack earlier. If not already to be
3433 				 * rtx'd, If not a mixed sack and if tp1 is
3434 				 * not between two sacked TSNs, then mark by
3435 				 * one more. NOTE that we are marking by one
3436 				 * additional time since the SACK DAC flag
3437 				 * indicates that two packets have been
3438 				 * received after this missing TSN.
3439 				 */
3440 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3441 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3442 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3443 						sctp_log_fr(16 + num_dests_sacked,
3444 						    tp1->rec.data.tsn,
3445 						    tp1->sent,
3446 						    SCTP_FR_LOG_STRIKE_CHUNK);
3447 					}
3448 					tp1->sent++;
3449 				}
3450 			}
3451 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3452 		    (asoc->sctp_cmt_on_off == 0)) {
3453 			/*
3454 			 * For those that have done a FR we must take
3455 			 * special consideration if we strike. I.e the
3456 			 * biggest_newly_acked must be higher than the
3457 			 * sending_seq at the time we did the FR.
3458 			 */
3459 			if (
3460 #ifdef SCTP_FR_TO_ALTERNATE
3461 			/*
3462 			 * If FR's go to new networks, then we must only do
3463 			 * this for singly homed asoc's. However if the FR's
3464 			 * go to the same network (Armando's work) then its
3465 			 * ok to FR multiple times.
3466 			 */
3467 			    (asoc->numnets < 2)
3468 #else
3469 			    (1)
3470 #endif
3471 			    ) {
3472 
3473 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3474 				    tp1->rec.data.fast_retran_tsn)) {
3475 					/*
3476 					 * Strike the TSN, since this ack is
3477 					 * beyond where things were when we
3478 					 * did a FR.
3479 					 */
3480 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 						sctp_log_fr(biggest_tsn_newly_acked,
3482 						    tp1->rec.data.tsn,
3483 						    tp1->sent,
3484 						    SCTP_FR_LOG_STRIKE_CHUNK);
3485 					}
3486 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3487 						tp1->sent++;
3488 					}
3489 					strike_flag = 1;
3490 					if ((asoc->sctp_cmt_on_off > 0) &&
3491 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3492 						/*
3493 						 * CMT DAC algorithm: If
3494 						 * SACK flag is set to 0,
3495 						 * then lowest_newack test
3496 						 * will not pass because it
3497 						 * would have been set to
3498 						 * the cumack earlier. If
3499 						 * not already to be rtx'd,
3500 						 * If not a mixed sack and
3501 						 * if tp1 is not between two
3502 						 * sacked TSNs, then mark by
3503 						 * one more. NOTE that we
3504 						 * are marking by one
3505 						 * additional time since the
3506 						 * SACK DAC flag indicates
3507 						 * that two packets have
3508 						 * been received after this
3509 						 * missing TSN.
3510 						 */
3511 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3512 						    (num_dests_sacked == 1) &&
3513 						    SCTP_TSN_GT(this_sack_lowest_newack,
3514 						    tp1->rec.data.tsn)) {
3515 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3516 								sctp_log_fr(32 + num_dests_sacked,
3517 								    tp1->rec.data.tsn,
3518 								    tp1->sent,
3519 								    SCTP_FR_LOG_STRIKE_CHUNK);
3520 							}
3521 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3522 								tp1->sent++;
3523 							}
3524 						}
3525 					}
3526 				}
3527 			}
3528 			/*
3529 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3530 			 * algo covers HTNA.
3531 			 */
3532 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3533 		    biggest_tsn_newly_acked)) {
3534 			/*
3535 			 * We don't strike these: This is the  HTNA
3536 			 * algorithm i.e. we don't strike If our TSN is
3537 			 * larger than the Highest TSN Newly Acked.
3538 			 */
3539 			;
3540 		} else {
3541 			/* Strike the TSN */
3542 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3543 				sctp_log_fr(biggest_tsn_newly_acked,
3544 				    tp1->rec.data.tsn,
3545 				    tp1->sent,
3546 				    SCTP_FR_LOG_STRIKE_CHUNK);
3547 			}
3548 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3549 				tp1->sent++;
3550 			}
3551 			if ((asoc->sctp_cmt_on_off > 0) &&
3552 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3553 				/*
3554 				 * CMT DAC algorithm: If SACK flag is set to
3555 				 * 0, then lowest_newack test will not pass
3556 				 * because it would have been set to the
3557 				 * cumack earlier. If not already to be
3558 				 * rtx'd, If not a mixed sack and if tp1 is
3559 				 * not between two sacked TSNs, then mark by
3560 				 * one more. NOTE that we are marking by one
3561 				 * additional time since the SACK DAC flag
3562 				 * indicates that two packets have been
3563 				 * received after this missing TSN.
3564 				 */
3565 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3566 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3567 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3568 						sctp_log_fr(48 + num_dests_sacked,
3569 						    tp1->rec.data.tsn,
3570 						    tp1->sent,
3571 						    SCTP_FR_LOG_STRIKE_CHUNK);
3572 					}
3573 					tp1->sent++;
3574 				}
3575 			}
3576 		}
3577 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3578 			struct sctp_nets *alt;
3579 
3580 			/* fix counts and things */
3581 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3582 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3583 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3584 				    tp1->book_size,
3585 				    (uint32_t)(uintptr_t)tp1->whoTo,
3586 				    tp1->rec.data.tsn);
3587 			}
3588 			if (tp1->whoTo) {
3589 				tp1->whoTo->net_ack++;
3590 				sctp_flight_size_decrease(tp1);
3591 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3592 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3593 					    tp1);
3594 				}
3595 			}
3596 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3597 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3598 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3599 			}
3600 			/* add back to the rwnd */
3601 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3602 
3603 			/* remove from the total flight */
3604 			sctp_total_flight_decrease(stcb, tp1);
3605 
3606 			if ((stcb->asoc.prsctp_supported) &&
3607 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3608 				/*
3609 				 * Has it been retransmitted tv_sec times? -
3610 				 * we store the retran count there.
3611 				 */
3612 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3613 					/* Yes, so drop it */
3614 					if (tp1->data != NULL) {
3615 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3616 						    SCTP_SO_NOT_LOCKED);
3617 					}
3618 					/* Make sure to flag we had a FR */
3619 					tp1->whoTo->net_ack++;
3620 					continue;
3621 				}
3622 			}
3623 			/*
3624 			 * SCTP_PRINTF("OK, we are now ready to FR this
3625 			 * guy\n");
3626 			 */
3627 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3628 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3629 				    0, SCTP_FR_MARKED);
3630 			}
3631 			if (strike_flag) {
3632 				/* This is a subsequent FR */
3633 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3634 			}
3635 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3636 			if (asoc->sctp_cmt_on_off > 0) {
3637 				/*
3638 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3639 				 * If CMT is being used, then pick dest with
3640 				 * largest ssthresh for any retransmission.
3641 				 */
3642 				tp1->no_fr_allowed = 1;
3643 				alt = tp1->whoTo;
3644 				/* sa_ignore NO_NULL_CHK */
3645 				if (asoc->sctp_cmt_pf > 0) {
3646 					/*
3647 					 * JRS 5/18/07 - If CMT PF is on,
3648 					 * use the PF version of
3649 					 * find_alt_net()
3650 					 */
3651 					alt = sctp_find_alternate_net(stcb, alt, 2);
3652 				} else {
3653 					/*
3654 					 * JRS 5/18/07 - If only CMT is on,
3655 					 * use the CMT version of
3656 					 * find_alt_net()
3657 					 */
3658 					/* sa_ignore NO_NULL_CHK */
3659 					alt = sctp_find_alternate_net(stcb, alt, 1);
3660 				}
3661 				if (alt == NULL) {
3662 					alt = tp1->whoTo;
3663 				}
3664 				/*
3665 				 * CUCv2: If a different dest is picked for
3666 				 * the retransmission, then new
3667 				 * (rtx-)pseudo_cumack needs to be tracked
3668 				 * for orig dest. Let CUCv2 track new (rtx-)
3669 				 * pseudo-cumack always.
3670 				 */
3671 				if (tp1->whoTo) {
3672 					tp1->whoTo->find_pseudo_cumack = 1;
3673 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3674 				}
3675 			} else {/* CMT is OFF */
3676 
3677 #ifdef SCTP_FR_TO_ALTERNATE
3678 				/* Can we find an alternate? */
3679 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3680 #else
3681 				/*
3682 				 * default behavior is to NOT retransmit
3683 				 * FR's to an alternate. Armando Caro's
3684 				 * paper details why.
3685 				 */
3686 				alt = tp1->whoTo;
3687 #endif
3688 			}
3689 
3690 			tp1->rec.data.doing_fast_retransmit = 1;
3691 			tot_retrans++;
3692 			/* mark the sending seq for possible subsequent FR's */
3693 			/*
3694 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3695 			 * (uint32_t)tpi->rec.data.tsn);
3696 			 */
3697 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3698 				/*
3699 				 * If the queue of send is empty then its
3700 				 * the next sequence number that will be
3701 				 * assigned so we subtract one from this to
3702 				 * get the one we last sent.
3703 				 */
3704 				tp1->rec.data.fast_retran_tsn = sending_seq;
3705 			} else {
3706 				/*
3707 				 * If there are chunks on the send queue
3708 				 * (unsent data that has made it from the
3709 				 * stream queues but not out the door, we
3710 				 * take the first one (which will have the
3711 				 * lowest TSN) and subtract one to get the
3712 				 * one we last sent.
3713 				 */
3714 				struct sctp_tmit_chunk *ttt;
3715 
3716 				ttt = TAILQ_FIRST(&asoc->send_queue);
3717 				tp1->rec.data.fast_retran_tsn =
3718 				    ttt->rec.data.tsn;
3719 			}
3720 
3721 			if (tp1->do_rtt) {
3722 				/*
3723 				 * this guy had a RTO calculation pending on
3724 				 * it, cancel it
3725 				 */
3726 				if ((tp1->whoTo != NULL) &&
3727 				    (tp1->whoTo->rto_needed == 0)) {
3728 					tp1->whoTo->rto_needed = 1;
3729 				}
3730 				tp1->do_rtt = 0;
3731 			}
3732 			if (alt != tp1->whoTo) {
3733 				/* yes, there is an alternate. */
3734 				sctp_free_remote_addr(tp1->whoTo);
3735 				/* sa_ignore FREED_MEMORY */
3736 				tp1->whoTo = alt;
3737 				atomic_add_int(&alt->ref_count, 1);
3738 			}
3739 		}
3740 	}
3741 }
3742 
3743 struct sctp_tmit_chunk *
3744 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3745     struct sctp_association *asoc)
3746 {
3747 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3748 	struct timeval now;
3749 	int now_filled = 0;
3750 
3751 	if (asoc->prsctp_supported == 0) {
3752 		return (NULL);
3753 	}
3754 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3755 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3756 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3757 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3758 			/* no chance to advance, out of here */
3759 			break;
3760 		}
3761 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3762 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3763 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3764 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3765 				    asoc->advanced_peer_ack_point,
3766 				    tp1->rec.data.tsn, 0, 0);
3767 			}
3768 		}
3769 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3770 			/*
3771 			 * We can't fwd-tsn past any that are reliable aka
3772 			 * retransmitted until the asoc fails.
3773 			 */
3774 			break;
3775 		}
3776 		if (!now_filled) {
3777 			(void)SCTP_GETTIME_TIMEVAL(&now);
3778 			now_filled = 1;
3779 		}
3780 		/*
3781 		 * now we got a chunk which is marked for another
3782 		 * retransmission to a PR-stream but has run out its chances
3783 		 * already maybe OR has been marked to skip now. Can we skip
3784 		 * it if its a resend?
3785 		 */
3786 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3787 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3788 			/*
3789 			 * Now is this one marked for resend and its time is
3790 			 * now up?
3791 			 */
3792 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3793 				/* Yes so drop it */
3794 				if (tp1->data) {
3795 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3796 					    1, SCTP_SO_NOT_LOCKED);
3797 				}
3798 			} else {
3799 				/*
3800 				 * No, we are done when hit one for resend
3801 				 * whos time as not expired.
3802 				 */
3803 				break;
3804 			}
3805 		}
3806 		/*
3807 		 * Ok now if this chunk is marked to drop it we can clean up
3808 		 * the chunk, advance our peer ack point and we can check
3809 		 * the next chunk.
3810 		 */
3811 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3812 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3813 			/* advance PeerAckPoint goes forward */
3814 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3815 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3816 				a_adv = tp1;
3817 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3818 				/* No update but we do save the chk */
3819 				a_adv = tp1;
3820 			}
3821 		} else {
3822 			/*
3823 			 * If it is still in RESEND we can advance no
3824 			 * further
3825 			 */
3826 			break;
3827 		}
3828 	}
3829 	return (a_adv);
3830 }
3831 
3832 static int
3833 sctp_fs_audit(struct sctp_association *asoc)
3834 {
3835 	struct sctp_tmit_chunk *chk;
3836 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3837 	int ret;
3838 #ifndef INVARIANTS
3839 	int entry_flight, entry_cnt;
3840 #endif
3841 
3842 	ret = 0;
3843 #ifndef INVARIANTS
3844 	entry_flight = asoc->total_flight;
3845 	entry_cnt = asoc->total_flight_count;
3846 #endif
3847 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3848 		return (0);
3849 
3850 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3851 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3852 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3853 			    chk->rec.data.tsn,
3854 			    chk->send_size,
3855 			    chk->snd_count);
3856 			inflight++;
3857 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3858 			resend++;
3859 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3860 			inbetween++;
3861 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3862 			above++;
3863 		} else {
3864 			acked++;
3865 		}
3866 	}
3867 
3868 	if ((inflight > 0) || (inbetween > 0)) {
3869 #ifdef INVARIANTS
3870 		panic("Flight size-express incorrect? \n");
3871 #else
3872 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3873 		    entry_flight, entry_cnt);
3874 
3875 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3876 		    inflight, inbetween, resend, above, acked);
3877 		ret = 1;
3878 #endif
3879 	}
3880 	return (ret);
3881 }
3882 
3883 
3884 static void
3885 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3886     struct sctp_association *asoc,
3887     struct sctp_tmit_chunk *tp1)
3888 {
3889 	tp1->window_probe = 0;
3890 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3891 		/* TSN's skipped we do NOT move back. */
3892 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3893 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3894 		    tp1->book_size,
3895 		    (uint32_t)(uintptr_t)tp1->whoTo,
3896 		    tp1->rec.data.tsn);
3897 		return;
3898 	}
3899 	/* First setup this by shrinking flight */
3900 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3901 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3902 		    tp1);
3903 	}
3904 	sctp_flight_size_decrease(tp1);
3905 	sctp_total_flight_decrease(stcb, tp1);
3906 	/* Now mark for resend */
3907 	tp1->sent = SCTP_DATAGRAM_RESEND;
3908 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3909 
3910 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3911 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3912 		    tp1->whoTo->flight_size,
3913 		    tp1->book_size,
3914 		    (uint32_t)(uintptr_t)tp1->whoTo,
3915 		    tp1->rec.data.tsn);
3916 	}
3917 }
3918 
3919 void
3920 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3921     uint32_t rwnd, int *abort_now, int ecne_seen)
3922 {
3923 	struct sctp_nets *net;
3924 	struct sctp_association *asoc;
3925 	struct sctp_tmit_chunk *tp1, *tp2;
3926 	uint32_t old_rwnd;
3927 	int win_probe_recovery = 0;
3928 	int win_probe_recovered = 0;
3929 	int j, done_once = 0;
3930 	int rto_ok = 1;
3931 	uint32_t send_s;
3932 
3933 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3934 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3935 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3936 	}
3937 	SCTP_TCB_LOCK_ASSERT(stcb);
3938 #ifdef SCTP_ASOCLOG_OF_TSNS
3939 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3940 	stcb->asoc.cumack_log_at++;
3941 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3942 		stcb->asoc.cumack_log_at = 0;
3943 	}
3944 #endif
3945 	asoc = &stcb->asoc;
3946 	old_rwnd = asoc->peers_rwnd;
3947 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3948 		/* old ack */
3949 		return;
3950 	} else if (asoc->last_acked_seq == cumack) {
3951 		/* Window update sack */
3952 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3953 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3954 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3955 			/* SWS sender side engages */
3956 			asoc->peers_rwnd = 0;
3957 		}
3958 		if (asoc->peers_rwnd > old_rwnd) {
3959 			goto again;
3960 		}
3961 		return;
3962 	}
3963 	/* First setup for CC stuff */
3964 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3965 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3966 			/* Drag along the window_tsn for cwr's */
3967 			net->cwr_window_tsn = cumack;
3968 		}
3969 		net->prev_cwnd = net->cwnd;
3970 		net->net_ack = 0;
3971 		net->net_ack2 = 0;
3972 
3973 		/*
3974 		 * CMT: Reset CUC and Fast recovery algo variables before
3975 		 * SACK processing
3976 		 */
3977 		net->new_pseudo_cumack = 0;
3978 		net->will_exit_fast_recovery = 0;
3979 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3980 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3981 		}
3982 	}
3983 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3984 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3985 		    sctpchunk_listhead);
3986 		send_s = tp1->rec.data.tsn + 1;
3987 	} else {
3988 		send_s = asoc->sending_seq;
3989 	}
3990 	if (SCTP_TSN_GE(cumack, send_s)) {
3991 		struct mbuf *op_err;
3992 		char msg[SCTP_DIAG_INFO_LEN];
3993 
3994 		*abort_now = 1;
3995 		/* XXX */
3996 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3997 		    cumack, send_s);
3998 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3999 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4000 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4001 		return;
4002 	}
4003 	asoc->this_sack_highest_gap = cumack;
4004 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4005 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4006 		    stcb->asoc.overall_error_count,
4007 		    0,
4008 		    SCTP_FROM_SCTP_INDATA,
4009 		    __LINE__);
4010 	}
4011 	stcb->asoc.overall_error_count = 0;
4012 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4013 		/* process the new consecutive TSN first */
4014 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4015 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4016 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4017 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4018 				}
4019 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4020 					/*
4021 					 * If it is less than ACKED, it is
4022 					 * now no-longer in flight. Higher
4023 					 * values may occur during marking
4024 					 */
4025 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4026 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4027 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4028 							    tp1->whoTo->flight_size,
4029 							    tp1->book_size,
4030 							    (uint32_t)(uintptr_t)tp1->whoTo,
4031 							    tp1->rec.data.tsn);
4032 						}
4033 						sctp_flight_size_decrease(tp1);
4034 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4035 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4036 							    tp1);
4037 						}
4038 						/* sa_ignore NO_NULL_CHK */
4039 						sctp_total_flight_decrease(stcb, tp1);
4040 					}
4041 					tp1->whoTo->net_ack += tp1->send_size;
4042 					if (tp1->snd_count < 2) {
4043 						/*
4044 						 * True non-retransmited
4045 						 * chunk
4046 						 */
4047 						tp1->whoTo->net_ack2 +=
4048 						    tp1->send_size;
4049 
4050 						/* update RTO too? */
4051 						if (tp1->do_rtt) {
4052 							if (rto_ok) {
4053 								tp1->whoTo->RTO =
4054 								/*
4055 								 * sa_ignore
4056 								 * NO_NULL_CHK
4057 								 */
4058 								    sctp_calculate_rto(stcb,
4059 								    asoc, tp1->whoTo,
4060 								    &tp1->sent_rcv_time,
4061 								    SCTP_RTT_FROM_DATA);
4062 								rto_ok = 0;
4063 							}
4064 							if (tp1->whoTo->rto_needed == 0) {
4065 								tp1->whoTo->rto_needed = 1;
4066 							}
4067 							tp1->do_rtt = 0;
4068 						}
4069 					}
4070 					/*
4071 					 * CMT: CUCv2 algorithm. From the
4072 					 * cumack'd TSNs, for each TSN being
4073 					 * acked for the first time, set the
4074 					 * following variables for the
4075 					 * corresp destination.
4076 					 * new_pseudo_cumack will trigger a
4077 					 * cwnd update.
4078 					 * find_(rtx_)pseudo_cumack will
4079 					 * trigger search for the next
4080 					 * expected (rtx-)pseudo-cumack.
4081 					 */
4082 					tp1->whoTo->new_pseudo_cumack = 1;
4083 					tp1->whoTo->find_pseudo_cumack = 1;
4084 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4085 
4086 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4087 						/* sa_ignore NO_NULL_CHK */
4088 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4089 					}
4090 				}
4091 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4092 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4093 				}
4094 				if (tp1->rec.data.chunk_was_revoked) {
4095 					/* deflate the cwnd */
4096 					tp1->whoTo->cwnd -= tp1->book_size;
4097 					tp1->rec.data.chunk_was_revoked = 0;
4098 				}
4099 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4100 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4101 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4102 #ifdef INVARIANTS
4103 					} else {
4104 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4105 #endif
4106 					}
4107 				}
4108 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4109 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4110 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4111 					asoc->trigger_reset = 1;
4112 				}
4113 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4114 				if (tp1->data) {
4115 					/* sa_ignore NO_NULL_CHK */
4116 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4117 					sctp_m_freem(tp1->data);
4118 					tp1->data = NULL;
4119 				}
4120 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4121 					sctp_log_sack(asoc->last_acked_seq,
4122 					    cumack,
4123 					    tp1->rec.data.tsn,
4124 					    0,
4125 					    0,
4126 					    SCTP_LOG_FREE_SENT);
4127 				}
4128 				asoc->sent_queue_cnt--;
4129 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4130 			} else {
4131 				break;
4132 			}
4133 		}
4134 
4135 	}
4136 	/* sa_ignore NO_NULL_CHK */
4137 	if (stcb->sctp_socket) {
4138 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4139 		struct socket *so;
4140 
4141 #endif
4142 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4143 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4144 			/* sa_ignore NO_NULL_CHK */
4145 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4146 		}
4147 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4148 		so = SCTP_INP_SO(stcb->sctp_ep);
4149 		atomic_add_int(&stcb->asoc.refcnt, 1);
4150 		SCTP_TCB_UNLOCK(stcb);
4151 		SCTP_SOCKET_LOCK(so, 1);
4152 		SCTP_TCB_LOCK(stcb);
4153 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4154 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4155 			/* assoc was freed while we were unlocked */
4156 			SCTP_SOCKET_UNLOCK(so, 1);
4157 			return;
4158 		}
4159 #endif
4160 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4161 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4162 		SCTP_SOCKET_UNLOCK(so, 1);
4163 #endif
4164 	} else {
4165 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4166 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4167 		}
4168 	}
4169 
4170 	/* JRS - Use the congestion control given in the CC module */
4171 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4172 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4173 			if (net->net_ack2 > 0) {
4174 				/*
4175 				 * Karn's rule applies to clearing error
4176 				 * count, this is optional.
4177 				 */
4178 				net->error_count = 0;
4179 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4180 					/* addr came good */
4181 					net->dest_state |= SCTP_ADDR_REACHABLE;
4182 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4183 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4184 				}
4185 				if (net == stcb->asoc.primary_destination) {
4186 					if (stcb->asoc.alternate) {
4187 						/*
4188 						 * release the alternate,
4189 						 * primary is good
4190 						 */
4191 						sctp_free_remote_addr(stcb->asoc.alternate);
4192 						stcb->asoc.alternate = NULL;
4193 					}
4194 				}
4195 				if (net->dest_state & SCTP_ADDR_PF) {
4196 					net->dest_state &= ~SCTP_ADDR_PF;
4197 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4198 					    stcb->sctp_ep, stcb, net,
4199 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4200 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4201 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4202 					/* Done with this net */
4203 					net->net_ack = 0;
4204 				}
4205 				/* restore any doubled timers */
4206 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4207 				if (net->RTO < stcb->asoc.minrto) {
4208 					net->RTO = stcb->asoc.minrto;
4209 				}
4210 				if (net->RTO > stcb->asoc.maxrto) {
4211 					net->RTO = stcb->asoc.maxrto;
4212 				}
4213 			}
4214 		}
4215 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4216 	}
4217 	asoc->last_acked_seq = cumack;
4218 
4219 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4220 		/* nothing left in-flight */
4221 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4222 			net->flight_size = 0;
4223 			net->partial_bytes_acked = 0;
4224 		}
4225 		asoc->total_flight = 0;
4226 		asoc->total_flight_count = 0;
4227 	}
4228 	/* RWND update */
4229 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4230 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4231 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4232 		/* SWS sender side engages */
4233 		asoc->peers_rwnd = 0;
4234 	}
4235 	if (asoc->peers_rwnd > old_rwnd) {
4236 		win_probe_recovery = 1;
4237 	}
4238 	/* Now assure a timer where data is queued at */
4239 again:
4240 	j = 0;
4241 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4242 		if (win_probe_recovery && (net->window_probe)) {
4243 			win_probe_recovered = 1;
4244 			/*
4245 			 * Find first chunk that was used with window probe
4246 			 * and clear the sent
4247 			 */
4248 			/* sa_ignore FREED_MEMORY */
4249 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4250 				if (tp1->window_probe) {
4251 					/* move back to data send queue */
4252 					sctp_window_probe_recovery(stcb, asoc, tp1);
4253 					break;
4254 				}
4255 			}
4256 		}
4257 		if (net->flight_size) {
4258 			j++;
4259 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4260 			if (net->window_probe) {
4261 				net->window_probe = 0;
4262 			}
4263 		} else {
4264 			if (net->window_probe) {
4265 				/*
4266 				 * In window probes we must assure a timer
4267 				 * is still running there
4268 				 */
4269 				net->window_probe = 0;
4270 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4271 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4272 				}
4273 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4274 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4275 				    stcb, net,
4276 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4277 			}
4278 		}
4279 	}
4280 	if ((j == 0) &&
4281 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4282 	    (asoc->sent_queue_retran_cnt == 0) &&
4283 	    (win_probe_recovered == 0) &&
4284 	    (done_once == 0)) {
4285 		/*
4286 		 * huh, this should not happen unless all packets are
4287 		 * PR-SCTP and marked to skip of course.
4288 		 */
4289 		if (sctp_fs_audit(asoc)) {
4290 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4291 				net->flight_size = 0;
4292 			}
4293 			asoc->total_flight = 0;
4294 			asoc->total_flight_count = 0;
4295 			asoc->sent_queue_retran_cnt = 0;
4296 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4297 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4298 					sctp_flight_size_increase(tp1);
4299 					sctp_total_flight_increase(stcb, tp1);
4300 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4301 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4302 				}
4303 			}
4304 		}
4305 		done_once = 1;
4306 		goto again;
4307 	}
4308 	/**********************************/
4309 	/* Now what about shutdown issues */
4310 	/**********************************/
4311 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4312 		/* nothing left on sendqueue.. consider done */
4313 		/* clean up */
4314 		if ((asoc->stream_queue_cnt == 1) &&
4315 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4316 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4317 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4318 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4319 		}
4320 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4321 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4322 		    (asoc->stream_queue_cnt == 1) &&
4323 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4324 			struct mbuf *op_err;
4325 
4326 			*abort_now = 1;
4327 			/* XXX */
4328 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4329 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4330 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4331 			return;
4332 		}
4333 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4334 		    (asoc->stream_queue_cnt == 0)) {
4335 			struct sctp_nets *netp;
4336 
4337 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4338 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4339 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4340 			}
4341 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4342 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4343 			sctp_stop_timers_for_shutdown(stcb);
4344 			if (asoc->alternate) {
4345 				netp = asoc->alternate;
4346 			} else {
4347 				netp = asoc->primary_destination;
4348 			}
4349 			sctp_send_shutdown(stcb, netp);
4350 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4351 			    stcb->sctp_ep, stcb, netp);
4352 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4353 			    stcb->sctp_ep, stcb, netp);
4354 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4355 		    (asoc->stream_queue_cnt == 0)) {
4356 			struct sctp_nets *netp;
4357 
4358 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4359 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4360 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4361 			sctp_stop_timers_for_shutdown(stcb);
4362 			if (asoc->alternate) {
4363 				netp = asoc->alternate;
4364 			} else {
4365 				netp = asoc->primary_destination;
4366 			}
4367 			sctp_send_shutdown_ack(stcb, netp);
4368 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4369 			    stcb->sctp_ep, stcb, netp);
4370 		}
4371 	}
4372 	/*********************************************/
4373 	/* Here we perform PR-SCTP procedures        */
4374 	/* (section 4.2)                             */
4375 	/*********************************************/
4376 	/* C1. update advancedPeerAckPoint */
4377 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4378 		asoc->advanced_peer_ack_point = cumack;
4379 	}
4380 	/* PR-Sctp issues need to be addressed too */
4381 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4382 		struct sctp_tmit_chunk *lchk;
4383 		uint32_t old_adv_peer_ack_point;
4384 
4385 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4386 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4387 		/* C3. See if we need to send a Fwd-TSN */
4388 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4389 			/*
4390 			 * ISSUE with ECN, see FWD-TSN processing.
4391 			 */
4392 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4393 				send_forward_tsn(stcb, asoc);
4394 			} else if (lchk) {
4395 				/* try to FR fwd-tsn's that get lost too */
4396 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4397 					send_forward_tsn(stcb, asoc);
4398 				}
4399 			}
4400 		}
4401 		if (lchk) {
4402 			/* Assure a timer is up */
4403 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4404 			    stcb->sctp_ep, stcb, lchk->whoTo);
4405 		}
4406 	}
4407 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4408 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4409 		    rwnd,
4410 		    stcb->asoc.peers_rwnd,
4411 		    stcb->asoc.total_flight,
4412 		    stcb->asoc.total_output_queue_size);
4413 	}
4414 }
4415 
4416 void
4417 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4418     struct sctp_tcb *stcb,
4419     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4420     int *abort_now, uint8_t flags,
4421     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4422 {
4423 	struct sctp_association *asoc;
4424 	struct sctp_tmit_chunk *tp1, *tp2;
4425 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4426 	uint16_t wake_him = 0;
4427 	uint32_t send_s = 0;
4428 	long j;
4429 	int accum_moved = 0;
4430 	int will_exit_fast_recovery = 0;
4431 	uint32_t a_rwnd, old_rwnd;
4432 	int win_probe_recovery = 0;
4433 	int win_probe_recovered = 0;
4434 	struct sctp_nets *net = NULL;
4435 	int done_once;
4436 	int rto_ok = 1;
4437 	uint8_t reneged_all = 0;
4438 	uint8_t cmt_dac_flag;
4439 
4440 	/*
4441 	 * we take any chance we can to service our queues since we cannot
4442 	 * get awoken when the socket is read from :<
4443 	 */
4444 	/*
4445 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4446 	 * old sack, if so discard. 2) If there is nothing left in the send
4447 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4448 	 * too, update any rwnd change and verify no timers are running.
4449 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4450 	 * moved process these first and note that it moved. 4) Process any
4451 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4452 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4453 	 * sync up flightsizes and things, stop all timers and also check
4454 	 * for shutdown_pending state. If so then go ahead and send off the
4455 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4456 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4457 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4458 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4459 	 * if in shutdown_recv state.
4460 	 */
4461 	SCTP_TCB_LOCK_ASSERT(stcb);
4462 	/* CMT DAC algo */
4463 	this_sack_lowest_newack = 0;
4464 	SCTP_STAT_INCR(sctps_slowpath_sack);
4465 	last_tsn = cum_ack;
4466 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4467 #ifdef SCTP_ASOCLOG_OF_TSNS
4468 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4469 	stcb->asoc.cumack_log_at++;
4470 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4471 		stcb->asoc.cumack_log_at = 0;
4472 	}
4473 #endif
4474 	a_rwnd = rwnd;
4475 
4476 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4477 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4478 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4479 	}
4480 	old_rwnd = stcb->asoc.peers_rwnd;
4481 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4482 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4483 		    stcb->asoc.overall_error_count,
4484 		    0,
4485 		    SCTP_FROM_SCTP_INDATA,
4486 		    __LINE__);
4487 	}
4488 	stcb->asoc.overall_error_count = 0;
4489 	asoc = &stcb->asoc;
4490 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4491 		sctp_log_sack(asoc->last_acked_seq,
4492 		    cum_ack,
4493 		    0,
4494 		    num_seg,
4495 		    num_dup,
4496 		    SCTP_LOG_NEW_SACK);
4497 	}
4498 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4499 		uint16_t i;
4500 		uint32_t *dupdata, dblock;
4501 
4502 		for (i = 0; i < num_dup; i++) {
4503 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4504 			    sizeof(uint32_t), (uint8_t *)&dblock);
4505 			if (dupdata == NULL) {
4506 				break;
4507 			}
4508 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4509 		}
4510 	}
4511 	/* reality check */
4512 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4513 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4514 		    sctpchunk_listhead);
4515 		send_s = tp1->rec.data.tsn + 1;
4516 	} else {
4517 		tp1 = NULL;
4518 		send_s = asoc->sending_seq;
4519 	}
4520 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4521 		struct mbuf *op_err;
4522 		char msg[SCTP_DIAG_INFO_LEN];
4523 
4524 		/*
4525 		 * no way, we have not even sent this TSN out yet. Peer is
4526 		 * hopelessly messed up with us.
4527 		 */
4528 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4529 		    cum_ack, send_s);
4530 		if (tp1) {
4531 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4532 			    tp1->rec.data.tsn, (void *)tp1);
4533 		}
4534 hopeless_peer:
4535 		*abort_now = 1;
4536 		/* XXX */
4537 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4538 		    cum_ack, send_s);
4539 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4540 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4541 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4542 		return;
4543 	}
4544 	/**********************/
4545 	/* 1) check the range */
4546 	/**********************/
4547 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4548 		/* acking something behind */
4549 		return;
4550 	}
4551 	/* update the Rwnd of the peer */
4552 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4553 	    TAILQ_EMPTY(&asoc->send_queue) &&
4554 	    (asoc->stream_queue_cnt == 0)) {
4555 		/* nothing left on send/sent and strmq */
4556 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4557 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4558 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4559 		}
4560 		asoc->peers_rwnd = a_rwnd;
4561 		if (asoc->sent_queue_retran_cnt) {
4562 			asoc->sent_queue_retran_cnt = 0;
4563 		}
4564 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4565 			/* SWS sender side engages */
4566 			asoc->peers_rwnd = 0;
4567 		}
4568 		/* stop any timers */
4569 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4570 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4571 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4572 			net->partial_bytes_acked = 0;
4573 			net->flight_size = 0;
4574 		}
4575 		asoc->total_flight = 0;
4576 		asoc->total_flight_count = 0;
4577 		return;
4578 	}
4579 	/*
4580 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4581 	 * things. The total byte count acked is tracked in netAckSz AND
4582 	 * netAck2 is used to track the total bytes acked that are un-
4583 	 * amibguious and were never retransmitted. We track these on a per
4584 	 * destination address basis.
4585 	 */
4586 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4587 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4588 			/* Drag along the window_tsn for cwr's */
4589 			net->cwr_window_tsn = cum_ack;
4590 		}
4591 		net->prev_cwnd = net->cwnd;
4592 		net->net_ack = 0;
4593 		net->net_ack2 = 0;
4594 
4595 		/*
4596 		 * CMT: Reset CUC and Fast recovery algo variables before
4597 		 * SACK processing
4598 		 */
4599 		net->new_pseudo_cumack = 0;
4600 		net->will_exit_fast_recovery = 0;
4601 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4602 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4603 		}
4604 	}
4605 	/* process the new consecutive TSN first */
4606 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4607 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4608 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4609 				accum_moved = 1;
4610 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4611 					/*
4612 					 * If it is less than ACKED, it is
4613 					 * now no-longer in flight. Higher
4614 					 * values may occur during marking
4615 					 */
4616 					if ((tp1->whoTo->dest_state &
4617 					    SCTP_ADDR_UNCONFIRMED) &&
4618 					    (tp1->snd_count < 2)) {
4619 						/*
4620 						 * If there was no retran
4621 						 * and the address is
4622 						 * un-confirmed and we sent
4623 						 * there and are now
4624 						 * sacked.. its confirmed,
4625 						 * mark it so.
4626 						 */
4627 						tp1->whoTo->dest_state &=
4628 						    ~SCTP_ADDR_UNCONFIRMED;
4629 					}
4630 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4631 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4632 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4633 							    tp1->whoTo->flight_size,
4634 							    tp1->book_size,
4635 							    (uint32_t)(uintptr_t)tp1->whoTo,
4636 							    tp1->rec.data.tsn);
4637 						}
4638 						sctp_flight_size_decrease(tp1);
4639 						sctp_total_flight_decrease(stcb, tp1);
4640 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4641 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4642 							    tp1);
4643 						}
4644 					}
4645 					tp1->whoTo->net_ack += tp1->send_size;
4646 
4647 					/* CMT SFR and DAC algos */
4648 					this_sack_lowest_newack = tp1->rec.data.tsn;
4649 					tp1->whoTo->saw_newack = 1;
4650 
4651 					if (tp1->snd_count < 2) {
4652 						/*
4653 						 * True non-retransmited
4654 						 * chunk
4655 						 */
4656 						tp1->whoTo->net_ack2 +=
4657 						    tp1->send_size;
4658 
4659 						/* update RTO too? */
4660 						if (tp1->do_rtt) {
4661 							if (rto_ok) {
4662 								tp1->whoTo->RTO =
4663 								    sctp_calculate_rto(stcb,
4664 								    asoc, tp1->whoTo,
4665 								    &tp1->sent_rcv_time,
4666 								    SCTP_RTT_FROM_DATA);
4667 								rto_ok = 0;
4668 							}
4669 							if (tp1->whoTo->rto_needed == 0) {
4670 								tp1->whoTo->rto_needed = 1;
4671 							}
4672 							tp1->do_rtt = 0;
4673 						}
4674 					}
4675 					/*
4676 					 * CMT: CUCv2 algorithm. From the
4677 					 * cumack'd TSNs, for each TSN being
4678 					 * acked for the first time, set the
4679 					 * following variables for the
4680 					 * corresp destination.
4681 					 * new_pseudo_cumack will trigger a
4682 					 * cwnd update.
4683 					 * find_(rtx_)pseudo_cumack will
4684 					 * trigger search for the next
4685 					 * expected (rtx-)pseudo-cumack.
4686 					 */
4687 					tp1->whoTo->new_pseudo_cumack = 1;
4688 					tp1->whoTo->find_pseudo_cumack = 1;
4689 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4690 
4691 
4692 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4693 						sctp_log_sack(asoc->last_acked_seq,
4694 						    cum_ack,
4695 						    tp1->rec.data.tsn,
4696 						    0,
4697 						    0,
4698 						    SCTP_LOG_TSN_ACKED);
4699 					}
4700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4701 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4702 					}
4703 				}
4704 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4705 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4706 #ifdef SCTP_AUDITING_ENABLED
4707 					sctp_audit_log(0xB3,
4708 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4709 #endif
4710 				}
4711 				if (tp1->rec.data.chunk_was_revoked) {
4712 					/* deflate the cwnd */
4713 					tp1->whoTo->cwnd -= tp1->book_size;
4714 					tp1->rec.data.chunk_was_revoked = 0;
4715 				}
4716 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4717 					tp1->sent = SCTP_DATAGRAM_ACKED;
4718 				}
4719 			}
4720 		} else {
4721 			break;
4722 		}
4723 	}
4724 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4725 	/* always set this up to cum-ack */
4726 	asoc->this_sack_highest_gap = last_tsn;
4727 
4728 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4729 
4730 		/*
4731 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4732 		 * to be greater than the cumack. Also reset saw_newack to 0
4733 		 * for all dests.
4734 		 */
4735 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4736 			net->saw_newack = 0;
4737 			net->this_sack_highest_newack = last_tsn;
4738 		}
4739 
4740 		/*
4741 		 * thisSackHighestGap will increase while handling NEW
4742 		 * segments this_sack_highest_newack will increase while
4743 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4744 		 * used for CMT DAC algo. saw_newack will also change.
4745 		 */
4746 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4747 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4748 		    num_seg, num_nr_seg, &rto_ok)) {
4749 			wake_him++;
4750 		}
4751 		/*
4752 		 * validate the biggest_tsn_acked in the gap acks if strict
4753 		 * adherence is wanted.
4754 		 */
4755 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4756 			/*
4757 			 * peer is either confused or we are under attack.
4758 			 * We must abort.
4759 			 */
4760 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4761 			    biggest_tsn_acked, send_s);
4762 			goto hopeless_peer;
4763 		}
4764 	}
4765 	/*******************************************/
4766 	/* cancel ALL T3-send timer if accum moved */
4767 	/*******************************************/
4768 	if (asoc->sctp_cmt_on_off > 0) {
4769 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4770 			if (net->new_pseudo_cumack)
4771 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4772 				    stcb, net,
4773 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4774 
4775 		}
4776 	} else {
4777 		if (accum_moved) {
4778 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4779 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4780 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4781 			}
4782 		}
4783 	}
4784 	/********************************************/
4785 	/* drop the acked chunks from the sentqueue */
4786 	/********************************************/
4787 	asoc->last_acked_seq = cum_ack;
4788 
4789 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4790 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4791 			break;
4792 		}
4793 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4794 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4795 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4796 #ifdef INVARIANTS
4797 			} else {
4798 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4799 #endif
4800 			}
4801 		}
4802 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4803 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4804 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4805 			asoc->trigger_reset = 1;
4806 		}
4807 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4808 		if (PR_SCTP_ENABLED(tp1->flags)) {
4809 			if (asoc->pr_sctp_cnt != 0)
4810 				asoc->pr_sctp_cnt--;
4811 		}
4812 		asoc->sent_queue_cnt--;
4813 		if (tp1->data) {
4814 			/* sa_ignore NO_NULL_CHK */
4815 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4816 			sctp_m_freem(tp1->data);
4817 			tp1->data = NULL;
4818 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4819 				asoc->sent_queue_cnt_removeable--;
4820 			}
4821 		}
4822 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4823 			sctp_log_sack(asoc->last_acked_seq,
4824 			    cum_ack,
4825 			    tp1->rec.data.tsn,
4826 			    0,
4827 			    0,
4828 			    SCTP_LOG_FREE_SENT);
4829 		}
4830 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4831 		wake_him++;
4832 	}
4833 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4834 #ifdef INVARIANTS
4835 		panic("Warning flight size is positive and should be 0");
4836 #else
4837 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4838 		    asoc->total_flight);
4839 #endif
4840 		asoc->total_flight = 0;
4841 	}
4842 	/* sa_ignore NO_NULL_CHK */
4843 	if ((wake_him) && (stcb->sctp_socket)) {
4844 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4845 		struct socket *so;
4846 
4847 #endif
4848 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4849 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4850 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4851 		}
4852 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4853 		so = SCTP_INP_SO(stcb->sctp_ep);
4854 		atomic_add_int(&stcb->asoc.refcnt, 1);
4855 		SCTP_TCB_UNLOCK(stcb);
4856 		SCTP_SOCKET_LOCK(so, 1);
4857 		SCTP_TCB_LOCK(stcb);
4858 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4859 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4860 			/* assoc was freed while we were unlocked */
4861 			SCTP_SOCKET_UNLOCK(so, 1);
4862 			return;
4863 		}
4864 #endif
4865 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4866 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4867 		SCTP_SOCKET_UNLOCK(so, 1);
4868 #endif
4869 	} else {
4870 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4871 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4872 		}
4873 	}
4874 
4875 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4876 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4877 			/* Setup so we will exit RFC2582 fast recovery */
4878 			will_exit_fast_recovery = 1;
4879 		}
4880 	}
4881 	/*
4882 	 * Check for revoked fragments:
4883 	 *
4884 	 * if Previous sack - Had no frags then we can't have any revoked if
4885 	 * Previous sack - Had frag's then - If we now have frags aka
4886 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4887 	 * some of them. else - The peer revoked all ACKED fragments, since
4888 	 * we had some before and now we have NONE.
4889 	 */
4890 
4891 	if (num_seg) {
4892 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4893 		asoc->saw_sack_with_frags = 1;
4894 	} else if (asoc->saw_sack_with_frags) {
4895 		int cnt_revoked = 0;
4896 
4897 		/* Peer revoked all dg's marked or acked */
4898 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4899 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4900 				tp1->sent = SCTP_DATAGRAM_SENT;
4901 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4902 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4903 					    tp1->whoTo->flight_size,
4904 					    tp1->book_size,
4905 					    (uint32_t)(uintptr_t)tp1->whoTo,
4906 					    tp1->rec.data.tsn);
4907 				}
4908 				sctp_flight_size_increase(tp1);
4909 				sctp_total_flight_increase(stcb, tp1);
4910 				tp1->rec.data.chunk_was_revoked = 1;
4911 				/*
4912 				 * To ensure that this increase in
4913 				 * flightsize, which is artificial, does not
4914 				 * throttle the sender, we also increase the
4915 				 * cwnd artificially.
4916 				 */
4917 				tp1->whoTo->cwnd += tp1->book_size;
4918 				cnt_revoked++;
4919 			}
4920 		}
4921 		if (cnt_revoked) {
4922 			reneged_all = 1;
4923 		}
4924 		asoc->saw_sack_with_frags = 0;
4925 	}
4926 	if (num_nr_seg > 0)
4927 		asoc->saw_sack_with_nr_frags = 1;
4928 	else
4929 		asoc->saw_sack_with_nr_frags = 0;
4930 
4931 	/* JRS - Use the congestion control given in the CC module */
4932 	if (ecne_seen == 0) {
4933 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4934 			if (net->net_ack2 > 0) {
4935 				/*
4936 				 * Karn's rule applies to clearing error
4937 				 * count, this is optional.
4938 				 */
4939 				net->error_count = 0;
4940 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4941 					/* addr came good */
4942 					net->dest_state |= SCTP_ADDR_REACHABLE;
4943 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4944 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4945 				}
4946 				if (net == stcb->asoc.primary_destination) {
4947 					if (stcb->asoc.alternate) {
4948 						/*
4949 						 * release the alternate,
4950 						 * primary is good
4951 						 */
4952 						sctp_free_remote_addr(stcb->asoc.alternate);
4953 						stcb->asoc.alternate = NULL;
4954 					}
4955 				}
4956 				if (net->dest_state & SCTP_ADDR_PF) {
4957 					net->dest_state &= ~SCTP_ADDR_PF;
4958 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4959 					    stcb->sctp_ep, stcb, net,
4960 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4961 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4962 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4963 					/* Done with this net */
4964 					net->net_ack = 0;
4965 				}
4966 				/* restore any doubled timers */
4967 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4968 				if (net->RTO < stcb->asoc.minrto) {
4969 					net->RTO = stcb->asoc.minrto;
4970 				}
4971 				if (net->RTO > stcb->asoc.maxrto) {
4972 					net->RTO = stcb->asoc.maxrto;
4973 				}
4974 			}
4975 		}
4976 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4977 	}
4978 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4979 		/* nothing left in-flight */
4980 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4981 			/* stop all timers */
4982 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4983 			    stcb, net,
4984 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4985 			net->flight_size = 0;
4986 			net->partial_bytes_acked = 0;
4987 		}
4988 		asoc->total_flight = 0;
4989 		asoc->total_flight_count = 0;
4990 	}
4991 	/**********************************/
4992 	/* Now what about shutdown issues */
4993 	/**********************************/
4994 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4995 		/* nothing left on sendqueue.. consider done */
4996 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4997 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4998 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4999 		}
5000 		asoc->peers_rwnd = a_rwnd;
5001 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 			/* SWS sender side engages */
5003 			asoc->peers_rwnd = 0;
5004 		}
5005 		/* clean up */
5006 		if ((asoc->stream_queue_cnt == 1) &&
5007 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5008 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5009 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5010 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5011 		}
5012 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5013 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5014 		    (asoc->stream_queue_cnt == 1) &&
5015 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5016 			struct mbuf *op_err;
5017 
5018 			*abort_now = 1;
5019 			/* XXX */
5020 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5021 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5022 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5023 			return;
5024 		}
5025 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5026 		    (asoc->stream_queue_cnt == 0)) {
5027 			struct sctp_nets *netp;
5028 
5029 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5030 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5031 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5032 			}
5033 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5034 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5035 			sctp_stop_timers_for_shutdown(stcb);
5036 			if (asoc->alternate) {
5037 				netp = asoc->alternate;
5038 			} else {
5039 				netp = asoc->primary_destination;
5040 			}
5041 			sctp_send_shutdown(stcb, netp);
5042 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5043 			    stcb->sctp_ep, stcb, netp);
5044 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5045 			    stcb->sctp_ep, stcb, netp);
5046 			return;
5047 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5048 		    (asoc->stream_queue_cnt == 0)) {
5049 			struct sctp_nets *netp;
5050 
5051 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5052 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5053 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5054 			sctp_stop_timers_for_shutdown(stcb);
5055 			if (asoc->alternate) {
5056 				netp = asoc->alternate;
5057 			} else {
5058 				netp = asoc->primary_destination;
5059 			}
5060 			sctp_send_shutdown_ack(stcb, netp);
5061 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5062 			    stcb->sctp_ep, stcb, netp);
5063 			return;
5064 		}
5065 	}
5066 	/*
5067 	 * Now here we are going to recycle net_ack for a different use...
5068 	 * HEADS UP.
5069 	 */
5070 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5071 		net->net_ack = 0;
5072 	}
5073 
5074 	/*
5075 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5076 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5077 	 * automatically ensure that.
5078 	 */
5079 	if ((asoc->sctp_cmt_on_off > 0) &&
5080 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5081 	    (cmt_dac_flag == 0)) {
5082 		this_sack_lowest_newack = cum_ack;
5083 	}
5084 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5085 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5086 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5087 	}
5088 	/* JRS - Use the congestion control given in the CC module */
5089 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5090 
5091 	/* Now are we exiting loss recovery ? */
5092 	if (will_exit_fast_recovery) {
5093 		/* Ok, we must exit fast recovery */
5094 		asoc->fast_retran_loss_recovery = 0;
5095 	}
5096 	if ((asoc->sat_t3_loss_recovery) &&
5097 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5098 		/* end satellite t3 loss recovery */
5099 		asoc->sat_t3_loss_recovery = 0;
5100 	}
5101 	/*
5102 	 * CMT Fast recovery
5103 	 */
5104 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5105 		if (net->will_exit_fast_recovery) {
5106 			/* Ok, we must exit fast recovery */
5107 			net->fast_retran_loss_recovery = 0;
5108 		}
5109 	}
5110 
5111 	/* Adjust and set the new rwnd value */
5112 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5113 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5114 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5115 	}
5116 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5117 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5118 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5119 		/* SWS sender side engages */
5120 		asoc->peers_rwnd = 0;
5121 	}
5122 	if (asoc->peers_rwnd > old_rwnd) {
5123 		win_probe_recovery = 1;
5124 	}
5125 	/*
5126 	 * Now we must setup so we have a timer up for anyone with
5127 	 * outstanding data.
5128 	 */
5129 	done_once = 0;
5130 again:
5131 	j = 0;
5132 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5133 		if (win_probe_recovery && (net->window_probe)) {
5134 			win_probe_recovered = 1;
5135 			/*-
5136 			 * Find first chunk that was used with
5137 			 * window probe and clear the event. Put
5138 			 * it back into the send queue as if has
5139 			 * not been sent.
5140 			 */
5141 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5142 				if (tp1->window_probe) {
5143 					sctp_window_probe_recovery(stcb, asoc, tp1);
5144 					break;
5145 				}
5146 			}
5147 		}
5148 		if (net->flight_size) {
5149 			j++;
5150 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5151 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5152 				    stcb->sctp_ep, stcb, net);
5153 			}
5154 			if (net->window_probe) {
5155 				net->window_probe = 0;
5156 			}
5157 		} else {
5158 			if (net->window_probe) {
5159 				/*
5160 				 * In window probes we must assure a timer
5161 				 * is still running there
5162 				 */
5163 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5164 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5165 					    stcb->sctp_ep, stcb, net);
5166 
5167 				}
5168 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5169 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5170 				    stcb, net,
5171 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5172 			}
5173 		}
5174 	}
5175 	if ((j == 0) &&
5176 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5177 	    (asoc->sent_queue_retran_cnt == 0) &&
5178 	    (win_probe_recovered == 0) &&
5179 	    (done_once == 0)) {
5180 		/*
5181 		 * huh, this should not happen unless all packets are
5182 		 * PR-SCTP and marked to skip of course.
5183 		 */
5184 		if (sctp_fs_audit(asoc)) {
5185 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5186 				net->flight_size = 0;
5187 			}
5188 			asoc->total_flight = 0;
5189 			asoc->total_flight_count = 0;
5190 			asoc->sent_queue_retran_cnt = 0;
5191 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5192 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5193 					sctp_flight_size_increase(tp1);
5194 					sctp_total_flight_increase(stcb, tp1);
5195 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5196 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5197 				}
5198 			}
5199 		}
5200 		done_once = 1;
5201 		goto again;
5202 	}
5203 	/*********************************************/
5204 	/* Here we perform PR-SCTP procedures        */
5205 	/* (section 4.2)                             */
5206 	/*********************************************/
5207 	/* C1. update advancedPeerAckPoint */
5208 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5209 		asoc->advanced_peer_ack_point = cum_ack;
5210 	}
5211 	/* C2. try to further move advancedPeerAckPoint ahead */
5212 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5213 		struct sctp_tmit_chunk *lchk;
5214 		uint32_t old_adv_peer_ack_point;
5215 
5216 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5217 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5218 		/* C3. See if we need to send a Fwd-TSN */
5219 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5220 			/*
5221 			 * ISSUE with ECN, see FWD-TSN processing.
5222 			 */
5223 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5224 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5225 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5226 				    old_adv_peer_ack_point);
5227 			}
5228 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5229 				send_forward_tsn(stcb, asoc);
5230 			} else if (lchk) {
5231 				/* try to FR fwd-tsn's that get lost too */
5232 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5233 					send_forward_tsn(stcb, asoc);
5234 				}
5235 			}
5236 		}
5237 		if (lchk) {
5238 			/* Assure a timer is up */
5239 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5240 			    stcb->sctp_ep, stcb, lchk->whoTo);
5241 		}
5242 	}
5243 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5244 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5245 		    a_rwnd,
5246 		    stcb->asoc.peers_rwnd,
5247 		    stcb->asoc.total_flight,
5248 		    stcb->asoc.total_output_queue_size);
5249 	}
5250 }
5251 
5252 void
5253 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5254 {
5255 	/* Copy cum-ack */
5256 	uint32_t cum_ack, a_rwnd;
5257 
5258 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5259 	/* Arrange so a_rwnd does NOT change */
5260 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5261 
5262 	/* Now call the express sack handling */
5263 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5264 }
5265 
5266 static void
5267 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5268     struct sctp_stream_in *strmin)
5269 {
5270 	struct sctp_queued_to_read *control, *ncontrol;
5271 	struct sctp_association *asoc;
5272 	uint32_t mid;
5273 	int need_reasm_check = 0;
5274 
5275 	asoc = &stcb->asoc;
5276 	mid = strmin->last_mid_delivered;
5277 	/*
5278 	 * First deliver anything prior to and including the stream no that
5279 	 * came in.
5280 	 */
5281 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5282 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5283 			/* this is deliverable now */
5284 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5285 				if (control->on_strm_q) {
5286 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5287 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5288 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5289 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5290 #ifdef INVARIANTS
5291 					} else {
5292 						panic("strmin: %p ctl: %p unknown %d",
5293 						    strmin, control, control->on_strm_q);
5294 #endif
5295 					}
5296 					control->on_strm_q = 0;
5297 				}
5298 				/* subtract pending on streams */
5299 				if (asoc->size_on_all_streams >= control->length) {
5300 					asoc->size_on_all_streams -= control->length;
5301 				} else {
5302 #ifdef INVARIANTS
5303 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5304 #else
5305 					asoc->size_on_all_streams = 0;
5306 #endif
5307 				}
5308 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5309 				/* deliver it to at least the delivery-q */
5310 				if (stcb->sctp_socket) {
5311 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5312 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5313 					    control,
5314 					    &stcb->sctp_socket->so_rcv,
5315 					    1, SCTP_READ_LOCK_HELD,
5316 					    SCTP_SO_NOT_LOCKED);
5317 				}
5318 			} else {
5319 				/* Its a fragmented message */
5320 				if (control->first_frag_seen) {
5321 					/*
5322 					 * Make it so this is next to
5323 					 * deliver, we restore later
5324 					 */
5325 					strmin->last_mid_delivered = control->mid - 1;
5326 					need_reasm_check = 1;
5327 					break;
5328 				}
5329 			}
5330 		} else {
5331 			/* no more delivery now. */
5332 			break;
5333 		}
5334 	}
5335 	if (need_reasm_check) {
5336 		int ret;
5337 
5338 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5339 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5340 			/* Restore the next to deliver unless we are ahead */
5341 			strmin->last_mid_delivered = mid;
5342 		}
5343 		if (ret == 0) {
5344 			/* Left the front Partial one on */
5345 			return;
5346 		}
5347 		need_reasm_check = 0;
5348 	}
5349 	/*
5350 	 * now we must deliver things in queue the normal way  if any are
5351 	 * now ready.
5352 	 */
5353 	mid = strmin->last_mid_delivered + 1;
5354 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5355 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5356 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5357 				/* this is deliverable now */
5358 				if (control->on_strm_q) {
5359 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5360 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5361 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5362 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5363 #ifdef INVARIANTS
5364 					} else {
5365 						panic("strmin: %p ctl: %p unknown %d",
5366 						    strmin, control, control->on_strm_q);
5367 #endif
5368 					}
5369 					control->on_strm_q = 0;
5370 				}
5371 				/* subtract pending on streams */
5372 				if (asoc->size_on_all_streams >= control->length) {
5373 					asoc->size_on_all_streams -= control->length;
5374 				} else {
5375 #ifdef INVARIANTS
5376 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5377 #else
5378 					asoc->size_on_all_streams = 0;
5379 #endif
5380 				}
5381 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5382 				/* deliver it to at least the delivery-q */
5383 				strmin->last_mid_delivered = control->mid;
5384 				if (stcb->sctp_socket) {
5385 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5386 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5387 					    control,
5388 					    &stcb->sctp_socket->so_rcv, 1,
5389 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5390 
5391 				}
5392 				mid = strmin->last_mid_delivered + 1;
5393 			} else {
5394 				/* Its a fragmented message */
5395 				if (control->first_frag_seen) {
5396 					/*
5397 					 * Make it so this is next to
5398 					 * deliver
5399 					 */
5400 					strmin->last_mid_delivered = control->mid - 1;
5401 					need_reasm_check = 1;
5402 					break;
5403 				}
5404 			}
5405 		} else {
5406 			break;
5407 		}
5408 	}
5409 	if (need_reasm_check) {
5410 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5411 	}
5412 }
5413 
5414 
5415 
5416 static void
5417 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5418     struct sctp_association *asoc,
5419     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5420 {
5421 	struct sctp_queued_to_read *control;
5422 	struct sctp_stream_in *strm;
5423 	struct sctp_tmit_chunk *chk, *nchk;
5424 	int cnt_removed = 0;
5425 
5426 	/*
5427 	 * For now large messages held on the stream reasm that are complete
5428 	 * will be tossed too. We could in theory do more work to spin
5429 	 * through and stop after dumping one msg aka seeing the start of a
5430 	 * new msg at the head, and call the delivery function... to see if
5431 	 * it can be delivered... But for now we just dump everything on the
5432 	 * queue.
5433 	 */
5434 	strm = &asoc->strmin[stream];
5435 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5436 	if (control == NULL) {
5437 		/* Not found */
5438 		return;
5439 	}
5440 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5441 		return;
5442 	}
5443 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5444 		/* Purge hanging chunks */
5445 		if (!asoc->idata_supported && (ordered == 0)) {
5446 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5447 				break;
5448 			}
5449 		}
5450 		cnt_removed++;
5451 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5452 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5453 			asoc->size_on_reasm_queue -= chk->send_size;
5454 		} else {
5455 #ifdef INVARIANTS
5456 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5457 #else
5458 			asoc->size_on_reasm_queue = 0;
5459 #endif
5460 		}
5461 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5462 		if (chk->data) {
5463 			sctp_m_freem(chk->data);
5464 			chk->data = NULL;
5465 		}
5466 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5467 	}
5468 	if (!TAILQ_EMPTY(&control->reasm)) {
5469 		/* This has to be old data, unordered */
5470 		if (control->data) {
5471 			sctp_m_freem(control->data);
5472 			control->data = NULL;
5473 		}
5474 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5475 		chk = TAILQ_FIRST(&control->reasm);
5476 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5477 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5478 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5479 			    chk, SCTP_READ_LOCK_HELD);
5480 		}
5481 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5482 		return;
5483 	}
5484 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5485 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5486 		if (asoc->size_on_all_streams >= control->length) {
5487 			asoc->size_on_all_streams -= control->length;
5488 		} else {
5489 #ifdef INVARIANTS
5490 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5491 #else
5492 			asoc->size_on_all_streams = 0;
5493 #endif
5494 		}
5495 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5496 		control->on_strm_q = 0;
5497 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5498 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5499 		control->on_strm_q = 0;
5500 #ifdef INVARIANTS
5501 	} else if (control->on_strm_q) {
5502 		panic("strm: %p ctl: %p unknown %d",
5503 		    strm, control, control->on_strm_q);
5504 #endif
5505 	}
5506 	control->on_strm_q = 0;
5507 	if (control->on_read_q == 0) {
5508 		sctp_free_remote_addr(control->whoFrom);
5509 		if (control->data) {
5510 			sctp_m_freem(control->data);
5511 			control->data = NULL;
5512 		}
5513 		sctp_free_a_readq(stcb, control);
5514 	}
5515 }
5516 
5517 void
5518 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5519     struct sctp_forward_tsn_chunk *fwd,
5520     int *abort_flag, struct mbuf *m, int offset)
5521 {
5522 	/* The pr-sctp fwd tsn */
5523 	/*
5524 	 * here we will perform all the data receiver side steps for
5525 	 * processing FwdTSN, as required in by pr-sctp draft:
5526 	 *
5527 	 * Assume we get FwdTSN(x):
5528 	 *
5529 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5530 	 * + others we have 3) examine and update re-ordering queue on
5531 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5532 	 * report where we are.
5533 	 */
5534 	struct sctp_association *asoc;
5535 	uint32_t new_cum_tsn, gap;
5536 	unsigned int i, fwd_sz, m_size;
5537 	uint32_t str_seq;
5538 	struct sctp_stream_in *strm;
5539 	struct sctp_queued_to_read *control, *sv;
5540 
5541 	asoc = &stcb->asoc;
5542 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5543 		SCTPDBG(SCTP_DEBUG_INDATA1,
5544 		    "Bad size too small/big fwd-tsn\n");
5545 		return;
5546 	}
5547 	m_size = (stcb->asoc.mapping_array_size << 3);
5548 	/*************************************************************/
5549 	/* 1. Here we update local cumTSN and shift the bitmap array */
5550 	/*************************************************************/
5551 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5552 
5553 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5554 		/* Already got there ... */
5555 		return;
5556 	}
5557 	/*
5558 	 * now we know the new TSN is more advanced, let's find the actual
5559 	 * gap
5560 	 */
5561 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5562 	asoc->cumulative_tsn = new_cum_tsn;
5563 	if (gap >= m_size) {
5564 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5565 			struct mbuf *op_err;
5566 			char msg[SCTP_DIAG_INFO_LEN];
5567 
5568 			/*
5569 			 * out of range (of single byte chunks in the rwnd I
5570 			 * give out). This must be an attacker.
5571 			 */
5572 			*abort_flag = 1;
5573 			snprintf(msg, sizeof(msg),
5574 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5575 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5576 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5577 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5578 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5579 			return;
5580 		}
5581 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5582 
5583 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5584 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5585 		asoc->highest_tsn_inside_map = new_cum_tsn;
5586 
5587 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5588 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5589 
5590 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5591 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5592 		}
5593 	} else {
5594 		SCTP_TCB_LOCK_ASSERT(stcb);
5595 		for (i = 0; i <= gap; i++) {
5596 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5597 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5598 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5599 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5600 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5601 				}
5602 			}
5603 		}
5604 	}
5605 	/*************************************************************/
5606 	/* 2. Clear up re-assembly queue                             */
5607 	/*************************************************************/
5608 
5609 	/* This is now done as part of clearing up the stream/seq */
5610 	if (asoc->idata_supported == 0) {
5611 		uint16_t sid;
5612 
5613 		/* Flush all the un-ordered data based on cum-tsn */
5614 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5615 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5616 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5617 		}
5618 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5619 	}
5620 	/*******************************************************/
5621 	/* 3. Update the PR-stream re-ordering queues and fix  */
5622 	/* delivery issues as needed.                       */
5623 	/*******************************************************/
5624 	fwd_sz -= sizeof(*fwd);
5625 	if (m && fwd_sz) {
5626 		/* New method. */
5627 		unsigned int num_str;
5628 		uint32_t mid, cur_mid;
5629 		uint16_t sid;
5630 		uint16_t ordered, flags;
5631 		struct sctp_strseq *stseq, strseqbuf;
5632 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5633 
5634 		offset += sizeof(*fwd);
5635 
5636 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5637 		if (asoc->idata_supported) {
5638 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5639 		} else {
5640 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5641 		}
5642 		for (i = 0; i < num_str; i++) {
5643 			if (asoc->idata_supported) {
5644 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5645 				    sizeof(struct sctp_strseq_mid),
5646 				    (uint8_t *)&strseqbuf_m);
5647 				offset += sizeof(struct sctp_strseq_mid);
5648 				if (stseq_m == NULL) {
5649 					break;
5650 				}
5651 				sid = ntohs(stseq_m->sid);
5652 				mid = ntohl(stseq_m->mid);
5653 				flags = ntohs(stseq_m->flags);
5654 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5655 					ordered = 0;
5656 				} else {
5657 					ordered = 1;
5658 				}
5659 			} else {
5660 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5661 				    sizeof(struct sctp_strseq),
5662 				    (uint8_t *)&strseqbuf);
5663 				offset += sizeof(struct sctp_strseq);
5664 				if (stseq == NULL) {
5665 					break;
5666 				}
5667 				sid = ntohs(stseq->sid);
5668 				mid = (uint32_t)ntohs(stseq->ssn);
5669 				ordered = 1;
5670 			}
5671 			/* Convert */
5672 
5673 			/* now process */
5674 
5675 			/*
5676 			 * Ok we now look for the stream/seq on the read
5677 			 * queue where its not all delivered. If we find it
5678 			 * we transmute the read entry into a PDI_ABORTED.
5679 			 */
5680 			if (sid >= asoc->streamincnt) {
5681 				/* screwed up streams, stop!  */
5682 				break;
5683 			}
5684 			if ((asoc->str_of_pdapi == sid) &&
5685 			    (asoc->ssn_of_pdapi == mid)) {
5686 				/*
5687 				 * If this is the one we were partially
5688 				 * delivering now then we no longer are.
5689 				 * Note this will change with the reassembly
5690 				 * re-write.
5691 				 */
5692 				asoc->fragmented_delivery_inprogress = 0;
5693 			}
5694 			strm = &asoc->strmin[sid];
5695 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5696 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5697 			}
5698 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5699 				if ((control->sinfo_stream == sid) &&
5700 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5701 					str_seq = (sid << 16) | (0x0000ffff & mid);
5702 					control->pdapi_aborted = 1;
5703 					sv = stcb->asoc.control_pdapi;
5704 					control->end_added = 1;
5705 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5706 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5707 						if (asoc->size_on_all_streams >= control->length) {
5708 							asoc->size_on_all_streams -= control->length;
5709 						} else {
5710 #ifdef INVARIANTS
5711 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5712 #else
5713 							asoc->size_on_all_streams = 0;
5714 #endif
5715 						}
5716 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5717 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5718 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5719 #ifdef INVARIANTS
5720 					} else if (control->on_strm_q) {
5721 						panic("strm: %p ctl: %p unknown %d",
5722 						    strm, control, control->on_strm_q);
5723 #endif
5724 					}
5725 					control->on_strm_q = 0;
5726 					stcb->asoc.control_pdapi = control;
5727 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5728 					    stcb,
5729 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5730 					    (void *)&str_seq,
5731 					    SCTP_SO_NOT_LOCKED);
5732 					stcb->asoc.control_pdapi = sv;
5733 					break;
5734 				} else if ((control->sinfo_stream == sid) &&
5735 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5736 					/* We are past our victim SSN */
5737 					break;
5738 				}
5739 			}
5740 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5741 				/* Update the sequence number */
5742 				strm->last_mid_delivered = mid;
5743 			}
5744 			/* now kick the stream the new way */
5745 			/* sa_ignore NO_NULL_CHK */
5746 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5747 		}
5748 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5749 	}
5750 	/*
5751 	 * Now slide thing forward.
5752 	 */
5753 	sctp_slide_mapping_arrays(stcb);
5754 }
5755