xref: /freebsd/sys/netinet/sctp_indata.c (revision 40427cca7a9ae77b095936fb1954417c290cfb17)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send that is and will be sending it .. for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static uint32_t
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-2-m socket. Since
84 	 * the sb_cc is the count that everyone as put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * associations data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		KASSERT(asoc->cnt_on_reasm_queue == 0, ("cnt_on_reasm_queue is %u", asoc->cnt_on_reasm_queue));
96 		KASSERT(asoc->cnt_on_all_streams == 0, ("cnt_on_all_streams is %u", asoc->cnt_on_all_streams));
97 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
98 		return (calc);
99 	}
100 	/* get actual space */
101 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
102 	/*
103 	 * take out what has NOT been put on socket queue and we yet hold
104 	 * for putting up.
105 	 */
106 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
107 	    asoc->cnt_on_reasm_queue * MSIZE));
108 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
109 	    asoc->cnt_on_all_streams * MSIZE));
110 	if (calc == 0) {
111 		/* out of space */
112 		return (calc);
113 	}
114 	/* what is the overhead of all these rwnd's */
115 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
116 	/*
117 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
118 	 * even it is 0. SWS engaged
119 	 */
120 	if (calc < stcb->asoc.my_rwnd_control_len) {
121 		calc = 1;
122 	}
123 	return (calc);
124 }
125 
126 
127 
128 /*
129  * Build out our readq entry based on the incoming packet.
130  */
131 struct sctp_queued_to_read *
132 sctp_build_readq_entry(struct sctp_tcb *stcb,
133     struct sctp_nets *net,
134     uint32_t tsn, uint32_t ppid,
135     uint32_t context, uint16_t sid,
136     uint32_t mid, uint8_t flags,
137     struct mbuf *dm)
138 {
139 	struct sctp_queued_to_read *read_queue_e = NULL;
140 
141 	sctp_alloc_a_readq(stcb, read_queue_e);
142 	if (read_queue_e == NULL) {
143 		goto failed_build;
144 	}
145 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
146 	read_queue_e->sinfo_stream = sid;
147 	read_queue_e->sinfo_flags = (flags << 8);
148 	read_queue_e->sinfo_ppid = ppid;
149 	read_queue_e->sinfo_context = context;
150 	read_queue_e->sinfo_tsn = tsn;
151 	read_queue_e->sinfo_cumtsn = tsn;
152 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
153 	read_queue_e->mid = mid;
154 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
155 	TAILQ_INIT(&read_queue_e->reasm);
156 	read_queue_e->whoFrom = net;
157 	atomic_add_int(&net->ref_count, 1);
158 	read_queue_e->data = dm;
159 	read_queue_e->stcb = stcb;
160 	read_queue_e->port_from = stcb->rport;
161 failed_build:
162 	return (read_queue_e);
163 }
164 
165 struct mbuf *
166 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
167 {
168 	struct sctp_extrcvinfo *seinfo;
169 	struct sctp_sndrcvinfo *outinfo;
170 	struct sctp_rcvinfo *rcvinfo;
171 	struct sctp_nxtinfo *nxtinfo;
172 	struct cmsghdr *cmh;
173 	struct mbuf *ret;
174 	int len;
175 	int use_extended;
176 	int provide_nxt;
177 
178 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
179 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
180 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
181 		/* user does not want any ancillary data */
182 		return (NULL);
183 	}
184 	len = 0;
185 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
186 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
187 	}
188 	seinfo = (struct sctp_extrcvinfo *)sinfo;
189 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
190 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
191 		provide_nxt = 1;
192 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
193 	} else {
194 		provide_nxt = 0;
195 	}
196 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
197 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
198 			use_extended = 1;
199 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
200 		} else {
201 			use_extended = 0;
202 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
203 		}
204 	} else {
205 		use_extended = 0;
206 	}
207 
208 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
209 	if (ret == NULL) {
210 		/* No space */
211 		return (ret);
212 	}
213 	SCTP_BUF_LEN(ret) = 0;
214 
215 	/* We need a CMSG header followed by the struct */
216 	cmh = mtod(ret, struct cmsghdr *);
217 	/*
218 	 * Make sure that there is no un-initialized padding between the
219 	 * cmsg header and cmsg data and after the cmsg data.
220 	 */
221 	memset(cmh, 0, len);
222 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
223 		cmh->cmsg_level = IPPROTO_SCTP;
224 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
225 		cmh->cmsg_type = SCTP_RCVINFO;
226 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
227 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
228 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
229 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
230 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
231 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
232 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
233 		rcvinfo->rcv_context = sinfo->sinfo_context;
234 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
235 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
236 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
237 	}
238 	if (provide_nxt) {
239 		cmh->cmsg_level = IPPROTO_SCTP;
240 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
241 		cmh->cmsg_type = SCTP_NXTINFO;
242 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
243 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
244 		nxtinfo->nxt_flags = 0;
245 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
246 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
247 		}
248 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
249 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
250 		}
251 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
252 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
253 		}
254 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
255 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
256 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
257 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
258 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
259 	}
260 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
261 		cmh->cmsg_level = IPPROTO_SCTP;
262 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
263 		if (use_extended) {
264 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
265 			cmh->cmsg_type = SCTP_EXTRCV;
266 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
267 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
268 		} else {
269 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
270 			cmh->cmsg_type = SCTP_SNDRCV;
271 			*outinfo = *sinfo;
272 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
273 		}
274 	}
275 	return (ret);
276 }
277 
278 
279 static void
280 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
281 {
282 	uint32_t gap, i, cumackp1;
283 	int fnd = 0;
284 	int in_r = 0, in_nr = 0;
285 
286 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
287 		return;
288 	}
289 	cumackp1 = asoc->cumulative_tsn + 1;
290 	if (SCTP_TSN_GT(cumackp1, tsn)) {
291 		/*
292 		 * this tsn is behind the cum ack and thus we don't need to
293 		 * worry about it being moved from one to the other.
294 		 */
295 		return;
296 	}
297 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
298 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
299 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
300 	if ((in_r == 0) && (in_nr == 0)) {
301 #ifdef INVARIANTS
302 		panic("Things are really messed up now");
303 #else
304 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
305 		sctp_print_mapping_array(asoc);
306 #endif
307 	}
308 	if (in_nr == 0)
309 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
310 	if (in_r)
311 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
312 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
313 		asoc->highest_tsn_inside_nr_map = tsn;
314 	}
315 	if (tsn == asoc->highest_tsn_inside_map) {
316 		/* We must back down to see what the new highest is */
317 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
318 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
319 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
320 				asoc->highest_tsn_inside_map = i;
321 				fnd = 1;
322 				break;
323 			}
324 		}
325 		if (!fnd) {
326 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
327 		}
328 	}
329 }
330 
331 static int
332 sctp_place_control_in_stream(struct sctp_stream_in *strm,
333     struct sctp_association *asoc,
334     struct sctp_queued_to_read *control)
335 {
336 	struct sctp_queued_to_read *at;
337 	struct sctp_readhead *q;
338 	uint8_t flags, unordered;
339 
340 	flags = (control->sinfo_flags >> 8);
341 	unordered = flags & SCTP_DATA_UNORDERED;
342 	if (unordered) {
343 		q = &strm->uno_inqueue;
344 		if (asoc->idata_supported == 0) {
345 			if (!TAILQ_EMPTY(q)) {
346 				/*
347 				 * Only one stream can be here in old style
348 				 * -- abort
349 				 */
350 				return (-1);
351 			}
352 			TAILQ_INSERT_TAIL(q, control, next_instrm);
353 			control->on_strm_q = SCTP_ON_UNORDERED;
354 			return (0);
355 		}
356 	} else {
357 		q = &strm->inqueue;
358 	}
359 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
360 		control->end_added = 1;
361 		control->first_frag_seen = 1;
362 		control->last_frag_seen = 1;
363 	}
364 	if (TAILQ_EMPTY(q)) {
365 		/* Empty queue */
366 		TAILQ_INSERT_HEAD(q, control, next_instrm);
367 		if (unordered) {
368 			control->on_strm_q = SCTP_ON_UNORDERED;
369 		} else {
370 			control->on_strm_q = SCTP_ON_ORDERED;
371 		}
372 		return (0);
373 	} else {
374 		TAILQ_FOREACH(at, q, next_instrm) {
375 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
376 				/*
377 				 * one in queue is bigger than the new one,
378 				 * insert before this one
379 				 */
380 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
381 				if (unordered) {
382 					control->on_strm_q = SCTP_ON_UNORDERED;
383 				} else {
384 					control->on_strm_q = SCTP_ON_ORDERED;
385 				}
386 				break;
387 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
388 				/*
389 				 * Gak, He sent me a duplicate msg id
390 				 * number?? return -1 to abort.
391 				 */
392 				return (-1);
393 			} else {
394 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
395 					/*
396 					 * We are at the end, insert it
397 					 * after this one
398 					 */
399 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
400 						sctp_log_strm_del(control, at,
401 						    SCTP_STR_LOG_FROM_INSERT_TL);
402 					}
403 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
404 					if (unordered) {
405 						control->on_strm_q = SCTP_ON_UNORDERED;
406 					} else {
407 						control->on_strm_q = SCTP_ON_ORDERED;
408 					}
409 					break;
410 				}
411 			}
412 		}
413 	}
414 	return (0);
415 }
416 
417 static void
418 sctp_abort_in_reasm(struct sctp_tcb *stcb,
419     struct sctp_queued_to_read *control,
420     struct sctp_tmit_chunk *chk,
421     int *abort_flag, int opspot)
422 {
423 	char msg[SCTP_DIAG_INFO_LEN];
424 	struct mbuf *oper;
425 
426 	if (stcb->asoc.idata_supported) {
427 		snprintf(msg, sizeof(msg),
428 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
429 		    opspot,
430 		    control->fsn_included,
431 		    chk->rec.data.tsn,
432 		    chk->rec.data.sid,
433 		    chk->rec.data.fsn, chk->rec.data.mid);
434 	} else {
435 		snprintf(msg, sizeof(msg),
436 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
437 		    opspot,
438 		    control->fsn_included,
439 		    chk->rec.data.tsn,
440 		    chk->rec.data.sid,
441 		    chk->rec.data.fsn,
442 		    (uint16_t)chk->rec.data.mid);
443 	}
444 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
445 	sctp_m_freem(chk->data);
446 	chk->data = NULL;
447 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
448 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
449 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
450 	*abort_flag = 1;
451 }
452 
453 static void
454 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
455 {
456 	/*
457 	 * The control could not be placed and must be cleaned.
458 	 */
459 	struct sctp_tmit_chunk *chk, *nchk;
460 
461 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
462 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
463 		if (chk->data)
464 			sctp_m_freem(chk->data);
465 		chk->data = NULL;
466 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
467 	}
468 	sctp_free_a_readq(stcb, control);
469 }
470 
471 /*
472  * Queue the chunk either right into the socket buffer if it is the next one
473  * to go OR put it in the correct place in the delivery queue.  If we do
474  * append to the so_buf, keep doing so until we are out of order as
475  * long as the control's entered are non-fragmented.
476  */
477 static void
478 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
479     struct sctp_association *asoc,
480     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
481 {
482 	/*
483 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
484 	 * all the data in one stream this could happen quite rapidly. One
485 	 * could use the TSN to keep track of things, but this scheme breaks
486 	 * down in the other type of stream usage that could occur. Send a
487 	 * single msg to stream 0, send 4Billion messages to stream 1, now
488 	 * send a message to stream 0. You have a situation where the TSN
489 	 * has wrapped but not in the stream. Is this worth worrying about
490 	 * or should we just change our queue sort at the bottom to be by
491 	 * TSN.
492 	 *
493 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
494 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
495 	 * assignment this could happen... and I don't see how this would be
496 	 * a violation. So for now I am undecided an will leave the sort by
497 	 * SSN alone. Maybe a hybred approach is the answer
498 	 *
499 	 */
500 	struct sctp_queued_to_read *at;
501 	int queue_needed;
502 	uint32_t nxt_todel;
503 	struct mbuf *op_err;
504 	struct sctp_stream_in *strm;
505 	char msg[SCTP_DIAG_INFO_LEN];
506 
507 	strm = &asoc->strmin[control->sinfo_stream];
508 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
509 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
510 	}
511 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
512 		/* The incoming sseq is behind where we last delivered? */
513 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
514 		    strm->last_mid_delivered, control->mid);
515 		/*
516 		 * throw it in the stream so it gets cleaned up in
517 		 * association destruction
518 		 */
519 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
520 		if (asoc->idata_supported) {
521 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
522 			    strm->last_mid_delivered, control->sinfo_tsn,
523 			    control->sinfo_stream, control->mid);
524 		} else {
525 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
526 			    (uint16_t)strm->last_mid_delivered,
527 			    control->sinfo_tsn,
528 			    control->sinfo_stream,
529 			    (uint16_t)control->mid);
530 		}
531 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
532 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
533 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
534 		*abort_flag = 1;
535 		return;
536 
537 	}
538 	queue_needed = 1;
539 	asoc->size_on_all_streams += control->length;
540 	sctp_ucount_incr(asoc->cnt_on_all_streams);
541 	nxt_todel = strm->last_mid_delivered + 1;
542 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
543 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
544 		struct socket *so;
545 
546 		so = SCTP_INP_SO(stcb->sctp_ep);
547 		atomic_add_int(&stcb->asoc.refcnt, 1);
548 		SCTP_TCB_UNLOCK(stcb);
549 		SCTP_SOCKET_LOCK(so, 1);
550 		SCTP_TCB_LOCK(stcb);
551 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
552 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
553 			SCTP_SOCKET_UNLOCK(so, 1);
554 			return;
555 		}
556 #endif
557 		/* can be delivered right away? */
558 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
559 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
560 		}
561 		/* EY it wont be queued if it could be delivered directly */
562 		queue_needed = 0;
563 		if (asoc->size_on_all_streams >= control->length) {
564 			asoc->size_on_all_streams -= control->length;
565 		} else {
566 #ifdef INVARIANTS
567 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
568 #else
569 			asoc->size_on_all_streams = 0;
570 #endif
571 		}
572 		sctp_ucount_decr(asoc->cnt_on_all_streams);
573 		strm->last_mid_delivered++;
574 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
575 		sctp_add_to_readq(stcb->sctp_ep, stcb,
576 		    control,
577 		    &stcb->sctp_socket->so_rcv, 1,
578 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
579 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
580 			/* all delivered */
581 			nxt_todel = strm->last_mid_delivered + 1;
582 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
583 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
584 				if (control->on_strm_q == SCTP_ON_ORDERED) {
585 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
586 					if (asoc->size_on_all_streams >= control->length) {
587 						asoc->size_on_all_streams -= control->length;
588 					} else {
589 #ifdef INVARIANTS
590 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
591 #else
592 						asoc->size_on_all_streams = 0;
593 #endif
594 					}
595 					sctp_ucount_decr(asoc->cnt_on_all_streams);
596 #ifdef INVARIANTS
597 				} else {
598 					panic("Huh control: %p is on_strm_q: %d",
599 					    control, control->on_strm_q);
600 #endif
601 				}
602 				control->on_strm_q = 0;
603 				strm->last_mid_delivered++;
604 				/*
605 				 * We ignore the return of deliver_data here
606 				 * since we always can hold the chunk on the
607 				 * d-queue. And we have a finite number that
608 				 * can be delivered from the strq.
609 				 */
610 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
611 					sctp_log_strm_del(control, NULL,
612 					    SCTP_STR_LOG_FROM_IMMED_DEL);
613 				}
614 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
615 				sctp_add_to_readq(stcb->sctp_ep, stcb,
616 				    control,
617 				    &stcb->sctp_socket->so_rcv, 1,
618 				    SCTP_READ_LOCK_NOT_HELD,
619 				    SCTP_SO_LOCKED);
620 				continue;
621 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
622 				*need_reasm = 1;
623 			}
624 			break;
625 		}
626 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
627 		SCTP_SOCKET_UNLOCK(so, 1);
628 #endif
629 	}
630 	if (queue_needed) {
631 		/*
632 		 * Ok, we did not deliver this guy, find the correct place
633 		 * to put it on the queue.
634 		 */
635 		if (sctp_place_control_in_stream(strm, asoc, control)) {
636 			snprintf(msg, sizeof(msg),
637 			    "Queue to str MID: %u duplicate",
638 			    control->mid);
639 			sctp_clean_up_control(stcb, control);
640 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
641 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
642 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
643 			*abort_flag = 1;
644 		}
645 	}
646 }
647 
648 
649 static void
650 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
651 {
652 	struct mbuf *m, *prev = NULL;
653 	struct sctp_tcb *stcb;
654 
655 	stcb = control->stcb;
656 	control->held_length = 0;
657 	control->length = 0;
658 	m = control->data;
659 	while (m) {
660 		if (SCTP_BUF_LEN(m) == 0) {
661 			/* Skip mbufs with NO length */
662 			if (prev == NULL) {
663 				/* First one */
664 				control->data = sctp_m_free(m);
665 				m = control->data;
666 			} else {
667 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
668 				m = SCTP_BUF_NEXT(prev);
669 			}
670 			if (m == NULL) {
671 				control->tail_mbuf = prev;
672 			}
673 			continue;
674 		}
675 		prev = m;
676 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
677 		if (control->on_read_q) {
678 			/*
679 			 * On read queue so we must increment the SB stuff,
680 			 * we assume caller has done any locks of SB.
681 			 */
682 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
683 		}
684 		m = SCTP_BUF_NEXT(m);
685 	}
686 	if (prev) {
687 		control->tail_mbuf = prev;
688 	}
689 }
690 
691 static void
692 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
693 {
694 	struct mbuf *prev = NULL;
695 	struct sctp_tcb *stcb;
696 
697 	stcb = control->stcb;
698 	if (stcb == NULL) {
699 #ifdef INVARIANTS
700 		panic("Control broken");
701 #else
702 		return;
703 #endif
704 	}
705 	if (control->tail_mbuf == NULL) {
706 		/* TSNH */
707 		control->data = m;
708 		sctp_setup_tail_pointer(control);
709 		return;
710 	}
711 	control->tail_mbuf->m_next = m;
712 	while (m) {
713 		if (SCTP_BUF_LEN(m) == 0) {
714 			/* Skip mbufs with NO length */
715 			if (prev == NULL) {
716 				/* First one */
717 				control->tail_mbuf->m_next = sctp_m_free(m);
718 				m = control->tail_mbuf->m_next;
719 			} else {
720 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
721 				m = SCTP_BUF_NEXT(prev);
722 			}
723 			if (m == NULL) {
724 				control->tail_mbuf = prev;
725 			}
726 			continue;
727 		}
728 		prev = m;
729 		if (control->on_read_q) {
730 			/*
731 			 * On read queue so we must increment the SB stuff,
732 			 * we assume caller has done any locks of SB.
733 			 */
734 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
735 		}
736 		*added += SCTP_BUF_LEN(m);
737 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
738 		m = SCTP_BUF_NEXT(m);
739 	}
740 	if (prev) {
741 		control->tail_mbuf = prev;
742 	}
743 }
744 
745 static void
746 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
747 {
748 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
749 	nc->sinfo_stream = control->sinfo_stream;
750 	nc->mid = control->mid;
751 	TAILQ_INIT(&nc->reasm);
752 	nc->top_fsn = control->top_fsn;
753 	nc->mid = control->mid;
754 	nc->sinfo_flags = control->sinfo_flags;
755 	nc->sinfo_ppid = control->sinfo_ppid;
756 	nc->sinfo_context = control->sinfo_context;
757 	nc->fsn_included = 0xffffffff;
758 	nc->sinfo_tsn = control->sinfo_tsn;
759 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
760 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
761 	nc->whoFrom = control->whoFrom;
762 	atomic_add_int(&nc->whoFrom->ref_count, 1);
763 	nc->stcb = control->stcb;
764 	nc->port_from = control->port_from;
765 }
766 
767 static void
768 sctp_reset_a_control(struct sctp_queued_to_read *control,
769     struct sctp_inpcb *inp, uint32_t tsn)
770 {
771 	control->fsn_included = tsn;
772 	if (control->on_read_q) {
773 		/*
774 		 * We have to purge it from there, hopefully this will work
775 		 * :-)
776 		 */
777 		TAILQ_REMOVE(&inp->read_queue, control, next);
778 		control->on_read_q = 0;
779 	}
780 }
781 
782 static int
783 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
784     struct sctp_association *asoc,
785     struct sctp_stream_in *strm,
786     struct sctp_queued_to_read *control,
787     uint32_t pd_point,
788     int inp_read_lock_held)
789 {
790 	/*
791 	 * Special handling for the old un-ordered data chunk. All the
792 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
793 	 * to see if we have it all. If you return one, no other control
794 	 * entries on the un-ordered queue will be looked at. In theory
795 	 * there should be no others entries in reality, unless the guy is
796 	 * sending both unordered NDATA and unordered DATA...
797 	 */
798 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
799 	uint32_t fsn;
800 	struct sctp_queued_to_read *nc;
801 	int cnt_added;
802 
803 	if (control->first_frag_seen == 0) {
804 		/* Nothing we can do, we have not seen the first piece yet */
805 		return (1);
806 	}
807 	/* Collapse any we can */
808 	cnt_added = 0;
809 restart:
810 	fsn = control->fsn_included + 1;
811 	/* Now what can we add? */
812 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
813 		if (chk->rec.data.fsn == fsn) {
814 			/* Ok lets add it */
815 			sctp_alloc_a_readq(stcb, nc);
816 			if (nc == NULL) {
817 				break;
818 			}
819 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
820 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
821 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
822 			fsn++;
823 			cnt_added++;
824 			chk = NULL;
825 			if (control->end_added) {
826 				/* We are done */
827 				if (!TAILQ_EMPTY(&control->reasm)) {
828 					/*
829 					 * Ok we have to move anything left
830 					 * on the control queue to a new
831 					 * control.
832 					 */
833 					sctp_build_readq_entry_from_ctl(nc, control);
834 					tchk = TAILQ_FIRST(&control->reasm);
835 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
836 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
837 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
838 							asoc->size_on_reasm_queue -= tchk->send_size;
839 						} else {
840 #ifdef INVARIANTS
841 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
842 #else
843 							asoc->size_on_reasm_queue = 0;
844 #endif
845 						}
846 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
847 						nc->first_frag_seen = 1;
848 						nc->fsn_included = tchk->rec.data.fsn;
849 						nc->data = tchk->data;
850 						nc->sinfo_ppid = tchk->rec.data.ppid;
851 						nc->sinfo_tsn = tchk->rec.data.tsn;
852 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
853 						tchk->data = NULL;
854 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
855 						sctp_setup_tail_pointer(nc);
856 						tchk = TAILQ_FIRST(&control->reasm);
857 					}
858 					/* Spin the rest onto the queue */
859 					while (tchk) {
860 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
861 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
862 						tchk = TAILQ_FIRST(&control->reasm);
863 					}
864 					/*
865 					 * Now lets add it to the queue
866 					 * after removing control
867 					 */
868 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
869 					nc->on_strm_q = SCTP_ON_UNORDERED;
870 					if (control->on_strm_q) {
871 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
872 						control->on_strm_q = 0;
873 					}
874 				}
875 				if (control->pdapi_started) {
876 					strm->pd_api_started = 0;
877 					control->pdapi_started = 0;
878 				}
879 				if (control->on_strm_q) {
880 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
881 					control->on_strm_q = 0;
882 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
883 				}
884 				if (control->on_read_q == 0) {
885 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
886 					    &stcb->sctp_socket->so_rcv, control->end_added,
887 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
888 				}
889 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
890 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
891 					/*
892 					 * Switch to the new guy and
893 					 * continue
894 					 */
895 					control = nc;
896 					goto restart;
897 				} else {
898 					if (nc->on_strm_q == 0) {
899 						sctp_free_a_readq(stcb, nc);
900 					}
901 				}
902 				return (1);
903 			} else {
904 				sctp_free_a_readq(stcb, nc);
905 			}
906 		} else {
907 			/* Can't add more */
908 			break;
909 		}
910 	}
911 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
912 		strm->pd_api_started = 1;
913 		control->pdapi_started = 1;
914 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
915 		    &stcb->sctp_socket->so_rcv, control->end_added,
916 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
917 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
918 		return (0);
919 	} else {
920 		return (1);
921 	}
922 }
923 
924 static void
925 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
926     struct sctp_association *asoc,
927     struct sctp_queued_to_read *control,
928     struct sctp_tmit_chunk *chk,
929     int *abort_flag)
930 {
931 	struct sctp_tmit_chunk *at;
932 	int inserted;
933 
934 	/*
935 	 * Here we need to place the chunk into the control structure sorted
936 	 * in the correct order.
937 	 */
938 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
939 		/* Its the very first one. */
940 		SCTPDBG(SCTP_DEBUG_XXX,
941 		    "chunk is a first fsn: %u becomes fsn_included\n",
942 		    chk->rec.data.fsn);
943 		if (control->first_frag_seen) {
944 			/*
945 			 * In old un-ordered we can reassembly on one
946 			 * control multiple messages. As long as the next
947 			 * FIRST is greater then the old first (TSN i.e. FSN
948 			 * wise)
949 			 */
950 			struct mbuf *tdata;
951 			uint32_t tmp;
952 
953 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
954 				/*
955 				 * Easy way the start of a new guy beyond
956 				 * the lowest
957 				 */
958 				goto place_chunk;
959 			}
960 			if ((chk->rec.data.fsn == control->fsn_included) ||
961 			    (control->pdapi_started)) {
962 				/*
963 				 * Ok this should not happen, if it does we
964 				 * started the pd-api on the higher TSN
965 				 * (since the equals part is a TSN failure
966 				 * it must be that).
967 				 *
968 				 * We are completly hosed in that case since
969 				 * I have no way to recover. This really
970 				 * will only happen if we can get more TSN's
971 				 * higher before the pd-api-point.
972 				 */
973 				sctp_abort_in_reasm(stcb, control, chk,
974 				    abort_flag,
975 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
976 
977 				return;
978 			}
979 			/*
980 			 * Ok we have two firsts and the one we just got is
981 			 * smaller than the one we previously placed.. yuck!
982 			 * We must swap them out.
983 			 */
984 			/* swap the mbufs */
985 			tdata = control->data;
986 			control->data = chk->data;
987 			chk->data = tdata;
988 			/* Save the lengths */
989 			chk->send_size = control->length;
990 			/* Recompute length of control and tail pointer */
991 			sctp_setup_tail_pointer(control);
992 			/* Fix the FSN included */
993 			tmp = control->fsn_included;
994 			control->fsn_included = chk->rec.data.fsn;
995 			chk->rec.data.fsn = tmp;
996 			/* Fix the TSN included */
997 			tmp = control->sinfo_tsn;
998 			control->sinfo_tsn = chk->rec.data.tsn;
999 			chk->rec.data.tsn = tmp;
1000 			/* Fix the PPID included */
1001 			tmp = control->sinfo_ppid;
1002 			control->sinfo_ppid = chk->rec.data.ppid;
1003 			chk->rec.data.ppid = tmp;
1004 			/* Fix tail pointer */
1005 			goto place_chunk;
1006 		}
1007 		control->first_frag_seen = 1;
1008 		control->fsn_included = chk->rec.data.fsn;
1009 		control->top_fsn = chk->rec.data.fsn;
1010 		control->sinfo_tsn = chk->rec.data.tsn;
1011 		control->sinfo_ppid = chk->rec.data.ppid;
1012 		control->data = chk->data;
1013 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1014 		chk->data = NULL;
1015 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1016 		sctp_setup_tail_pointer(control);
1017 		return;
1018 	}
1019 place_chunk:
1020 	inserted = 0;
1021 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1022 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1023 			/*
1024 			 * This one in queue is bigger than the new one,
1025 			 * insert the new one before at.
1026 			 */
1027 			asoc->size_on_reasm_queue += chk->send_size;
1028 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1029 			inserted = 1;
1030 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1031 			break;
1032 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1033 			/*
1034 			 * They sent a duplicate fsn number. This really
1035 			 * should not happen since the FSN is a TSN and it
1036 			 * should have been dropped earlier.
1037 			 */
1038 			sctp_abort_in_reasm(stcb, control, chk,
1039 			    abort_flag,
1040 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1041 			return;
1042 		}
1043 	}
1044 	if (inserted == 0) {
1045 		/* Its at the end */
1046 		asoc->size_on_reasm_queue += chk->send_size;
1047 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1048 		control->top_fsn = chk->rec.data.fsn;
1049 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1050 	}
1051 }
1052 
1053 static int
1054 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1055     struct sctp_stream_in *strm, int inp_read_lock_held)
1056 {
1057 	/*
1058 	 * Given a stream, strm, see if any of the SSN's on it that are
1059 	 * fragmented are ready to deliver. If so go ahead and place them on
1060 	 * the read queue. In so placing if we have hit the end, then we
1061 	 * need to remove them from the stream's queue.
1062 	 */
1063 	struct sctp_queued_to_read *control, *nctl = NULL;
1064 	uint32_t next_to_del;
1065 	uint32_t pd_point;
1066 	int ret = 0;
1067 
1068 	if (stcb->sctp_socket) {
1069 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1070 		    stcb->sctp_ep->partial_delivery_point);
1071 	} else {
1072 		pd_point = stcb->sctp_ep->partial_delivery_point;
1073 	}
1074 	control = TAILQ_FIRST(&strm->uno_inqueue);
1075 
1076 	if ((control != NULL) &&
1077 	    (asoc->idata_supported == 0)) {
1078 		/* Special handling needed for "old" data format */
1079 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1080 			goto done_un;
1081 		}
1082 	}
1083 	if (strm->pd_api_started) {
1084 		/* Can't add more */
1085 		return (0);
1086 	}
1087 	while (control) {
1088 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1089 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1090 		nctl = TAILQ_NEXT(control, next_instrm);
1091 		if (control->end_added) {
1092 			/* We just put the last bit on */
1093 			if (control->on_strm_q) {
1094 #ifdef INVARIANTS
1095 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1096 					panic("Huh control: %p on_q: %d -- not unordered?",
1097 					    control, control->on_strm_q);
1098 				}
1099 #endif
1100 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1101 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1102 				control->on_strm_q = 0;
1103 			}
1104 			if (control->on_read_q == 0) {
1105 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1106 				    control,
1107 				    &stcb->sctp_socket->so_rcv, control->end_added,
1108 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1109 			}
1110 		} else {
1111 			/* Can we do a PD-API for this un-ordered guy? */
1112 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1113 				strm->pd_api_started = 1;
1114 				control->pdapi_started = 1;
1115 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1116 				    control,
1117 				    &stcb->sctp_socket->so_rcv, control->end_added,
1118 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1119 
1120 				break;
1121 			}
1122 		}
1123 		control = nctl;
1124 	}
1125 done_un:
1126 	control = TAILQ_FIRST(&strm->inqueue);
1127 	if (strm->pd_api_started) {
1128 		/* Can't add more */
1129 		return (0);
1130 	}
1131 	if (control == NULL) {
1132 		return (ret);
1133 	}
1134 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1135 		/*
1136 		 * Ok the guy at the top was being partially delivered
1137 		 * completed, so we remove it. Note the pd_api flag was
1138 		 * taken off when the chunk was merged on in
1139 		 * sctp_queue_data_for_reasm below.
1140 		 */
1141 		nctl = TAILQ_NEXT(control, next_instrm);
1142 		SCTPDBG(SCTP_DEBUG_XXX,
1143 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1144 		    control, control->end_added, control->mid,
1145 		    control->top_fsn, control->fsn_included,
1146 		    strm->last_mid_delivered);
1147 		if (control->end_added) {
1148 			if (control->on_strm_q) {
1149 #ifdef INVARIANTS
1150 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1151 					panic("Huh control: %p on_q: %d -- not ordered?",
1152 					    control, control->on_strm_q);
1153 				}
1154 #endif
1155 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1156 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1157 				if (asoc->size_on_all_streams >= control->length) {
1158 					asoc->size_on_all_streams -= control->length;
1159 				} else {
1160 #ifdef INVARIANTS
1161 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1162 #else
1163 					asoc->size_on_all_streams = 0;
1164 #endif
1165 				}
1166 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1167 				control->on_strm_q = 0;
1168 			}
1169 			if (strm->pd_api_started && control->pdapi_started) {
1170 				control->pdapi_started = 0;
1171 				strm->pd_api_started = 0;
1172 			}
1173 			if (control->on_read_q == 0) {
1174 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1175 				    control,
1176 				    &stcb->sctp_socket->so_rcv, control->end_added,
1177 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1178 			}
1179 			control = nctl;
1180 		}
1181 	}
1182 	if (strm->pd_api_started) {
1183 		/*
1184 		 * Can't add more must have gotten an un-ordered above being
1185 		 * partially delivered.
1186 		 */
1187 		return (0);
1188 	}
1189 deliver_more:
1190 	next_to_del = strm->last_mid_delivered + 1;
1191 	if (control) {
1192 		SCTPDBG(SCTP_DEBUG_XXX,
1193 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1194 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1195 		    next_to_del);
1196 		nctl = TAILQ_NEXT(control, next_instrm);
1197 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1198 		    (control->first_frag_seen)) {
1199 			int done;
1200 
1201 			/* Ok we can deliver it onto the stream. */
1202 			if (control->end_added) {
1203 				/* We are done with it afterwards */
1204 				if (control->on_strm_q) {
1205 #ifdef INVARIANTS
1206 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1207 						panic("Huh control: %p on_q: %d -- not ordered?",
1208 						    control, control->on_strm_q);
1209 					}
1210 #endif
1211 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1212 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1213 					if (asoc->size_on_all_streams >= control->length) {
1214 						asoc->size_on_all_streams -= control->length;
1215 					} else {
1216 #ifdef INVARIANTS
1217 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1218 #else
1219 						asoc->size_on_all_streams = 0;
1220 #endif
1221 					}
1222 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1223 					control->on_strm_q = 0;
1224 				}
1225 				ret++;
1226 			}
1227 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1228 				/*
1229 				 * A singleton now slipping through - mark
1230 				 * it non-revokable too
1231 				 */
1232 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1233 			} else if (control->end_added == 0) {
1234 				/*
1235 				 * Check if we can defer adding until its
1236 				 * all there
1237 				 */
1238 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1239 					/*
1240 					 * Don't need it or cannot add more
1241 					 * (one being delivered that way)
1242 					 */
1243 					goto out;
1244 				}
1245 			}
1246 			done = (control->end_added) && (control->last_frag_seen);
1247 			if (control->on_read_q == 0) {
1248 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1249 				    control,
1250 				    &stcb->sctp_socket->so_rcv, control->end_added,
1251 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1252 			}
1253 			strm->last_mid_delivered = next_to_del;
1254 			if (done) {
1255 				control = nctl;
1256 				goto deliver_more;
1257 			} else {
1258 				/* We are now doing PD API */
1259 				strm->pd_api_started = 1;
1260 				control->pdapi_started = 1;
1261 			}
1262 		}
1263 	}
1264 out:
1265 	return (ret);
1266 }
1267 
1268 
1269 uint32_t
1270 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1271     struct sctp_stream_in *strm,
1272     struct sctp_tcb *stcb, struct sctp_association *asoc,
1273     struct sctp_tmit_chunk *chk, int hold_rlock)
1274 {
1275 	/*
1276 	 * Given a control and a chunk, merge the data from the chk onto the
1277 	 * control and free up the chunk resources.
1278 	 */
1279 	uint32_t added = 0;
1280 	int i_locked = 0;
1281 
1282 	if (control->on_read_q && (hold_rlock == 0)) {
1283 		/*
1284 		 * Its being pd-api'd so we must do some locks.
1285 		 */
1286 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1287 		i_locked = 1;
1288 	}
1289 	if (control->data == NULL) {
1290 		control->data = chk->data;
1291 		sctp_setup_tail_pointer(control);
1292 	} else {
1293 		sctp_add_to_tail_pointer(control, chk->data, &added);
1294 	}
1295 	control->fsn_included = chk->rec.data.fsn;
1296 	asoc->size_on_reasm_queue -= chk->send_size;
1297 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1298 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1299 	chk->data = NULL;
1300 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1301 		control->first_frag_seen = 1;
1302 		control->sinfo_tsn = chk->rec.data.tsn;
1303 		control->sinfo_ppid = chk->rec.data.ppid;
1304 	}
1305 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1306 		/* Its complete */
1307 		if ((control->on_strm_q) && (control->on_read_q)) {
1308 			if (control->pdapi_started) {
1309 				control->pdapi_started = 0;
1310 				strm->pd_api_started = 0;
1311 			}
1312 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1313 				/* Unordered */
1314 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1315 				control->on_strm_q = 0;
1316 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1317 				/* Ordered */
1318 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1319 				if (asoc->size_on_all_streams >= control->length) {
1320 					asoc->size_on_all_streams -= control->length;
1321 				} else {
1322 #ifdef INVARIANTS
1323 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1324 #else
1325 					asoc->size_on_all_streams = 0;
1326 #endif
1327 				}
1328 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1329 				control->on_strm_q = 0;
1330 #ifdef INVARIANTS
1331 			} else if (control->on_strm_q) {
1332 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1333 				    control->on_strm_q);
1334 #endif
1335 			}
1336 		}
1337 		control->end_added = 1;
1338 		control->last_frag_seen = 1;
1339 	}
1340 	if (i_locked) {
1341 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1342 	}
1343 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1344 	return (added);
1345 }
1346 
1347 /*
1348  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1349  * queue, see if anthing can be delivered. If so pull it off (or as much as
1350  * we can. If we run out of space then we must dump what we can and set the
1351  * appropriate flag to say we queued what we could.
1352  */
1353 static void
1354 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1355     struct sctp_queued_to_read *control,
1356     struct sctp_tmit_chunk *chk,
1357     int created_control,
1358     int *abort_flag, uint32_t tsn)
1359 {
1360 	uint32_t next_fsn;
1361 	struct sctp_tmit_chunk *at, *nat;
1362 	struct sctp_stream_in *strm;
1363 	int do_wakeup, unordered;
1364 	uint32_t lenadded;
1365 
1366 	strm = &asoc->strmin[control->sinfo_stream];
1367 	/*
1368 	 * For old un-ordered data chunks.
1369 	 */
1370 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1371 		unordered = 1;
1372 	} else {
1373 		unordered = 0;
1374 	}
1375 	/* Must be added to the stream-in queue */
1376 	if (created_control) {
1377 		if (unordered == 0) {
1378 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1379 		}
1380 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1381 			/* Duplicate SSN? */
1382 			sctp_clean_up_control(stcb, control);
1383 			sctp_abort_in_reasm(stcb, control, chk,
1384 			    abort_flag,
1385 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1386 			return;
1387 		}
1388 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1389 			/*
1390 			 * Ok we created this control and now lets validate
1391 			 * that its legal i.e. there is a B bit set, if not
1392 			 * and we have up to the cum-ack then its invalid.
1393 			 */
1394 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1395 				sctp_abort_in_reasm(stcb, control, chk,
1396 				    abort_flag,
1397 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1398 				return;
1399 			}
1400 		}
1401 	}
1402 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1403 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1404 		return;
1405 	}
1406 	/*
1407 	 * Ok we must queue the chunk into the reasembly portion: o if its
1408 	 * the first it goes to the control mbuf. o if its not first but the
1409 	 * next in sequence it goes to the control, and each succeeding one
1410 	 * in order also goes. o if its not in order we place it on the list
1411 	 * in its place.
1412 	 */
1413 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1414 		/* Its the very first one. */
1415 		SCTPDBG(SCTP_DEBUG_XXX,
1416 		    "chunk is a first fsn: %u becomes fsn_included\n",
1417 		    chk->rec.data.fsn);
1418 		if (control->first_frag_seen) {
1419 			/*
1420 			 * Error on senders part, they either sent us two
1421 			 * data chunks with FIRST, or they sent two
1422 			 * un-ordered chunks that were fragmented at the
1423 			 * same time in the same stream.
1424 			 */
1425 			sctp_abort_in_reasm(stcb, control, chk,
1426 			    abort_flag,
1427 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1428 			return;
1429 		}
1430 		control->first_frag_seen = 1;
1431 		control->sinfo_ppid = chk->rec.data.ppid;
1432 		control->sinfo_tsn = chk->rec.data.tsn;
1433 		control->fsn_included = chk->rec.data.fsn;
1434 		control->data = chk->data;
1435 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1436 		chk->data = NULL;
1437 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1438 		sctp_setup_tail_pointer(control);
1439 		asoc->size_on_all_streams += control->length;
1440 	} else {
1441 		/* Place the chunk in our list */
1442 		int inserted = 0;
1443 
1444 		if (control->last_frag_seen == 0) {
1445 			/* Still willing to raise highest FSN seen */
1446 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1447 				SCTPDBG(SCTP_DEBUG_XXX,
1448 				    "We have a new top_fsn: %u\n",
1449 				    chk->rec.data.fsn);
1450 				control->top_fsn = chk->rec.data.fsn;
1451 			}
1452 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1453 				SCTPDBG(SCTP_DEBUG_XXX,
1454 				    "The last fsn is now in place fsn: %u\n",
1455 				    chk->rec.data.fsn);
1456 				control->last_frag_seen = 1;
1457 			}
1458 			if (asoc->idata_supported || control->first_frag_seen) {
1459 				/*
1460 				 * For IDATA we always check since we know
1461 				 * that the first fragment is 0. For old
1462 				 * DATA we have to receive the first before
1463 				 * we know the first FSN (which is the TSN).
1464 				 */
1465 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1466 					/*
1467 					 * We have already delivered up to
1468 					 * this so its a dup
1469 					 */
1470 					sctp_abort_in_reasm(stcb, control, chk,
1471 					    abort_flag,
1472 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1473 					return;
1474 				}
1475 			}
1476 		} else {
1477 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1478 				/* Second last? huh? */
1479 				SCTPDBG(SCTP_DEBUG_XXX,
1480 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1481 				    chk->rec.data.fsn, control->top_fsn);
1482 				sctp_abort_in_reasm(stcb, control,
1483 				    chk, abort_flag,
1484 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1485 				return;
1486 			}
1487 			if (asoc->idata_supported || control->first_frag_seen) {
1488 				/*
1489 				 * For IDATA we always check since we know
1490 				 * that the first fragment is 0. For old
1491 				 * DATA we have to receive the first before
1492 				 * we know the first FSN (which is the TSN).
1493 				 */
1494 
1495 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1496 					/*
1497 					 * We have already delivered up to
1498 					 * this so its a dup
1499 					 */
1500 					SCTPDBG(SCTP_DEBUG_XXX,
1501 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1502 					    chk->rec.data.fsn, control->fsn_included);
1503 					sctp_abort_in_reasm(stcb, control, chk,
1504 					    abort_flag,
1505 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1506 					return;
1507 				}
1508 			}
1509 			/*
1510 			 * validate not beyond top FSN if we have seen last
1511 			 * one
1512 			 */
1513 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1514 				SCTPDBG(SCTP_DEBUG_XXX,
1515 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1516 				    chk->rec.data.fsn,
1517 				    control->top_fsn);
1518 				sctp_abort_in_reasm(stcb, control, chk,
1519 				    abort_flag,
1520 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1521 				return;
1522 			}
1523 		}
1524 		/*
1525 		 * If we reach here, we need to place the new chunk in the
1526 		 * reassembly for this control.
1527 		 */
1528 		SCTPDBG(SCTP_DEBUG_XXX,
1529 		    "chunk is a not first fsn: %u needs to be inserted\n",
1530 		    chk->rec.data.fsn);
1531 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1532 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1533 				/*
1534 				 * This one in queue is bigger than the new
1535 				 * one, insert the new one before at.
1536 				 */
1537 				SCTPDBG(SCTP_DEBUG_XXX,
1538 				    "Insert it before fsn: %u\n",
1539 				    at->rec.data.fsn);
1540 				asoc->size_on_reasm_queue += chk->send_size;
1541 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1542 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1543 				inserted = 1;
1544 				break;
1545 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1546 				/*
1547 				 * Gak, He sent me a duplicate str seq
1548 				 * number
1549 				 */
1550 				/*
1551 				 * foo bar, I guess I will just free this
1552 				 * new guy, should we abort too? FIX ME
1553 				 * MAYBE? Or it COULD be that the SSN's have
1554 				 * wrapped. Maybe I should compare to TSN
1555 				 * somehow... sigh for now just blow away
1556 				 * the chunk!
1557 				 */
1558 				SCTPDBG(SCTP_DEBUG_XXX,
1559 				    "Duplicate to fsn: %u -- abort\n",
1560 				    at->rec.data.fsn);
1561 				sctp_abort_in_reasm(stcb, control,
1562 				    chk, abort_flag,
1563 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1564 				return;
1565 			}
1566 		}
1567 		if (inserted == 0) {
1568 			/* Goes on the end */
1569 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1570 			    chk->rec.data.fsn);
1571 			asoc->size_on_reasm_queue += chk->send_size;
1572 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1573 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1574 		}
1575 	}
1576 	/*
1577 	 * Ok lets see if we can suck any up into the control structure that
1578 	 * are in seq if it makes sense.
1579 	 */
1580 	do_wakeup = 0;
1581 	/*
1582 	 * If the first fragment has not been seen there is no sense in
1583 	 * looking.
1584 	 */
1585 	if (control->first_frag_seen) {
1586 		next_fsn = control->fsn_included + 1;
1587 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1588 			if (at->rec.data.fsn == next_fsn) {
1589 				/* We can add this one now to the control */
1590 				SCTPDBG(SCTP_DEBUG_XXX,
1591 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1592 				    control, at,
1593 				    at->rec.data.fsn,
1594 				    next_fsn, control->fsn_included);
1595 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1596 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1597 				if (control->on_read_q) {
1598 					do_wakeup = 1;
1599 				} else {
1600 					/*
1601 					 * We only add to the
1602 					 * size-on-all-streams if its not on
1603 					 * the read q. The read q flag will
1604 					 * cause a sballoc so its accounted
1605 					 * for there.
1606 					 */
1607 					asoc->size_on_all_streams += lenadded;
1608 				}
1609 				next_fsn++;
1610 				if (control->end_added && control->pdapi_started) {
1611 					if (strm->pd_api_started) {
1612 						strm->pd_api_started = 0;
1613 						control->pdapi_started = 0;
1614 					}
1615 					if (control->on_read_q == 0) {
1616 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1617 						    control,
1618 						    &stcb->sctp_socket->so_rcv, control->end_added,
1619 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1620 						do_wakeup = 1;
1621 					}
1622 					break;
1623 				}
1624 			} else {
1625 				break;
1626 			}
1627 		}
1628 	}
1629 	if (do_wakeup) {
1630 		/* Need to wakeup the reader */
1631 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1632 	}
1633 }
1634 
1635 static struct sctp_queued_to_read *
1636 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1637 {
1638 	struct sctp_queued_to_read *control;
1639 
1640 	if (ordered) {
1641 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1642 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1643 				break;
1644 			}
1645 		}
1646 	} else {
1647 		if (idata_supported) {
1648 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1649 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1650 					break;
1651 				}
1652 			}
1653 		} else {
1654 			control = TAILQ_FIRST(&strm->uno_inqueue);
1655 		}
1656 	}
1657 	return (control);
1658 }
1659 
1660 static int
1661 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1662     struct mbuf **m, int offset, int chk_length,
1663     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1664     int *break_flag, int last_chunk, uint8_t chk_type)
1665 {
1666 	/* Process a data chunk */
1667 	/* struct sctp_tmit_chunk *chk; */
1668 	struct sctp_tmit_chunk *chk;
1669 	uint32_t tsn, fsn, gap, mid;
1670 	struct mbuf *dmbuf;
1671 	int the_len;
1672 	int need_reasm_check = 0;
1673 	uint16_t sid;
1674 	struct mbuf *op_err;
1675 	char msg[SCTP_DIAG_INFO_LEN];
1676 	struct sctp_queued_to_read *control, *ncontrol;
1677 	uint32_t ppid;
1678 	uint8_t chk_flags;
1679 	struct sctp_stream_reset_list *liste;
1680 	int ordered;
1681 	size_t clen;
1682 	int created_control = 0;
1683 
1684 	if (chk_type == SCTP_IDATA) {
1685 		struct sctp_idata_chunk *chunk, chunk_buf;
1686 
1687 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1688 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1689 		chk_flags = chunk->ch.chunk_flags;
1690 		clen = sizeof(struct sctp_idata_chunk);
1691 		tsn = ntohl(chunk->dp.tsn);
1692 		sid = ntohs(chunk->dp.sid);
1693 		mid = ntohl(chunk->dp.mid);
1694 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1695 			fsn = 0;
1696 			ppid = chunk->dp.ppid_fsn.ppid;
1697 		} else {
1698 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1699 			ppid = 0xffffffff;	/* Use as an invalid value. */
1700 		}
1701 	} else {
1702 		struct sctp_data_chunk *chunk, chunk_buf;
1703 
1704 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1705 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1706 		chk_flags = chunk->ch.chunk_flags;
1707 		clen = sizeof(struct sctp_data_chunk);
1708 		tsn = ntohl(chunk->dp.tsn);
1709 		sid = ntohs(chunk->dp.sid);
1710 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1711 		fsn = tsn;
1712 		ppid = chunk->dp.ppid;
1713 	}
1714 	if ((size_t)chk_length == clen) {
1715 		/*
1716 		 * Need to send an abort since we had a empty data chunk.
1717 		 */
1718 		op_err = sctp_generate_no_user_data_cause(tsn);
1719 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1720 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1721 		*abort_flag = 1;
1722 		return (0);
1723 	}
1724 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1725 		asoc->send_sack = 1;
1726 	}
1727 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1728 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1729 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1730 	}
1731 	if (stcb == NULL) {
1732 		return (0);
1733 	}
1734 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1735 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1736 		/* It is a duplicate */
1737 		SCTP_STAT_INCR(sctps_recvdupdata);
1738 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1739 			/* Record a dup for the next outbound sack */
1740 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1741 			asoc->numduptsns++;
1742 		}
1743 		asoc->send_sack = 1;
1744 		return (0);
1745 	}
1746 	/* Calculate the number of TSN's between the base and this TSN */
1747 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1748 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1749 		/* Can't hold the bit in the mapping at max array, toss it */
1750 		return (0);
1751 	}
1752 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1753 		SCTP_TCB_LOCK_ASSERT(stcb);
1754 		if (sctp_expand_mapping_array(asoc, gap)) {
1755 			/* Can't expand, drop it */
1756 			return (0);
1757 		}
1758 	}
1759 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1760 		*high_tsn = tsn;
1761 	}
1762 	/* See if we have received this one already */
1763 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1764 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1765 		SCTP_STAT_INCR(sctps_recvdupdata);
1766 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1767 			/* Record a dup for the next outbound sack */
1768 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1769 			asoc->numduptsns++;
1770 		}
1771 		asoc->send_sack = 1;
1772 		return (0);
1773 	}
1774 	/*
1775 	 * Check to see about the GONE flag, duplicates would cause a sack
1776 	 * to be sent up above
1777 	 */
1778 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1779 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1780 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1781 		/*
1782 		 * wait a minute, this guy is gone, there is no longer a
1783 		 * receiver. Send peer an ABORT!
1784 		 */
1785 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1786 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1787 		*abort_flag = 1;
1788 		return (0);
1789 	}
1790 	/*
1791 	 * Now before going further we see if there is room. If NOT then we
1792 	 * MAY let one through only IF this TSN is the one we are waiting
1793 	 * for on a partial delivery API.
1794 	 */
1795 
1796 	/* Is the stream valid? */
1797 	if (sid >= asoc->streamincnt) {
1798 		struct sctp_error_invalid_stream *cause;
1799 
1800 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1801 		    0, M_NOWAIT, 1, MT_DATA);
1802 		if (op_err != NULL) {
1803 			/* add some space up front so prepend will work well */
1804 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1805 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1806 			/*
1807 			 * Error causes are just param's and this one has
1808 			 * two back to back phdr, one with the error type
1809 			 * and size, the other with the streamid and a rsvd
1810 			 */
1811 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1812 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1813 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1814 			cause->stream_id = htons(sid);
1815 			cause->reserved = htons(0);
1816 			sctp_queue_op_err(stcb, op_err);
1817 		}
1818 		SCTP_STAT_INCR(sctps_badsid);
1819 		SCTP_TCB_LOCK_ASSERT(stcb);
1820 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1821 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1822 			asoc->highest_tsn_inside_nr_map = tsn;
1823 		}
1824 		if (tsn == (asoc->cumulative_tsn + 1)) {
1825 			/* Update cum-ack */
1826 			asoc->cumulative_tsn = tsn;
1827 		}
1828 		return (0);
1829 	}
1830 	/*
1831 	 * If its a fragmented message, lets see if we can find the control
1832 	 * on the reassembly queues.
1833 	 */
1834 	if ((chk_type == SCTP_IDATA) &&
1835 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1836 	    (fsn == 0)) {
1837 		/*
1838 		 * The first *must* be fsn 0, and other (middle/end) pieces
1839 		 * can *not* be fsn 0. XXX: This can happen in case of a
1840 		 * wrap around. Ignore is for now.
1841 		 */
1842 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1843 		    mid, chk_flags);
1844 		goto err_out;
1845 	}
1846 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1847 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1848 	    chk_flags, control);
1849 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1850 		/* See if we can find the re-assembly entity */
1851 		if (control != NULL) {
1852 			/* We found something, does it belong? */
1853 			if (ordered && (mid != control->mid)) {
1854 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1855 		err_out:
1856 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1857 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1858 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1859 				*abort_flag = 1;
1860 				return (0);
1861 			}
1862 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1863 				/*
1864 				 * We can't have a switched order with an
1865 				 * unordered chunk
1866 				 */
1867 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1868 				    tsn);
1869 				goto err_out;
1870 			}
1871 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1872 				/*
1873 				 * We can't have a switched unordered with a
1874 				 * ordered chunk
1875 				 */
1876 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1877 				    tsn);
1878 				goto err_out;
1879 			}
1880 		}
1881 	} else {
1882 		/*
1883 		 * Its a complete segment. Lets validate we don't have a
1884 		 * re-assembly going on with the same Stream/Seq (for
1885 		 * ordered) or in the same Stream for unordered.
1886 		 */
1887 		if (control != NULL) {
1888 			if (ordered || asoc->idata_supported) {
1889 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1890 				    chk_flags, mid);
1891 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1892 				goto err_out;
1893 			} else {
1894 				if ((tsn == control->fsn_included + 1) &&
1895 				    (control->end_added == 0)) {
1896 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1897 					goto err_out;
1898 				} else {
1899 					control = NULL;
1900 				}
1901 			}
1902 		}
1903 	}
1904 	/* now do the tests */
1905 	if (((asoc->cnt_on_all_streams +
1906 	    asoc->cnt_on_reasm_queue +
1907 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1908 	    (((int)asoc->my_rwnd) <= 0)) {
1909 		/*
1910 		 * When we have NO room in the rwnd we check to make sure
1911 		 * the reader is doing its job...
1912 		 */
1913 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1914 			/* some to read, wake-up */
1915 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1916 			struct socket *so;
1917 
1918 			so = SCTP_INP_SO(stcb->sctp_ep);
1919 			atomic_add_int(&stcb->asoc.refcnt, 1);
1920 			SCTP_TCB_UNLOCK(stcb);
1921 			SCTP_SOCKET_LOCK(so, 1);
1922 			SCTP_TCB_LOCK(stcb);
1923 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1924 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1925 				/* assoc was freed while we were unlocked */
1926 				SCTP_SOCKET_UNLOCK(so, 1);
1927 				return (0);
1928 			}
1929 #endif
1930 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1931 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1932 			SCTP_SOCKET_UNLOCK(so, 1);
1933 #endif
1934 		}
1935 		/* now is it in the mapping array of what we have accepted? */
1936 		if (chk_type == SCTP_DATA) {
1937 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1938 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1939 				/* Nope not in the valid range dump it */
1940 		dump_packet:
1941 				sctp_set_rwnd(stcb, asoc);
1942 				if ((asoc->cnt_on_all_streams +
1943 				    asoc->cnt_on_reasm_queue +
1944 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1945 					SCTP_STAT_INCR(sctps_datadropchklmt);
1946 				} else {
1947 					SCTP_STAT_INCR(sctps_datadroprwnd);
1948 				}
1949 				*break_flag = 1;
1950 				return (0);
1951 			}
1952 		} else {
1953 			if (control == NULL) {
1954 				goto dump_packet;
1955 			}
1956 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1957 				goto dump_packet;
1958 			}
1959 		}
1960 	}
1961 #ifdef SCTP_ASOCLOG_OF_TSNS
1962 	SCTP_TCB_LOCK_ASSERT(stcb);
1963 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1964 		asoc->tsn_in_at = 0;
1965 		asoc->tsn_in_wrapped = 1;
1966 	}
1967 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1968 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1969 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1970 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1971 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1972 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1973 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1974 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1975 	asoc->tsn_in_at++;
1976 #endif
1977 	/*
1978 	 * Before we continue lets validate that we are not being fooled by
1979 	 * an evil attacker. We can only have Nk chunks based on our TSN
1980 	 * spread allowed by the mapping array N * 8 bits, so there is no
1981 	 * way our stream sequence numbers could have wrapped. We of course
1982 	 * only validate the FIRST fragment so the bit must be set.
1983 	 */
1984 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1985 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1986 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1987 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1988 		/* The incoming sseq is behind where we last delivered? */
1989 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1990 		    mid, asoc->strmin[sid].last_mid_delivered);
1991 
1992 		if (asoc->idata_supported) {
1993 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1994 			    asoc->strmin[sid].last_mid_delivered,
1995 			    tsn,
1996 			    sid,
1997 			    mid);
1998 		} else {
1999 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2000 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2001 			    tsn,
2002 			    sid,
2003 			    (uint16_t)mid);
2004 		}
2005 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2006 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2007 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2008 		*abort_flag = 1;
2009 		return (0);
2010 	}
2011 	if (chk_type == SCTP_IDATA) {
2012 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2013 	} else {
2014 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2015 	}
2016 	if (last_chunk == 0) {
2017 		if (chk_type == SCTP_IDATA) {
2018 			dmbuf = SCTP_M_COPYM(*m,
2019 			    (offset + sizeof(struct sctp_idata_chunk)),
2020 			    the_len, M_NOWAIT);
2021 		} else {
2022 			dmbuf = SCTP_M_COPYM(*m,
2023 			    (offset + sizeof(struct sctp_data_chunk)),
2024 			    the_len, M_NOWAIT);
2025 		}
2026 #ifdef SCTP_MBUF_LOGGING
2027 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2028 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2029 		}
2030 #endif
2031 	} else {
2032 		/* We can steal the last chunk */
2033 		int l_len;
2034 
2035 		dmbuf = *m;
2036 		/* lop off the top part */
2037 		if (chk_type == SCTP_IDATA) {
2038 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2039 		} else {
2040 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2041 		}
2042 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2043 			l_len = SCTP_BUF_LEN(dmbuf);
2044 		} else {
2045 			/*
2046 			 * need to count up the size hopefully does not hit
2047 			 * this to often :-0
2048 			 */
2049 			struct mbuf *lat;
2050 
2051 			l_len = 0;
2052 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2053 				l_len += SCTP_BUF_LEN(lat);
2054 			}
2055 		}
2056 		if (l_len > the_len) {
2057 			/* Trim the end round bytes off  too */
2058 			m_adj(dmbuf, -(l_len - the_len));
2059 		}
2060 	}
2061 	if (dmbuf == NULL) {
2062 		SCTP_STAT_INCR(sctps_nomem);
2063 		return (0);
2064 	}
2065 	/*
2066 	 * Now no matter what, we need a control, get one if we don't have
2067 	 * one (we may have gotten it above when we found the message was
2068 	 * fragmented
2069 	 */
2070 	if (control == NULL) {
2071 		sctp_alloc_a_readq(stcb, control);
2072 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2073 		    ppid,
2074 		    sid,
2075 		    chk_flags,
2076 		    NULL, fsn, mid);
2077 		if (control == NULL) {
2078 			SCTP_STAT_INCR(sctps_nomem);
2079 			return (0);
2080 		}
2081 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2082 			struct mbuf *mm;
2083 
2084 			control->data = dmbuf;
2085 			for (mm = control->data; mm; mm = mm->m_next) {
2086 				control->length += SCTP_BUF_LEN(mm);
2087 			}
2088 			control->tail_mbuf = NULL;
2089 			control->end_added = 1;
2090 			control->last_frag_seen = 1;
2091 			control->first_frag_seen = 1;
2092 			control->fsn_included = fsn;
2093 			control->top_fsn = fsn;
2094 		}
2095 		created_control = 1;
2096 	}
2097 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2098 	    chk_flags, ordered, mid, control);
2099 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2100 	    TAILQ_EMPTY(&asoc->resetHead) &&
2101 	    ((ordered == 0) ||
2102 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2103 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2104 		/* Candidate for express delivery */
2105 		/*
2106 		 * Its not fragmented, No PD-API is up, Nothing in the
2107 		 * delivery queue, Its un-ordered OR ordered and the next to
2108 		 * deliver AND nothing else is stuck on the stream queue,
2109 		 * And there is room for it in the socket buffer. Lets just
2110 		 * stuff it up the buffer....
2111 		 */
2112 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2113 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2114 			asoc->highest_tsn_inside_nr_map = tsn;
2115 		}
2116 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2117 		    control, mid);
2118 
2119 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2120 		    control, &stcb->sctp_socket->so_rcv,
2121 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2122 
2123 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2124 			/* for ordered, bump what we delivered */
2125 			asoc->strmin[sid].last_mid_delivered++;
2126 		}
2127 		SCTP_STAT_INCR(sctps_recvexpress);
2128 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2130 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2131 		}
2132 		control = NULL;
2133 		goto finish_express_del;
2134 	}
2135 	/* Now will we need a chunk too? */
2136 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2137 		sctp_alloc_a_chunk(stcb, chk);
2138 		if (chk == NULL) {
2139 			/* No memory so we drop the chunk */
2140 			SCTP_STAT_INCR(sctps_nomem);
2141 			if (last_chunk == 0) {
2142 				/* we copied it, free the copy */
2143 				sctp_m_freem(dmbuf);
2144 			}
2145 			return (0);
2146 		}
2147 		chk->rec.data.tsn = tsn;
2148 		chk->no_fr_allowed = 0;
2149 		chk->rec.data.fsn = fsn;
2150 		chk->rec.data.mid = mid;
2151 		chk->rec.data.sid = sid;
2152 		chk->rec.data.ppid = ppid;
2153 		chk->rec.data.context = stcb->asoc.context;
2154 		chk->rec.data.doing_fast_retransmit = 0;
2155 		chk->rec.data.rcv_flags = chk_flags;
2156 		chk->asoc = asoc;
2157 		chk->send_size = the_len;
2158 		chk->whoTo = net;
2159 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2160 		    chk,
2161 		    control, mid);
2162 		atomic_add_int(&net->ref_count, 1);
2163 		chk->data = dmbuf;
2164 	}
2165 	/* Set the appropriate TSN mark */
2166 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2167 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2168 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2169 			asoc->highest_tsn_inside_nr_map = tsn;
2170 		}
2171 	} else {
2172 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2173 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2174 			asoc->highest_tsn_inside_map = tsn;
2175 		}
2176 	}
2177 	/* Now is it complete (i.e. not fragmented)? */
2178 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2179 		/*
2180 		 * Special check for when streams are resetting. We could be
2181 		 * more smart about this and check the actual stream to see
2182 		 * if it is not being reset.. that way we would not create a
2183 		 * HOLB when amongst streams being reset and those not being
2184 		 * reset.
2185 		 *
2186 		 */
2187 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2188 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2189 			/*
2190 			 * yep its past where we need to reset... go ahead
2191 			 * and queue it.
2192 			 */
2193 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2194 				/* first one on */
2195 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2196 			} else {
2197 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2198 				unsigned char inserted = 0;
2199 
2200 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2201 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2202 
2203 						continue;
2204 					} else {
2205 						/* found it */
2206 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2207 						inserted = 1;
2208 						break;
2209 					}
2210 				}
2211 				if (inserted == 0) {
2212 					/*
2213 					 * must be put at end, use prevP
2214 					 * (all setup from loop) to setup
2215 					 * nextP.
2216 					 */
2217 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2218 				}
2219 			}
2220 			goto finish_express_del;
2221 		}
2222 		if (chk_flags & SCTP_DATA_UNORDERED) {
2223 			/* queue directly into socket buffer */
2224 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2225 			    control, mid);
2226 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2227 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2228 			    control,
2229 			    &stcb->sctp_socket->so_rcv, 1,
2230 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2231 
2232 		} else {
2233 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2234 			    mid);
2235 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2236 			if (*abort_flag) {
2237 				if (last_chunk) {
2238 					*m = NULL;
2239 				}
2240 				return (0);
2241 			}
2242 		}
2243 		goto finish_express_del;
2244 	}
2245 	/* If we reach here its a reassembly */
2246 	need_reasm_check = 1;
2247 	SCTPDBG(SCTP_DEBUG_XXX,
2248 	    "Queue data to stream for reasm control: %p MID: %u\n",
2249 	    control, mid);
2250 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2251 	if (*abort_flag) {
2252 		/*
2253 		 * the assoc is now gone and chk was put onto the reasm
2254 		 * queue, which has all been freed.
2255 		 */
2256 		if (last_chunk) {
2257 			*m = NULL;
2258 		}
2259 		return (0);
2260 	}
2261 finish_express_del:
2262 	/* Here we tidy up things */
2263 	if (tsn == (asoc->cumulative_tsn + 1)) {
2264 		/* Update cum-ack */
2265 		asoc->cumulative_tsn = tsn;
2266 	}
2267 	if (last_chunk) {
2268 		*m = NULL;
2269 	}
2270 	if (ordered) {
2271 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2272 	} else {
2273 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2274 	}
2275 	SCTP_STAT_INCR(sctps_recvdata);
2276 	/* Set it present please */
2277 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2278 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2279 	}
2280 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2281 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2282 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2283 	}
2284 	if (need_reasm_check) {
2285 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2286 		need_reasm_check = 0;
2287 	}
2288 	/* check the special flag for stream resets */
2289 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2290 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2291 		/*
2292 		 * we have finished working through the backlogged TSN's now
2293 		 * time to reset streams. 1: call reset function. 2: free
2294 		 * pending_reply space 3: distribute any chunks in
2295 		 * pending_reply_queue.
2296 		 */
2297 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2298 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2299 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2300 		SCTP_FREE(liste, SCTP_M_STRESET);
2301 		/* sa_ignore FREED_MEMORY */
2302 		liste = TAILQ_FIRST(&asoc->resetHead);
2303 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2304 			/* All can be removed */
2305 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2306 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2307 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2308 				if (*abort_flag) {
2309 					return (0);
2310 				}
2311 				if (need_reasm_check) {
2312 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2313 					need_reasm_check = 0;
2314 				}
2315 			}
2316 		} else {
2317 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2318 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2319 					break;
2320 				}
2321 				/*
2322 				 * if control->sinfo_tsn is <= liste->tsn we
2323 				 * can process it which is the NOT of
2324 				 * control->sinfo_tsn > liste->tsn
2325 				 */
2326 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2327 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2328 				if (*abort_flag) {
2329 					return (0);
2330 				}
2331 				if (need_reasm_check) {
2332 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2333 					need_reasm_check = 0;
2334 				}
2335 			}
2336 		}
2337 	}
2338 	return (1);
2339 }
2340 
2341 static const int8_t sctp_map_lookup_tab[256] = {
2342 	0, 1, 0, 2, 0, 1, 0, 3,
2343 	0, 1, 0, 2, 0, 1, 0, 4,
2344 	0, 1, 0, 2, 0, 1, 0, 3,
2345 	0, 1, 0, 2, 0, 1, 0, 5,
2346 	0, 1, 0, 2, 0, 1, 0, 3,
2347 	0, 1, 0, 2, 0, 1, 0, 4,
2348 	0, 1, 0, 2, 0, 1, 0, 3,
2349 	0, 1, 0, 2, 0, 1, 0, 6,
2350 	0, 1, 0, 2, 0, 1, 0, 3,
2351 	0, 1, 0, 2, 0, 1, 0, 4,
2352 	0, 1, 0, 2, 0, 1, 0, 3,
2353 	0, 1, 0, 2, 0, 1, 0, 5,
2354 	0, 1, 0, 2, 0, 1, 0, 3,
2355 	0, 1, 0, 2, 0, 1, 0, 4,
2356 	0, 1, 0, 2, 0, 1, 0, 3,
2357 	0, 1, 0, 2, 0, 1, 0, 7,
2358 	0, 1, 0, 2, 0, 1, 0, 3,
2359 	0, 1, 0, 2, 0, 1, 0, 4,
2360 	0, 1, 0, 2, 0, 1, 0, 3,
2361 	0, 1, 0, 2, 0, 1, 0, 5,
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 4,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 6,
2366 	0, 1, 0, 2, 0, 1, 0, 3,
2367 	0, 1, 0, 2, 0, 1, 0, 4,
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 5,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 4,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 8
2374 };
2375 
2376 
2377 void
2378 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2379 {
2380 	/*
2381 	 * Now we also need to check the mapping array in a couple of ways.
2382 	 * 1) Did we move the cum-ack point?
2383 	 *
2384 	 * When you first glance at this you might think that all entries
2385 	 * that make up the position of the cum-ack would be in the
2386 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2387 	 * deliverable. Thats true with one exception, when its a fragmented
2388 	 * message we may not deliver the data until some threshold (or all
2389 	 * of it) is in place. So we must OR the nr_mapping_array and
2390 	 * mapping_array to get a true picture of the cum-ack.
2391 	 */
2392 	struct sctp_association *asoc;
2393 	int at;
2394 	uint8_t val;
2395 	int slide_from, slide_end, lgap, distance;
2396 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2397 
2398 	asoc = &stcb->asoc;
2399 
2400 	old_cumack = asoc->cumulative_tsn;
2401 	old_base = asoc->mapping_array_base_tsn;
2402 	old_highest = asoc->highest_tsn_inside_map;
2403 	/*
2404 	 * We could probably improve this a small bit by calculating the
2405 	 * offset of the current cum-ack as the starting point.
2406 	 */
2407 	at = 0;
2408 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2409 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2410 		if (val == 0xff) {
2411 			at += 8;
2412 		} else {
2413 			/* there is a 0 bit */
2414 			at += sctp_map_lookup_tab[val];
2415 			break;
2416 		}
2417 	}
2418 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2419 
2420 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2421 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2422 #ifdef INVARIANTS
2423 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2424 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2425 #else
2426 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2427 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2428 		sctp_print_mapping_array(asoc);
2429 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2430 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2431 		}
2432 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2433 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2434 #endif
2435 	}
2436 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2437 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2438 	} else {
2439 		highest_tsn = asoc->highest_tsn_inside_map;
2440 	}
2441 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2442 		/* The complete array was completed by a single FR */
2443 		/* highest becomes the cum-ack */
2444 		int clr;
2445 #ifdef INVARIANTS
2446 		unsigned int i;
2447 #endif
2448 
2449 		/* clear the array */
2450 		clr = ((at + 7) >> 3);
2451 		if (clr > asoc->mapping_array_size) {
2452 			clr = asoc->mapping_array_size;
2453 		}
2454 		memset(asoc->mapping_array, 0, clr);
2455 		memset(asoc->nr_mapping_array, 0, clr);
2456 #ifdef INVARIANTS
2457 		for (i = 0; i < asoc->mapping_array_size; i++) {
2458 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2459 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2460 				sctp_print_mapping_array(asoc);
2461 			}
2462 		}
2463 #endif
2464 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2465 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2466 	} else if (at >= 8) {
2467 		/* we can slide the mapping array down */
2468 		/* slide_from holds where we hit the first NON 0xff byte */
2469 
2470 		/*
2471 		 * now calculate the ceiling of the move using our highest
2472 		 * TSN value
2473 		 */
2474 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2475 		slide_end = (lgap >> 3);
2476 		if (slide_end < slide_from) {
2477 			sctp_print_mapping_array(asoc);
2478 #ifdef INVARIANTS
2479 			panic("impossible slide");
2480 #else
2481 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2482 			    lgap, slide_end, slide_from, at);
2483 			return;
2484 #endif
2485 		}
2486 		if (slide_end > asoc->mapping_array_size) {
2487 #ifdef INVARIANTS
2488 			panic("would overrun buffer");
2489 #else
2490 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2491 			    asoc->mapping_array_size, slide_end);
2492 			slide_end = asoc->mapping_array_size;
2493 #endif
2494 		}
2495 		distance = (slide_end - slide_from) + 1;
2496 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2497 			sctp_log_map(old_base, old_cumack, old_highest,
2498 			    SCTP_MAP_PREPARE_SLIDE);
2499 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2500 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2501 		}
2502 		if (distance + slide_from > asoc->mapping_array_size ||
2503 		    distance < 0) {
2504 			/*
2505 			 * Here we do NOT slide forward the array so that
2506 			 * hopefully when more data comes in to fill it up
2507 			 * we will be able to slide it forward. Really I
2508 			 * don't think this should happen :-0
2509 			 */
2510 
2511 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2512 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2513 				    (uint32_t)asoc->mapping_array_size,
2514 				    SCTP_MAP_SLIDE_NONE);
2515 			}
2516 		} else {
2517 			int ii;
2518 
2519 			for (ii = 0; ii < distance; ii++) {
2520 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2521 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2522 
2523 			}
2524 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2525 				asoc->mapping_array[ii] = 0;
2526 				asoc->nr_mapping_array[ii] = 0;
2527 			}
2528 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2529 				asoc->highest_tsn_inside_map += (slide_from << 3);
2530 			}
2531 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2532 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2533 			}
2534 			asoc->mapping_array_base_tsn += (slide_from << 3);
2535 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2536 				sctp_log_map(asoc->mapping_array_base_tsn,
2537 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2538 				    SCTP_MAP_SLIDE_RESULT);
2539 			}
2540 		}
2541 	}
2542 }
2543 
2544 void
2545 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2546 {
2547 	struct sctp_association *asoc;
2548 	uint32_t highest_tsn;
2549 	int is_a_gap;
2550 
2551 	sctp_slide_mapping_arrays(stcb);
2552 	asoc = &stcb->asoc;
2553 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2554 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2555 	} else {
2556 		highest_tsn = asoc->highest_tsn_inside_map;
2557 	}
2558 	/* Is there a gap now? */
2559 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2560 
2561 	/*
2562 	 * Now we need to see if we need to queue a sack or just start the
2563 	 * timer (if allowed).
2564 	 */
2565 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2566 		/*
2567 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2568 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2569 		 * SACK
2570 		 */
2571 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2572 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2573 			    stcb->sctp_ep, stcb, NULL,
2574 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2575 		}
2576 		sctp_send_shutdown(stcb,
2577 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2578 		if (is_a_gap) {
2579 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2580 		}
2581 	} else {
2582 		/*
2583 		 * CMT DAC algorithm: increase number of packets received
2584 		 * since last ack
2585 		 */
2586 		stcb->asoc.cmt_dac_pkts_rcvd++;
2587 
2588 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2589 							 * SACK */
2590 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2591 							 * longer is one */
2592 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2593 		    (is_a_gap) ||	/* is still a gap */
2594 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2595 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2596 		    ) {
2597 
2598 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2599 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2600 			    (stcb->asoc.send_sack == 0) &&
2601 			    (stcb->asoc.numduptsns == 0) &&
2602 			    (stcb->asoc.delayed_ack) &&
2603 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2604 
2605 				/*
2606 				 * CMT DAC algorithm: With CMT, delay acks
2607 				 * even in the face of
2608 				 *
2609 				 * reordering. Therefore, if acks that do
2610 				 * not have to be sent because of the above
2611 				 * reasons, will be delayed. That is, acks
2612 				 * that would have been sent due to gap
2613 				 * reports will be delayed with DAC. Start
2614 				 * the delayed ack timer.
2615 				 */
2616 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2617 				    stcb->sctp_ep, stcb, NULL);
2618 			} else {
2619 				/*
2620 				 * Ok we must build a SACK since the timer
2621 				 * is pending, we got our first packet OR
2622 				 * there are gaps or duplicates.
2623 				 */
2624 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2625 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2626 			}
2627 		} else {
2628 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2629 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2630 				    stcb->sctp_ep, stcb, NULL);
2631 			}
2632 		}
2633 	}
2634 }
2635 
2636 int
2637 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2638     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2639     struct sctp_nets *net, uint32_t *high_tsn)
2640 {
2641 	struct sctp_chunkhdr *ch, chunk_buf;
2642 	struct sctp_association *asoc;
2643 	int num_chunks = 0;	/* number of control chunks processed */
2644 	int stop_proc = 0;
2645 	int chk_length, break_flag, last_chunk;
2646 	int abort_flag = 0, was_a_gap;
2647 	struct mbuf *m;
2648 	uint32_t highest_tsn;
2649 
2650 	/* set the rwnd */
2651 	sctp_set_rwnd(stcb, &stcb->asoc);
2652 
2653 	m = *mm;
2654 	SCTP_TCB_LOCK_ASSERT(stcb);
2655 	asoc = &stcb->asoc;
2656 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2657 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2658 	} else {
2659 		highest_tsn = asoc->highest_tsn_inside_map;
2660 	}
2661 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2662 	/*
2663 	 * setup where we got the last DATA packet from for any SACK that
2664 	 * may need to go out. Don't bump the net. This is done ONLY when a
2665 	 * chunk is assigned.
2666 	 */
2667 	asoc->last_data_chunk_from = net;
2668 
2669 	/*-
2670 	 * Now before we proceed we must figure out if this is a wasted
2671 	 * cluster... i.e. it is a small packet sent in and yet the driver
2672 	 * underneath allocated a full cluster for it. If so we must copy it
2673 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2674 	 * with cluster starvation. Note for __Panda__ we don't do this
2675 	 * since it has clusters all the way down to 64 bytes.
2676 	 */
2677 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2678 		/* we only handle mbufs that are singletons.. not chains */
2679 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2680 		if (m) {
2681 			/* ok lets see if we can copy the data up */
2682 			caddr_t *from, *to;
2683 
2684 			/* get the pointers and copy */
2685 			to = mtod(m, caddr_t *);
2686 			from = mtod((*mm), caddr_t *);
2687 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2688 			/* copy the length and free up the old */
2689 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2690 			sctp_m_freem(*mm);
2691 			/* success, back copy */
2692 			*mm = m;
2693 		} else {
2694 			/* We are in trouble in the mbuf world .. yikes */
2695 			m = *mm;
2696 		}
2697 	}
2698 	/* get pointer to the first chunk header */
2699 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2700 	    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2701 	if (ch == NULL) {
2702 		return (1);
2703 	}
2704 	/*
2705 	 * process all DATA chunks...
2706 	 */
2707 	*high_tsn = asoc->cumulative_tsn;
2708 	break_flag = 0;
2709 	asoc->data_pkts_seen++;
2710 	while (stop_proc == 0) {
2711 		/* validate chunk length */
2712 		chk_length = ntohs(ch->chunk_length);
2713 		if (length - *offset < chk_length) {
2714 			/* all done, mutulated chunk */
2715 			stop_proc = 1;
2716 			continue;
2717 		}
2718 		if ((asoc->idata_supported == 1) &&
2719 		    (ch->chunk_type == SCTP_DATA)) {
2720 			struct mbuf *op_err;
2721 			char msg[SCTP_DIAG_INFO_LEN];
2722 
2723 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2724 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2725 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2726 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2727 			return (2);
2728 		}
2729 		if ((asoc->idata_supported == 0) &&
2730 		    (ch->chunk_type == SCTP_IDATA)) {
2731 			struct mbuf *op_err;
2732 			char msg[SCTP_DIAG_INFO_LEN];
2733 
2734 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2735 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2736 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2737 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2738 			return (2);
2739 		}
2740 		if ((ch->chunk_type == SCTP_DATA) ||
2741 		    (ch->chunk_type == SCTP_IDATA)) {
2742 			int clen;
2743 
2744 			if (ch->chunk_type == SCTP_DATA) {
2745 				clen = sizeof(struct sctp_data_chunk);
2746 			} else {
2747 				clen = sizeof(struct sctp_idata_chunk);
2748 			}
2749 			if (chk_length < clen) {
2750 				/*
2751 				 * Need to send an abort since we had a
2752 				 * invalid data chunk.
2753 				 */
2754 				struct mbuf *op_err;
2755 				char msg[SCTP_DIAG_INFO_LEN];
2756 
2757 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2758 				    chk_length);
2759 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2760 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2761 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2762 				return (2);
2763 			}
2764 #ifdef SCTP_AUDITING_ENABLED
2765 			sctp_audit_log(0xB1, 0);
2766 #endif
2767 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2768 				last_chunk = 1;
2769 			} else {
2770 				last_chunk = 0;
2771 			}
2772 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2773 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2774 			    last_chunk, ch->chunk_type)) {
2775 				num_chunks++;
2776 			}
2777 			if (abort_flag)
2778 				return (2);
2779 
2780 			if (break_flag) {
2781 				/*
2782 				 * Set because of out of rwnd space and no
2783 				 * drop rep space left.
2784 				 */
2785 				stop_proc = 1;
2786 				continue;
2787 			}
2788 		} else {
2789 			/* not a data chunk in the data region */
2790 			switch (ch->chunk_type) {
2791 			case SCTP_INITIATION:
2792 			case SCTP_INITIATION_ACK:
2793 			case SCTP_SELECTIVE_ACK:
2794 			case SCTP_NR_SELECTIVE_ACK:
2795 			case SCTP_HEARTBEAT_REQUEST:
2796 			case SCTP_HEARTBEAT_ACK:
2797 			case SCTP_ABORT_ASSOCIATION:
2798 			case SCTP_SHUTDOWN:
2799 			case SCTP_SHUTDOWN_ACK:
2800 			case SCTP_OPERATION_ERROR:
2801 			case SCTP_COOKIE_ECHO:
2802 			case SCTP_COOKIE_ACK:
2803 			case SCTP_ECN_ECHO:
2804 			case SCTP_ECN_CWR:
2805 			case SCTP_SHUTDOWN_COMPLETE:
2806 			case SCTP_AUTHENTICATION:
2807 			case SCTP_ASCONF_ACK:
2808 			case SCTP_PACKET_DROPPED:
2809 			case SCTP_STREAM_RESET:
2810 			case SCTP_FORWARD_CUM_TSN:
2811 			case SCTP_ASCONF:
2812 				{
2813 					/*
2814 					 * Now, what do we do with KNOWN
2815 					 * chunks that are NOT in the right
2816 					 * place?
2817 					 *
2818 					 * For now, I do nothing but ignore
2819 					 * them. We may later want to add
2820 					 * sysctl stuff to switch out and do
2821 					 * either an ABORT() or possibly
2822 					 * process them.
2823 					 */
2824 					struct mbuf *op_err;
2825 					char msg[SCTP_DIAG_INFO_LEN];
2826 
2827 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2828 					    ch->chunk_type);
2829 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2830 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2831 					return (2);
2832 				}
2833 			default:
2834 				/* unknown chunk type, use bit rules */
2835 				if (ch->chunk_type & 0x40) {
2836 					/* Add a error report to the queue */
2837 					struct mbuf *op_err;
2838 					struct sctp_gen_error_cause *cause;
2839 
2840 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2841 					    0, M_NOWAIT, 1, MT_DATA);
2842 					if (op_err != NULL) {
2843 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2844 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2845 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2846 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2847 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2848 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2849 							sctp_queue_op_err(stcb, op_err);
2850 						} else {
2851 							sctp_m_freem(op_err);
2852 						}
2853 					}
2854 				}
2855 				if ((ch->chunk_type & 0x80) == 0) {
2856 					/* discard the rest of this packet */
2857 					stop_proc = 1;
2858 				}	/* else skip this bad chunk and
2859 					 * continue... */
2860 				break;
2861 			}	/* switch of chunk type */
2862 		}
2863 		*offset += SCTP_SIZE32(chk_length);
2864 		if ((*offset >= length) || stop_proc) {
2865 			/* no more data left in the mbuf chain */
2866 			stop_proc = 1;
2867 			continue;
2868 		}
2869 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2870 		    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2871 		if (ch == NULL) {
2872 			*offset = length;
2873 			stop_proc = 1;
2874 			continue;
2875 		}
2876 	}
2877 	if (break_flag) {
2878 		/*
2879 		 * we need to report rwnd overrun drops.
2880 		 */
2881 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2882 	}
2883 	if (num_chunks) {
2884 		/*
2885 		 * Did we get data, if so update the time for auto-close and
2886 		 * give peer credit for being alive.
2887 		 */
2888 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2889 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2890 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2891 			    stcb->asoc.overall_error_count,
2892 			    0,
2893 			    SCTP_FROM_SCTP_INDATA,
2894 			    __LINE__);
2895 		}
2896 		stcb->asoc.overall_error_count = 0;
2897 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2898 	}
2899 	/* now service all of the reassm queue if needed */
2900 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2901 		/* Assure that we ack right away */
2902 		stcb->asoc.send_sack = 1;
2903 	}
2904 	/* Start a sack timer or QUEUE a SACK for sending */
2905 	sctp_sack_check(stcb, was_a_gap);
2906 	return (0);
2907 }
2908 
2909 static int
2910 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2911     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2912     int *num_frs,
2913     uint32_t *biggest_newly_acked_tsn,
2914     uint32_t *this_sack_lowest_newack,
2915     int *rto_ok)
2916 {
2917 	struct sctp_tmit_chunk *tp1;
2918 	unsigned int theTSN;
2919 	int j, wake_him = 0, circled = 0;
2920 
2921 	/* Recover the tp1 we last saw */
2922 	tp1 = *p_tp1;
2923 	if (tp1 == NULL) {
2924 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2925 	}
2926 	for (j = frag_strt; j <= frag_end; j++) {
2927 		theTSN = j + last_tsn;
2928 		while (tp1) {
2929 			if (tp1->rec.data.doing_fast_retransmit)
2930 				(*num_frs) += 1;
2931 
2932 			/*-
2933 			 * CMT: CUCv2 algorithm. For each TSN being
2934 			 * processed from the sent queue, track the
2935 			 * next expected pseudo-cumack, or
2936 			 * rtx_pseudo_cumack, if required. Separate
2937 			 * cumack trackers for first transmissions,
2938 			 * and retransmissions.
2939 			 */
2940 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2941 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2942 			    (tp1->snd_count == 1)) {
2943 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2944 				tp1->whoTo->find_pseudo_cumack = 0;
2945 			}
2946 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2947 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2948 			    (tp1->snd_count > 1)) {
2949 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2950 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2951 			}
2952 			if (tp1->rec.data.tsn == theTSN) {
2953 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2954 					/*-
2955 					 * must be held until
2956 					 * cum-ack passes
2957 					 */
2958 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2959 						/*-
2960 						 * If it is less than RESEND, it is
2961 						 * now no-longer in flight.
2962 						 * Higher values may already be set
2963 						 * via previous Gap Ack Blocks...
2964 						 * i.e. ACKED or RESEND.
2965 						 */
2966 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2967 						    *biggest_newly_acked_tsn)) {
2968 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2969 						}
2970 						/*-
2971 						 * CMT: SFR algo (and HTNA) - set
2972 						 * saw_newack to 1 for dest being
2973 						 * newly acked. update
2974 						 * this_sack_highest_newack if
2975 						 * appropriate.
2976 						 */
2977 						if (tp1->rec.data.chunk_was_revoked == 0)
2978 							tp1->whoTo->saw_newack = 1;
2979 
2980 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2981 						    tp1->whoTo->this_sack_highest_newack)) {
2982 							tp1->whoTo->this_sack_highest_newack =
2983 							    tp1->rec.data.tsn;
2984 						}
2985 						/*-
2986 						 * CMT DAC algo: also update
2987 						 * this_sack_lowest_newack
2988 						 */
2989 						if (*this_sack_lowest_newack == 0) {
2990 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2991 								sctp_log_sack(*this_sack_lowest_newack,
2992 								    last_tsn,
2993 								    tp1->rec.data.tsn,
2994 								    0,
2995 								    0,
2996 								    SCTP_LOG_TSN_ACKED);
2997 							}
2998 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2999 						}
3000 						/*-
3001 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3002 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3003 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3004 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3005 						 * Separate pseudo_cumack trackers for first transmissions and
3006 						 * retransmissions.
3007 						 */
3008 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3009 							if (tp1->rec.data.chunk_was_revoked == 0) {
3010 								tp1->whoTo->new_pseudo_cumack = 1;
3011 							}
3012 							tp1->whoTo->find_pseudo_cumack = 1;
3013 						}
3014 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3015 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3016 						}
3017 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3018 							if (tp1->rec.data.chunk_was_revoked == 0) {
3019 								tp1->whoTo->new_pseudo_cumack = 1;
3020 							}
3021 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3022 						}
3023 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3024 							sctp_log_sack(*biggest_newly_acked_tsn,
3025 							    last_tsn,
3026 							    tp1->rec.data.tsn,
3027 							    frag_strt,
3028 							    frag_end,
3029 							    SCTP_LOG_TSN_ACKED);
3030 						}
3031 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3032 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3033 							    tp1->whoTo->flight_size,
3034 							    tp1->book_size,
3035 							    (uint32_t)(uintptr_t)tp1->whoTo,
3036 							    tp1->rec.data.tsn);
3037 						}
3038 						sctp_flight_size_decrease(tp1);
3039 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3040 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3041 							    tp1);
3042 						}
3043 						sctp_total_flight_decrease(stcb, tp1);
3044 
3045 						tp1->whoTo->net_ack += tp1->send_size;
3046 						if (tp1->snd_count < 2) {
3047 							/*-
3048 							 * True non-retransmited chunk
3049 							 */
3050 							tp1->whoTo->net_ack2 += tp1->send_size;
3051 
3052 							/*-
3053 							 * update RTO too ?
3054 							 */
3055 							if (tp1->do_rtt) {
3056 								if (*rto_ok) {
3057 									tp1->whoTo->RTO =
3058 									    sctp_calculate_rto(stcb,
3059 									    &stcb->asoc,
3060 									    tp1->whoTo,
3061 									    &tp1->sent_rcv_time,
3062 									    sctp_align_safe_nocopy,
3063 									    SCTP_RTT_FROM_DATA);
3064 									*rto_ok = 0;
3065 								}
3066 								if (tp1->whoTo->rto_needed == 0) {
3067 									tp1->whoTo->rto_needed = 1;
3068 								}
3069 								tp1->do_rtt = 0;
3070 							}
3071 						}
3072 					}
3073 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3074 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3075 						    stcb->asoc.this_sack_highest_gap)) {
3076 							stcb->asoc.this_sack_highest_gap =
3077 							    tp1->rec.data.tsn;
3078 						}
3079 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3080 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3081 #ifdef SCTP_AUDITING_ENABLED
3082 							sctp_audit_log(0xB2,
3083 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3084 #endif
3085 						}
3086 					}
3087 					/*-
3088 					 * All chunks NOT UNSENT fall through here and are marked
3089 					 * (leave PR-SCTP ones that are to skip alone though)
3090 					 */
3091 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3092 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3093 						tp1->sent = SCTP_DATAGRAM_MARKED;
3094 					}
3095 					if (tp1->rec.data.chunk_was_revoked) {
3096 						/* deflate the cwnd */
3097 						tp1->whoTo->cwnd -= tp1->book_size;
3098 						tp1->rec.data.chunk_was_revoked = 0;
3099 					}
3100 					/* NR Sack code here */
3101 					if (nr_sacking &&
3102 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3103 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3104 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3105 #ifdef INVARIANTS
3106 						} else {
3107 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3108 #endif
3109 						}
3110 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3111 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3112 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3113 							stcb->asoc.trigger_reset = 1;
3114 						}
3115 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3116 						if (tp1->data) {
3117 							/*
3118 							 * sa_ignore
3119 							 * NO_NULL_CHK
3120 							 */
3121 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3122 							sctp_m_freem(tp1->data);
3123 							tp1->data = NULL;
3124 						}
3125 						wake_him++;
3126 					}
3127 				}
3128 				break;
3129 			}	/* if (tp1->tsn == theTSN) */
3130 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3131 				break;
3132 			}
3133 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3134 			if ((tp1 == NULL) && (circled == 0)) {
3135 				circled++;
3136 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3137 			}
3138 		}		/* end while (tp1) */
3139 		if (tp1 == NULL) {
3140 			circled = 0;
3141 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3142 		}
3143 		/* In case the fragments were not in order we must reset */
3144 	}			/* end for (j = fragStart */
3145 	*p_tp1 = tp1;
3146 	return (wake_him);	/* Return value only used for nr-sack */
3147 }
3148 
3149 
3150 static int
3151 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3152     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3153     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3154     int num_seg, int num_nr_seg, int *rto_ok)
3155 {
3156 	struct sctp_gap_ack_block *frag, block;
3157 	struct sctp_tmit_chunk *tp1;
3158 	int i;
3159 	int num_frs = 0;
3160 	int chunk_freed;
3161 	int non_revocable;
3162 	uint16_t frag_strt, frag_end, prev_frag_end;
3163 
3164 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3165 	prev_frag_end = 0;
3166 	chunk_freed = 0;
3167 
3168 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3169 		if (i == num_seg) {
3170 			prev_frag_end = 0;
3171 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3172 		}
3173 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3174 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3175 		*offset += sizeof(block);
3176 		if (frag == NULL) {
3177 			return (chunk_freed);
3178 		}
3179 		frag_strt = ntohs(frag->start);
3180 		frag_end = ntohs(frag->end);
3181 
3182 		if (frag_strt > frag_end) {
3183 			/* This gap report is malformed, skip it. */
3184 			continue;
3185 		}
3186 		if (frag_strt <= prev_frag_end) {
3187 			/* This gap report is not in order, so restart. */
3188 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3189 		}
3190 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3191 			*biggest_tsn_acked = last_tsn + frag_end;
3192 		}
3193 		if (i < num_seg) {
3194 			non_revocable = 0;
3195 		} else {
3196 			non_revocable = 1;
3197 		}
3198 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3199 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3200 		    this_sack_lowest_newack, rto_ok)) {
3201 			chunk_freed = 1;
3202 		}
3203 		prev_frag_end = frag_end;
3204 	}
3205 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3206 		if (num_frs)
3207 			sctp_log_fr(*biggest_tsn_acked,
3208 			    *biggest_newly_acked_tsn,
3209 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3210 	}
3211 	return (chunk_freed);
3212 }
3213 
3214 static void
3215 sctp_check_for_revoked(struct sctp_tcb *stcb,
3216     struct sctp_association *asoc, uint32_t cumack,
3217     uint32_t biggest_tsn_acked)
3218 {
3219 	struct sctp_tmit_chunk *tp1;
3220 
3221 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3222 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3223 			/*
3224 			 * ok this guy is either ACK or MARKED. If it is
3225 			 * ACKED it has been previously acked but not this
3226 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3227 			 * again.
3228 			 */
3229 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3230 				break;
3231 			}
3232 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3233 				/* it has been revoked */
3234 				tp1->sent = SCTP_DATAGRAM_SENT;
3235 				tp1->rec.data.chunk_was_revoked = 1;
3236 				/*
3237 				 * We must add this stuff back in to assure
3238 				 * timers and such get started.
3239 				 */
3240 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3241 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3242 					    tp1->whoTo->flight_size,
3243 					    tp1->book_size,
3244 					    (uint32_t)(uintptr_t)tp1->whoTo,
3245 					    tp1->rec.data.tsn);
3246 				}
3247 				sctp_flight_size_increase(tp1);
3248 				sctp_total_flight_increase(stcb, tp1);
3249 				/*
3250 				 * We inflate the cwnd to compensate for our
3251 				 * artificial inflation of the flight_size.
3252 				 */
3253 				tp1->whoTo->cwnd += tp1->book_size;
3254 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3255 					sctp_log_sack(asoc->last_acked_seq,
3256 					    cumack,
3257 					    tp1->rec.data.tsn,
3258 					    0,
3259 					    0,
3260 					    SCTP_LOG_TSN_REVOKED);
3261 				}
3262 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3263 				/* it has been re-acked in this SACK */
3264 				tp1->sent = SCTP_DATAGRAM_ACKED;
3265 			}
3266 		}
3267 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3268 			break;
3269 	}
3270 }
3271 
3272 
3273 static void
3274 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3275     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3276 {
3277 	struct sctp_tmit_chunk *tp1;
3278 	int strike_flag = 0;
3279 	struct timeval now;
3280 	int tot_retrans = 0;
3281 	uint32_t sending_seq;
3282 	struct sctp_nets *net;
3283 	int num_dests_sacked = 0;
3284 
3285 	/*
3286 	 * select the sending_seq, this is either the next thing ready to be
3287 	 * sent but not transmitted, OR, the next seq we assign.
3288 	 */
3289 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3290 	if (tp1 == NULL) {
3291 		sending_seq = asoc->sending_seq;
3292 	} else {
3293 		sending_seq = tp1->rec.data.tsn;
3294 	}
3295 
3296 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3297 	if ((asoc->sctp_cmt_on_off > 0) &&
3298 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3299 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3300 			if (net->saw_newack)
3301 				num_dests_sacked++;
3302 		}
3303 	}
3304 	if (stcb->asoc.prsctp_supported) {
3305 		(void)SCTP_GETTIME_TIMEVAL(&now);
3306 	}
3307 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3308 		strike_flag = 0;
3309 		if (tp1->no_fr_allowed) {
3310 			/* this one had a timeout or something */
3311 			continue;
3312 		}
3313 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3314 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3315 				sctp_log_fr(biggest_tsn_newly_acked,
3316 				    tp1->rec.data.tsn,
3317 				    tp1->sent,
3318 				    SCTP_FR_LOG_CHECK_STRIKE);
3319 		}
3320 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3321 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3322 			/* done */
3323 			break;
3324 		}
3325 		if (stcb->asoc.prsctp_supported) {
3326 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3327 				/* Is it expired? */
3328 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3329 					/* Yes so drop it */
3330 					if (tp1->data != NULL) {
3331 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3332 						    SCTP_SO_NOT_LOCKED);
3333 					}
3334 					continue;
3335 				}
3336 			}
3337 		}
3338 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3339 			/* we are beyond the tsn in the sack  */
3340 			break;
3341 		}
3342 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3343 			/* either a RESEND, ACKED, or MARKED */
3344 			/* skip */
3345 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3346 				/* Continue strikin FWD-TSN chunks */
3347 				tp1->rec.data.fwd_tsn_cnt++;
3348 			}
3349 			continue;
3350 		}
3351 		/*
3352 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3353 		 */
3354 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3355 			/*
3356 			 * No new acks were receieved for data sent to this
3357 			 * dest. Therefore, according to the SFR algo for
3358 			 * CMT, no data sent to this dest can be marked for
3359 			 * FR using this SACK.
3360 			 */
3361 			continue;
3362 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3363 		    tp1->whoTo->this_sack_highest_newack)) {
3364 			/*
3365 			 * CMT: New acks were receieved for data sent to
3366 			 * this dest. But no new acks were seen for data
3367 			 * sent after tp1. Therefore, according to the SFR
3368 			 * algo for CMT, tp1 cannot be marked for FR using
3369 			 * this SACK. This step covers part of the DAC algo
3370 			 * and the HTNA algo as well.
3371 			 */
3372 			continue;
3373 		}
3374 		/*
3375 		 * Here we check to see if we were have already done a FR
3376 		 * and if so we see if the biggest TSN we saw in the sack is
3377 		 * smaller than the recovery point. If so we don't strike
3378 		 * the tsn... otherwise we CAN strike the TSN.
3379 		 */
3380 		/*
3381 		 * @@@ JRI: Check for CMT if (accum_moved &&
3382 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3383 		 * 0)) {
3384 		 */
3385 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3386 			/*
3387 			 * Strike the TSN if in fast-recovery and cum-ack
3388 			 * moved.
3389 			 */
3390 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3391 				sctp_log_fr(biggest_tsn_newly_acked,
3392 				    tp1->rec.data.tsn,
3393 				    tp1->sent,
3394 				    SCTP_FR_LOG_STRIKE_CHUNK);
3395 			}
3396 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3397 				tp1->sent++;
3398 			}
3399 			if ((asoc->sctp_cmt_on_off > 0) &&
3400 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3401 				/*
3402 				 * CMT DAC algorithm: If SACK flag is set to
3403 				 * 0, then lowest_newack test will not pass
3404 				 * because it would have been set to the
3405 				 * cumack earlier. If not already to be
3406 				 * rtx'd, If not a mixed sack and if tp1 is
3407 				 * not between two sacked TSNs, then mark by
3408 				 * one more. NOTE that we are marking by one
3409 				 * additional time since the SACK DAC flag
3410 				 * indicates that two packets have been
3411 				 * received after this missing TSN.
3412 				 */
3413 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3414 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3415 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3416 						sctp_log_fr(16 + num_dests_sacked,
3417 						    tp1->rec.data.tsn,
3418 						    tp1->sent,
3419 						    SCTP_FR_LOG_STRIKE_CHUNK);
3420 					}
3421 					tp1->sent++;
3422 				}
3423 			}
3424 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3425 		    (asoc->sctp_cmt_on_off == 0)) {
3426 			/*
3427 			 * For those that have done a FR we must take
3428 			 * special consideration if we strike. I.e the
3429 			 * biggest_newly_acked must be higher than the
3430 			 * sending_seq at the time we did the FR.
3431 			 */
3432 			if (
3433 #ifdef SCTP_FR_TO_ALTERNATE
3434 			/*
3435 			 * If FR's go to new networks, then we must only do
3436 			 * this for singly homed asoc's. However if the FR's
3437 			 * go to the same network (Armando's work) then its
3438 			 * ok to FR multiple times.
3439 			 */
3440 			    (asoc->numnets < 2)
3441 #else
3442 			    (1)
3443 #endif
3444 			    ) {
3445 
3446 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3447 				    tp1->rec.data.fast_retran_tsn)) {
3448 					/*
3449 					 * Strike the TSN, since this ack is
3450 					 * beyond where things were when we
3451 					 * did a FR.
3452 					 */
3453 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3454 						sctp_log_fr(biggest_tsn_newly_acked,
3455 						    tp1->rec.data.tsn,
3456 						    tp1->sent,
3457 						    SCTP_FR_LOG_STRIKE_CHUNK);
3458 					}
3459 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3460 						tp1->sent++;
3461 					}
3462 					strike_flag = 1;
3463 					if ((asoc->sctp_cmt_on_off > 0) &&
3464 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3465 						/*
3466 						 * CMT DAC algorithm: If
3467 						 * SACK flag is set to 0,
3468 						 * then lowest_newack test
3469 						 * will not pass because it
3470 						 * would have been set to
3471 						 * the cumack earlier. If
3472 						 * not already to be rtx'd,
3473 						 * If not a mixed sack and
3474 						 * if tp1 is not between two
3475 						 * sacked TSNs, then mark by
3476 						 * one more. NOTE that we
3477 						 * are marking by one
3478 						 * additional time since the
3479 						 * SACK DAC flag indicates
3480 						 * that two packets have
3481 						 * been received after this
3482 						 * missing TSN.
3483 						 */
3484 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3485 						    (num_dests_sacked == 1) &&
3486 						    SCTP_TSN_GT(this_sack_lowest_newack,
3487 						    tp1->rec.data.tsn)) {
3488 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3489 								sctp_log_fr(32 + num_dests_sacked,
3490 								    tp1->rec.data.tsn,
3491 								    tp1->sent,
3492 								    SCTP_FR_LOG_STRIKE_CHUNK);
3493 							}
3494 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3495 								tp1->sent++;
3496 							}
3497 						}
3498 					}
3499 				}
3500 			}
3501 			/*
3502 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3503 			 * algo covers HTNA.
3504 			 */
3505 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3506 		    biggest_tsn_newly_acked)) {
3507 			/*
3508 			 * We don't strike these: This is the  HTNA
3509 			 * algorithm i.e. we don't strike If our TSN is
3510 			 * larger than the Highest TSN Newly Acked.
3511 			 */
3512 			;
3513 		} else {
3514 			/* Strike the TSN */
3515 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3516 				sctp_log_fr(biggest_tsn_newly_acked,
3517 				    tp1->rec.data.tsn,
3518 				    tp1->sent,
3519 				    SCTP_FR_LOG_STRIKE_CHUNK);
3520 			}
3521 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3522 				tp1->sent++;
3523 			}
3524 			if ((asoc->sctp_cmt_on_off > 0) &&
3525 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3526 				/*
3527 				 * CMT DAC algorithm: If SACK flag is set to
3528 				 * 0, then lowest_newack test will not pass
3529 				 * because it would have been set to the
3530 				 * cumack earlier. If not already to be
3531 				 * rtx'd, If not a mixed sack and if tp1 is
3532 				 * not between two sacked TSNs, then mark by
3533 				 * one more. NOTE that we are marking by one
3534 				 * additional time since the SACK DAC flag
3535 				 * indicates that two packets have been
3536 				 * received after this missing TSN.
3537 				 */
3538 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3539 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3540 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3541 						sctp_log_fr(48 + num_dests_sacked,
3542 						    tp1->rec.data.tsn,
3543 						    tp1->sent,
3544 						    SCTP_FR_LOG_STRIKE_CHUNK);
3545 					}
3546 					tp1->sent++;
3547 				}
3548 			}
3549 		}
3550 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3551 			struct sctp_nets *alt;
3552 
3553 			/* fix counts and things */
3554 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3555 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3556 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3557 				    tp1->book_size,
3558 				    (uint32_t)(uintptr_t)tp1->whoTo,
3559 				    tp1->rec.data.tsn);
3560 			}
3561 			if (tp1->whoTo) {
3562 				tp1->whoTo->net_ack++;
3563 				sctp_flight_size_decrease(tp1);
3564 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3565 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3566 					    tp1);
3567 				}
3568 			}
3569 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3570 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3571 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3572 			}
3573 			/* add back to the rwnd */
3574 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3575 
3576 			/* remove from the total flight */
3577 			sctp_total_flight_decrease(stcb, tp1);
3578 
3579 			if ((stcb->asoc.prsctp_supported) &&
3580 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3581 				/*
3582 				 * Has it been retransmitted tv_sec times? -
3583 				 * we store the retran count there.
3584 				 */
3585 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3586 					/* Yes, so drop it */
3587 					if (tp1->data != NULL) {
3588 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3589 						    SCTP_SO_NOT_LOCKED);
3590 					}
3591 					/* Make sure to flag we had a FR */
3592 					tp1->whoTo->net_ack++;
3593 					continue;
3594 				}
3595 			}
3596 			/*
3597 			 * SCTP_PRINTF("OK, we are now ready to FR this
3598 			 * guy\n");
3599 			 */
3600 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3601 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3602 				    0, SCTP_FR_MARKED);
3603 			}
3604 			if (strike_flag) {
3605 				/* This is a subsequent FR */
3606 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3607 			}
3608 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3609 			if (asoc->sctp_cmt_on_off > 0) {
3610 				/*
3611 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3612 				 * If CMT is being used, then pick dest with
3613 				 * largest ssthresh for any retransmission.
3614 				 */
3615 				tp1->no_fr_allowed = 1;
3616 				alt = tp1->whoTo;
3617 				/* sa_ignore NO_NULL_CHK */
3618 				if (asoc->sctp_cmt_pf > 0) {
3619 					/*
3620 					 * JRS 5/18/07 - If CMT PF is on,
3621 					 * use the PF version of
3622 					 * find_alt_net()
3623 					 */
3624 					alt = sctp_find_alternate_net(stcb, alt, 2);
3625 				} else {
3626 					/*
3627 					 * JRS 5/18/07 - If only CMT is on,
3628 					 * use the CMT version of
3629 					 * find_alt_net()
3630 					 */
3631 					/* sa_ignore NO_NULL_CHK */
3632 					alt = sctp_find_alternate_net(stcb, alt, 1);
3633 				}
3634 				if (alt == NULL) {
3635 					alt = tp1->whoTo;
3636 				}
3637 				/*
3638 				 * CUCv2: If a different dest is picked for
3639 				 * the retransmission, then new
3640 				 * (rtx-)pseudo_cumack needs to be tracked
3641 				 * for orig dest. Let CUCv2 track new (rtx-)
3642 				 * pseudo-cumack always.
3643 				 */
3644 				if (tp1->whoTo) {
3645 					tp1->whoTo->find_pseudo_cumack = 1;
3646 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3647 				}
3648 			} else {/* CMT is OFF */
3649 
3650 #ifdef SCTP_FR_TO_ALTERNATE
3651 				/* Can we find an alternate? */
3652 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3653 #else
3654 				/*
3655 				 * default behavior is to NOT retransmit
3656 				 * FR's to an alternate. Armando Caro's
3657 				 * paper details why.
3658 				 */
3659 				alt = tp1->whoTo;
3660 #endif
3661 			}
3662 
3663 			tp1->rec.data.doing_fast_retransmit = 1;
3664 			tot_retrans++;
3665 			/* mark the sending seq for possible subsequent FR's */
3666 			/*
3667 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3668 			 * (uint32_t)tpi->rec.data.tsn);
3669 			 */
3670 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3671 				/*
3672 				 * If the queue of send is empty then its
3673 				 * the next sequence number that will be
3674 				 * assigned so we subtract one from this to
3675 				 * get the one we last sent.
3676 				 */
3677 				tp1->rec.data.fast_retran_tsn = sending_seq;
3678 			} else {
3679 				/*
3680 				 * If there are chunks on the send queue
3681 				 * (unsent data that has made it from the
3682 				 * stream queues but not out the door, we
3683 				 * take the first one (which will have the
3684 				 * lowest TSN) and subtract one to get the
3685 				 * one we last sent.
3686 				 */
3687 				struct sctp_tmit_chunk *ttt;
3688 
3689 				ttt = TAILQ_FIRST(&asoc->send_queue);
3690 				tp1->rec.data.fast_retran_tsn =
3691 				    ttt->rec.data.tsn;
3692 			}
3693 
3694 			if (tp1->do_rtt) {
3695 				/*
3696 				 * this guy had a RTO calculation pending on
3697 				 * it, cancel it
3698 				 */
3699 				if ((tp1->whoTo != NULL) &&
3700 				    (tp1->whoTo->rto_needed == 0)) {
3701 					tp1->whoTo->rto_needed = 1;
3702 				}
3703 				tp1->do_rtt = 0;
3704 			}
3705 			if (alt != tp1->whoTo) {
3706 				/* yes, there is an alternate. */
3707 				sctp_free_remote_addr(tp1->whoTo);
3708 				/* sa_ignore FREED_MEMORY */
3709 				tp1->whoTo = alt;
3710 				atomic_add_int(&alt->ref_count, 1);
3711 			}
3712 		}
3713 	}
3714 }
3715 
3716 struct sctp_tmit_chunk *
3717 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3718     struct sctp_association *asoc)
3719 {
3720 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3721 	struct timeval now;
3722 	int now_filled = 0;
3723 
3724 	if (asoc->prsctp_supported == 0) {
3725 		return (NULL);
3726 	}
3727 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3728 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3729 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3730 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3731 			/* no chance to advance, out of here */
3732 			break;
3733 		}
3734 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3735 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3736 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3737 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3738 				    asoc->advanced_peer_ack_point,
3739 				    tp1->rec.data.tsn, 0, 0);
3740 			}
3741 		}
3742 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3743 			/*
3744 			 * We can't fwd-tsn past any that are reliable aka
3745 			 * retransmitted until the asoc fails.
3746 			 */
3747 			break;
3748 		}
3749 		if (!now_filled) {
3750 			(void)SCTP_GETTIME_TIMEVAL(&now);
3751 			now_filled = 1;
3752 		}
3753 		/*
3754 		 * now we got a chunk which is marked for another
3755 		 * retransmission to a PR-stream but has run out its chances
3756 		 * already maybe OR has been marked to skip now. Can we skip
3757 		 * it if its a resend?
3758 		 */
3759 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3760 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3761 			/*
3762 			 * Now is this one marked for resend and its time is
3763 			 * now up?
3764 			 */
3765 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3766 				/* Yes so drop it */
3767 				if (tp1->data) {
3768 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3769 					    1, SCTP_SO_NOT_LOCKED);
3770 				}
3771 			} else {
3772 				/*
3773 				 * No, we are done when hit one for resend
3774 				 * whos time as not expired.
3775 				 */
3776 				break;
3777 			}
3778 		}
3779 		/*
3780 		 * Ok now if this chunk is marked to drop it we can clean up
3781 		 * the chunk, advance our peer ack point and we can check
3782 		 * the next chunk.
3783 		 */
3784 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3785 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3786 			/* advance PeerAckPoint goes forward */
3787 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3788 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3789 				a_adv = tp1;
3790 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3791 				/* No update but we do save the chk */
3792 				a_adv = tp1;
3793 			}
3794 		} else {
3795 			/*
3796 			 * If it is still in RESEND we can advance no
3797 			 * further
3798 			 */
3799 			break;
3800 		}
3801 	}
3802 	return (a_adv);
3803 }
3804 
3805 static int
3806 sctp_fs_audit(struct sctp_association *asoc)
3807 {
3808 	struct sctp_tmit_chunk *chk;
3809 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3810 	int ret;
3811 #ifndef INVARIANTS
3812 	int entry_flight, entry_cnt;
3813 #endif
3814 
3815 	ret = 0;
3816 #ifndef INVARIANTS
3817 	entry_flight = asoc->total_flight;
3818 	entry_cnt = asoc->total_flight_count;
3819 #endif
3820 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3821 		return (0);
3822 
3823 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3824 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3825 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3826 			    chk->rec.data.tsn,
3827 			    chk->send_size,
3828 			    chk->snd_count);
3829 			inflight++;
3830 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3831 			resend++;
3832 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3833 			inbetween++;
3834 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3835 			above++;
3836 		} else {
3837 			acked++;
3838 		}
3839 	}
3840 
3841 	if ((inflight > 0) || (inbetween > 0)) {
3842 #ifdef INVARIANTS
3843 		panic("Flight size-express incorrect? \n");
3844 #else
3845 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3846 		    entry_flight, entry_cnt);
3847 
3848 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3849 		    inflight, inbetween, resend, above, acked);
3850 		ret = 1;
3851 #endif
3852 	}
3853 	return (ret);
3854 }
3855 
3856 
3857 static void
3858 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3859     struct sctp_association *asoc,
3860     struct sctp_tmit_chunk *tp1)
3861 {
3862 	tp1->window_probe = 0;
3863 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3864 		/* TSN's skipped we do NOT move back. */
3865 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3866 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3867 		    tp1->book_size,
3868 		    (uint32_t)(uintptr_t)tp1->whoTo,
3869 		    tp1->rec.data.tsn);
3870 		return;
3871 	}
3872 	/* First setup this by shrinking flight */
3873 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3874 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3875 		    tp1);
3876 	}
3877 	sctp_flight_size_decrease(tp1);
3878 	sctp_total_flight_decrease(stcb, tp1);
3879 	/* Now mark for resend */
3880 	tp1->sent = SCTP_DATAGRAM_RESEND;
3881 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3882 
3883 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3884 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3885 		    tp1->whoTo->flight_size,
3886 		    tp1->book_size,
3887 		    (uint32_t)(uintptr_t)tp1->whoTo,
3888 		    tp1->rec.data.tsn);
3889 	}
3890 }
3891 
3892 void
3893 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3894     uint32_t rwnd, int *abort_now, int ecne_seen)
3895 {
3896 	struct sctp_nets *net;
3897 	struct sctp_association *asoc;
3898 	struct sctp_tmit_chunk *tp1, *tp2;
3899 	uint32_t old_rwnd;
3900 	int win_probe_recovery = 0;
3901 	int win_probe_recovered = 0;
3902 	int j, done_once = 0;
3903 	int rto_ok = 1;
3904 	uint32_t send_s;
3905 
3906 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3907 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3908 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3909 	}
3910 	SCTP_TCB_LOCK_ASSERT(stcb);
3911 #ifdef SCTP_ASOCLOG_OF_TSNS
3912 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3913 	stcb->asoc.cumack_log_at++;
3914 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3915 		stcb->asoc.cumack_log_at = 0;
3916 	}
3917 #endif
3918 	asoc = &stcb->asoc;
3919 	old_rwnd = asoc->peers_rwnd;
3920 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3921 		/* old ack */
3922 		return;
3923 	} else if (asoc->last_acked_seq == cumack) {
3924 		/* Window update sack */
3925 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3926 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3927 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3928 			/* SWS sender side engages */
3929 			asoc->peers_rwnd = 0;
3930 		}
3931 		if (asoc->peers_rwnd > old_rwnd) {
3932 			goto again;
3933 		}
3934 		return;
3935 	}
3936 	/* First setup for CC stuff */
3937 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3938 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3939 			/* Drag along the window_tsn for cwr's */
3940 			net->cwr_window_tsn = cumack;
3941 		}
3942 		net->prev_cwnd = net->cwnd;
3943 		net->net_ack = 0;
3944 		net->net_ack2 = 0;
3945 
3946 		/*
3947 		 * CMT: Reset CUC and Fast recovery algo variables before
3948 		 * SACK processing
3949 		 */
3950 		net->new_pseudo_cumack = 0;
3951 		net->will_exit_fast_recovery = 0;
3952 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3953 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3954 		}
3955 	}
3956 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3957 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3958 		    sctpchunk_listhead);
3959 		send_s = tp1->rec.data.tsn + 1;
3960 	} else {
3961 		send_s = asoc->sending_seq;
3962 	}
3963 	if (SCTP_TSN_GE(cumack, send_s)) {
3964 		struct mbuf *op_err;
3965 		char msg[SCTP_DIAG_INFO_LEN];
3966 
3967 		*abort_now = 1;
3968 		/* XXX */
3969 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3970 		    cumack, send_s);
3971 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3972 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3973 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3974 		return;
3975 	}
3976 	asoc->this_sack_highest_gap = cumack;
3977 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3978 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3979 		    stcb->asoc.overall_error_count,
3980 		    0,
3981 		    SCTP_FROM_SCTP_INDATA,
3982 		    __LINE__);
3983 	}
3984 	stcb->asoc.overall_error_count = 0;
3985 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3986 		/* process the new consecutive TSN first */
3987 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3988 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3989 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3990 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3991 				}
3992 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3993 					/*
3994 					 * If it is less than ACKED, it is
3995 					 * now no-longer in flight. Higher
3996 					 * values may occur during marking
3997 					 */
3998 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3999 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4000 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4001 							    tp1->whoTo->flight_size,
4002 							    tp1->book_size,
4003 							    (uint32_t)(uintptr_t)tp1->whoTo,
4004 							    tp1->rec.data.tsn);
4005 						}
4006 						sctp_flight_size_decrease(tp1);
4007 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4008 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4009 							    tp1);
4010 						}
4011 						/* sa_ignore NO_NULL_CHK */
4012 						sctp_total_flight_decrease(stcb, tp1);
4013 					}
4014 					tp1->whoTo->net_ack += tp1->send_size;
4015 					if (tp1->snd_count < 2) {
4016 						/*
4017 						 * True non-retransmited
4018 						 * chunk
4019 						 */
4020 						tp1->whoTo->net_ack2 +=
4021 						    tp1->send_size;
4022 
4023 						/* update RTO too? */
4024 						if (tp1->do_rtt) {
4025 							if (rto_ok) {
4026 								tp1->whoTo->RTO =
4027 								/*
4028 								 * sa_ignore
4029 								 * NO_NULL_CHK
4030 								 */
4031 								    sctp_calculate_rto(stcb,
4032 								    asoc, tp1->whoTo,
4033 								    &tp1->sent_rcv_time,
4034 								    sctp_align_safe_nocopy,
4035 								    SCTP_RTT_FROM_DATA);
4036 								rto_ok = 0;
4037 							}
4038 							if (tp1->whoTo->rto_needed == 0) {
4039 								tp1->whoTo->rto_needed = 1;
4040 							}
4041 							tp1->do_rtt = 0;
4042 						}
4043 					}
4044 					/*
4045 					 * CMT: CUCv2 algorithm. From the
4046 					 * cumack'd TSNs, for each TSN being
4047 					 * acked for the first time, set the
4048 					 * following variables for the
4049 					 * corresp destination.
4050 					 * new_pseudo_cumack will trigger a
4051 					 * cwnd update.
4052 					 * find_(rtx_)pseudo_cumack will
4053 					 * trigger search for the next
4054 					 * expected (rtx-)pseudo-cumack.
4055 					 */
4056 					tp1->whoTo->new_pseudo_cumack = 1;
4057 					tp1->whoTo->find_pseudo_cumack = 1;
4058 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4059 
4060 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4061 						/* sa_ignore NO_NULL_CHK */
4062 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4063 					}
4064 				}
4065 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4066 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4067 				}
4068 				if (tp1->rec.data.chunk_was_revoked) {
4069 					/* deflate the cwnd */
4070 					tp1->whoTo->cwnd -= tp1->book_size;
4071 					tp1->rec.data.chunk_was_revoked = 0;
4072 				}
4073 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4074 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4075 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4076 #ifdef INVARIANTS
4077 					} else {
4078 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4079 #endif
4080 					}
4081 				}
4082 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4083 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4084 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4085 					asoc->trigger_reset = 1;
4086 				}
4087 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4088 				if (tp1->data) {
4089 					/* sa_ignore NO_NULL_CHK */
4090 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4091 					sctp_m_freem(tp1->data);
4092 					tp1->data = NULL;
4093 				}
4094 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4095 					sctp_log_sack(asoc->last_acked_seq,
4096 					    cumack,
4097 					    tp1->rec.data.tsn,
4098 					    0,
4099 					    0,
4100 					    SCTP_LOG_FREE_SENT);
4101 				}
4102 				asoc->sent_queue_cnt--;
4103 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4104 			} else {
4105 				break;
4106 			}
4107 		}
4108 
4109 	}
4110 	/* sa_ignore NO_NULL_CHK */
4111 	if (stcb->sctp_socket) {
4112 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4113 		struct socket *so;
4114 
4115 #endif
4116 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4117 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4118 			/* sa_ignore NO_NULL_CHK */
4119 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4120 		}
4121 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4122 		so = SCTP_INP_SO(stcb->sctp_ep);
4123 		atomic_add_int(&stcb->asoc.refcnt, 1);
4124 		SCTP_TCB_UNLOCK(stcb);
4125 		SCTP_SOCKET_LOCK(so, 1);
4126 		SCTP_TCB_LOCK(stcb);
4127 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4128 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4129 			/* assoc was freed while we were unlocked */
4130 			SCTP_SOCKET_UNLOCK(so, 1);
4131 			return;
4132 		}
4133 #endif
4134 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4135 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4136 		SCTP_SOCKET_UNLOCK(so, 1);
4137 #endif
4138 	} else {
4139 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4140 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4141 		}
4142 	}
4143 
4144 	/* JRS - Use the congestion control given in the CC module */
4145 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4146 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4147 			if (net->net_ack2 > 0) {
4148 				/*
4149 				 * Karn's rule applies to clearing error
4150 				 * count, this is optional.
4151 				 */
4152 				net->error_count = 0;
4153 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4154 					/* addr came good */
4155 					net->dest_state |= SCTP_ADDR_REACHABLE;
4156 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4157 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4158 				}
4159 				if (net == stcb->asoc.primary_destination) {
4160 					if (stcb->asoc.alternate) {
4161 						/*
4162 						 * release the alternate,
4163 						 * primary is good
4164 						 */
4165 						sctp_free_remote_addr(stcb->asoc.alternate);
4166 						stcb->asoc.alternate = NULL;
4167 					}
4168 				}
4169 				if (net->dest_state & SCTP_ADDR_PF) {
4170 					net->dest_state &= ~SCTP_ADDR_PF;
4171 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4172 					    stcb->sctp_ep, stcb, net,
4173 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4174 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4175 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4176 					/* Done with this net */
4177 					net->net_ack = 0;
4178 				}
4179 				/* restore any doubled timers */
4180 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4181 				if (net->RTO < stcb->asoc.minrto) {
4182 					net->RTO = stcb->asoc.minrto;
4183 				}
4184 				if (net->RTO > stcb->asoc.maxrto) {
4185 					net->RTO = stcb->asoc.maxrto;
4186 				}
4187 			}
4188 		}
4189 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4190 	}
4191 	asoc->last_acked_seq = cumack;
4192 
4193 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4194 		/* nothing left in-flight */
4195 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4196 			net->flight_size = 0;
4197 			net->partial_bytes_acked = 0;
4198 		}
4199 		asoc->total_flight = 0;
4200 		asoc->total_flight_count = 0;
4201 	}
4202 	/* RWND update */
4203 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4204 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4205 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4206 		/* SWS sender side engages */
4207 		asoc->peers_rwnd = 0;
4208 	}
4209 	if (asoc->peers_rwnd > old_rwnd) {
4210 		win_probe_recovery = 1;
4211 	}
4212 	/* Now assure a timer where data is queued at */
4213 again:
4214 	j = 0;
4215 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4216 		int to_ticks;
4217 
4218 		if (win_probe_recovery && (net->window_probe)) {
4219 			win_probe_recovered = 1;
4220 			/*
4221 			 * Find first chunk that was used with window probe
4222 			 * and clear the sent
4223 			 */
4224 			/* sa_ignore FREED_MEMORY */
4225 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4226 				if (tp1->window_probe) {
4227 					/* move back to data send queue */
4228 					sctp_window_probe_recovery(stcb, asoc, tp1);
4229 					break;
4230 				}
4231 			}
4232 		}
4233 		if (net->RTO == 0) {
4234 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4235 		} else {
4236 			to_ticks = MSEC_TO_TICKS(net->RTO);
4237 		}
4238 		if (net->flight_size) {
4239 			j++;
4240 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4241 			if (net->window_probe) {
4242 				net->window_probe = 0;
4243 			}
4244 		} else {
4245 			if (net->window_probe) {
4246 				/*
4247 				 * In window probes we must assure a timer
4248 				 * is still running there
4249 				 */
4250 				net->window_probe = 0;
4251 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4252 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4253 				}
4254 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4255 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4256 				    stcb, net,
4257 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4258 			}
4259 		}
4260 	}
4261 	if ((j == 0) &&
4262 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4263 	    (asoc->sent_queue_retran_cnt == 0) &&
4264 	    (win_probe_recovered == 0) &&
4265 	    (done_once == 0)) {
4266 		/*
4267 		 * huh, this should not happen unless all packets are
4268 		 * PR-SCTP and marked to skip of course.
4269 		 */
4270 		if (sctp_fs_audit(asoc)) {
4271 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4272 				net->flight_size = 0;
4273 			}
4274 			asoc->total_flight = 0;
4275 			asoc->total_flight_count = 0;
4276 			asoc->sent_queue_retran_cnt = 0;
4277 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4278 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4279 					sctp_flight_size_increase(tp1);
4280 					sctp_total_flight_increase(stcb, tp1);
4281 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4282 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4283 				}
4284 			}
4285 		}
4286 		done_once = 1;
4287 		goto again;
4288 	}
4289 	/**********************************/
4290 	/* Now what about shutdown issues */
4291 	/**********************************/
4292 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4293 		/* nothing left on sendqueue.. consider done */
4294 		/* clean up */
4295 		if ((asoc->stream_queue_cnt == 1) &&
4296 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4297 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4298 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4299 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4300 		}
4301 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4302 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4303 		    (asoc->stream_queue_cnt == 1) &&
4304 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4305 			struct mbuf *op_err;
4306 
4307 			*abort_now = 1;
4308 			/* XXX */
4309 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4310 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4311 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4312 			return;
4313 		}
4314 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4315 		    (asoc->stream_queue_cnt == 0)) {
4316 			struct sctp_nets *netp;
4317 
4318 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4319 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4320 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4321 			}
4322 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4323 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4324 			sctp_stop_timers_for_shutdown(stcb);
4325 			if (asoc->alternate) {
4326 				netp = asoc->alternate;
4327 			} else {
4328 				netp = asoc->primary_destination;
4329 			}
4330 			sctp_send_shutdown(stcb, netp);
4331 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4332 			    stcb->sctp_ep, stcb, netp);
4333 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4334 			    stcb->sctp_ep, stcb, netp);
4335 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4336 		    (asoc->stream_queue_cnt == 0)) {
4337 			struct sctp_nets *netp;
4338 
4339 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4340 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4341 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4342 			sctp_stop_timers_for_shutdown(stcb);
4343 			if (asoc->alternate) {
4344 				netp = asoc->alternate;
4345 			} else {
4346 				netp = asoc->primary_destination;
4347 			}
4348 			sctp_send_shutdown_ack(stcb, netp);
4349 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4350 			    stcb->sctp_ep, stcb, netp);
4351 		}
4352 	}
4353 	/*********************************************/
4354 	/* Here we perform PR-SCTP procedures        */
4355 	/* (section 4.2)                             */
4356 	/*********************************************/
4357 	/* C1. update advancedPeerAckPoint */
4358 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4359 		asoc->advanced_peer_ack_point = cumack;
4360 	}
4361 	/* PR-Sctp issues need to be addressed too */
4362 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4363 		struct sctp_tmit_chunk *lchk;
4364 		uint32_t old_adv_peer_ack_point;
4365 
4366 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4367 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4368 		/* C3. See if we need to send a Fwd-TSN */
4369 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4370 			/*
4371 			 * ISSUE with ECN, see FWD-TSN processing.
4372 			 */
4373 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4374 				send_forward_tsn(stcb, asoc);
4375 			} else if (lchk) {
4376 				/* try to FR fwd-tsn's that get lost too */
4377 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4378 					send_forward_tsn(stcb, asoc);
4379 				}
4380 			}
4381 		}
4382 		if (lchk) {
4383 			/* Assure a timer is up */
4384 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4385 			    stcb->sctp_ep, stcb, lchk->whoTo);
4386 		}
4387 	}
4388 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4389 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4390 		    rwnd,
4391 		    stcb->asoc.peers_rwnd,
4392 		    stcb->asoc.total_flight,
4393 		    stcb->asoc.total_output_queue_size);
4394 	}
4395 }
4396 
4397 void
4398 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4399     struct sctp_tcb *stcb,
4400     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4401     int *abort_now, uint8_t flags,
4402     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4403 {
4404 	struct sctp_association *asoc;
4405 	struct sctp_tmit_chunk *tp1, *tp2;
4406 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4407 	uint16_t wake_him = 0;
4408 	uint32_t send_s = 0;
4409 	long j;
4410 	int accum_moved = 0;
4411 	int will_exit_fast_recovery = 0;
4412 	uint32_t a_rwnd, old_rwnd;
4413 	int win_probe_recovery = 0;
4414 	int win_probe_recovered = 0;
4415 	struct sctp_nets *net = NULL;
4416 	int done_once;
4417 	int rto_ok = 1;
4418 	uint8_t reneged_all = 0;
4419 	uint8_t cmt_dac_flag;
4420 
4421 	/*
4422 	 * we take any chance we can to service our queues since we cannot
4423 	 * get awoken when the socket is read from :<
4424 	 */
4425 	/*
4426 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4427 	 * old sack, if so discard. 2) If there is nothing left in the send
4428 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4429 	 * too, update any rwnd change and verify no timers are running.
4430 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4431 	 * moved process these first and note that it moved. 4) Process any
4432 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4433 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4434 	 * sync up flightsizes and things, stop all timers and also check
4435 	 * for shutdown_pending state. If so then go ahead and send off the
4436 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4437 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4438 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4439 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4440 	 * if in shutdown_recv state.
4441 	 */
4442 	SCTP_TCB_LOCK_ASSERT(stcb);
4443 	/* CMT DAC algo */
4444 	this_sack_lowest_newack = 0;
4445 	SCTP_STAT_INCR(sctps_slowpath_sack);
4446 	last_tsn = cum_ack;
4447 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4448 #ifdef SCTP_ASOCLOG_OF_TSNS
4449 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4450 	stcb->asoc.cumack_log_at++;
4451 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4452 		stcb->asoc.cumack_log_at = 0;
4453 	}
4454 #endif
4455 	a_rwnd = rwnd;
4456 
4457 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4458 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4459 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4460 	}
4461 	old_rwnd = stcb->asoc.peers_rwnd;
4462 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4463 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4464 		    stcb->asoc.overall_error_count,
4465 		    0,
4466 		    SCTP_FROM_SCTP_INDATA,
4467 		    __LINE__);
4468 	}
4469 	stcb->asoc.overall_error_count = 0;
4470 	asoc = &stcb->asoc;
4471 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4472 		sctp_log_sack(asoc->last_acked_seq,
4473 		    cum_ack,
4474 		    0,
4475 		    num_seg,
4476 		    num_dup,
4477 		    SCTP_LOG_NEW_SACK);
4478 	}
4479 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4480 		uint16_t i;
4481 		uint32_t *dupdata, dblock;
4482 
4483 		for (i = 0; i < num_dup; i++) {
4484 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4485 			    sizeof(uint32_t), (uint8_t *)&dblock);
4486 			if (dupdata == NULL) {
4487 				break;
4488 			}
4489 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4490 		}
4491 	}
4492 	/* reality check */
4493 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4494 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4495 		    sctpchunk_listhead);
4496 		send_s = tp1->rec.data.tsn + 1;
4497 	} else {
4498 		tp1 = NULL;
4499 		send_s = asoc->sending_seq;
4500 	}
4501 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4502 		struct mbuf *op_err;
4503 		char msg[SCTP_DIAG_INFO_LEN];
4504 
4505 		/*
4506 		 * no way, we have not even sent this TSN out yet. Peer is
4507 		 * hopelessly messed up with us.
4508 		 */
4509 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4510 		    cum_ack, send_s);
4511 		if (tp1) {
4512 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4513 			    tp1->rec.data.tsn, (void *)tp1);
4514 		}
4515 hopeless_peer:
4516 		*abort_now = 1;
4517 		/* XXX */
4518 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4519 		    cum_ack, send_s);
4520 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4521 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4522 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4523 		return;
4524 	}
4525 	/**********************/
4526 	/* 1) check the range */
4527 	/**********************/
4528 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4529 		/* acking something behind */
4530 		return;
4531 	}
4532 	/* update the Rwnd of the peer */
4533 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4534 	    TAILQ_EMPTY(&asoc->send_queue) &&
4535 	    (asoc->stream_queue_cnt == 0)) {
4536 		/* nothing left on send/sent and strmq */
4537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4538 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4539 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4540 		}
4541 		asoc->peers_rwnd = a_rwnd;
4542 		if (asoc->sent_queue_retran_cnt) {
4543 			asoc->sent_queue_retran_cnt = 0;
4544 		}
4545 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4546 			/* SWS sender side engages */
4547 			asoc->peers_rwnd = 0;
4548 		}
4549 		/* stop any timers */
4550 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4551 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4552 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4553 			net->partial_bytes_acked = 0;
4554 			net->flight_size = 0;
4555 		}
4556 		asoc->total_flight = 0;
4557 		asoc->total_flight_count = 0;
4558 		return;
4559 	}
4560 	/*
4561 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4562 	 * things. The total byte count acked is tracked in netAckSz AND
4563 	 * netAck2 is used to track the total bytes acked that are un-
4564 	 * amibguious and were never retransmitted. We track these on a per
4565 	 * destination address basis.
4566 	 */
4567 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4568 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4569 			/* Drag along the window_tsn for cwr's */
4570 			net->cwr_window_tsn = cum_ack;
4571 		}
4572 		net->prev_cwnd = net->cwnd;
4573 		net->net_ack = 0;
4574 		net->net_ack2 = 0;
4575 
4576 		/*
4577 		 * CMT: Reset CUC and Fast recovery algo variables before
4578 		 * SACK processing
4579 		 */
4580 		net->new_pseudo_cumack = 0;
4581 		net->will_exit_fast_recovery = 0;
4582 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4583 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4584 		}
4585 	}
4586 	/* process the new consecutive TSN first */
4587 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4588 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4589 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4590 				accum_moved = 1;
4591 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4592 					/*
4593 					 * If it is less than ACKED, it is
4594 					 * now no-longer in flight. Higher
4595 					 * values may occur during marking
4596 					 */
4597 					if ((tp1->whoTo->dest_state &
4598 					    SCTP_ADDR_UNCONFIRMED) &&
4599 					    (tp1->snd_count < 2)) {
4600 						/*
4601 						 * If there was no retran
4602 						 * and the address is
4603 						 * un-confirmed and we sent
4604 						 * there and are now
4605 						 * sacked.. its confirmed,
4606 						 * mark it so.
4607 						 */
4608 						tp1->whoTo->dest_state &=
4609 						    ~SCTP_ADDR_UNCONFIRMED;
4610 					}
4611 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4612 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4613 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4614 							    tp1->whoTo->flight_size,
4615 							    tp1->book_size,
4616 							    (uint32_t)(uintptr_t)tp1->whoTo,
4617 							    tp1->rec.data.tsn);
4618 						}
4619 						sctp_flight_size_decrease(tp1);
4620 						sctp_total_flight_decrease(stcb, tp1);
4621 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4622 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4623 							    tp1);
4624 						}
4625 					}
4626 					tp1->whoTo->net_ack += tp1->send_size;
4627 
4628 					/* CMT SFR and DAC algos */
4629 					this_sack_lowest_newack = tp1->rec.data.tsn;
4630 					tp1->whoTo->saw_newack = 1;
4631 
4632 					if (tp1->snd_count < 2) {
4633 						/*
4634 						 * True non-retransmited
4635 						 * chunk
4636 						 */
4637 						tp1->whoTo->net_ack2 +=
4638 						    tp1->send_size;
4639 
4640 						/* update RTO too? */
4641 						if (tp1->do_rtt) {
4642 							if (rto_ok) {
4643 								tp1->whoTo->RTO =
4644 								    sctp_calculate_rto(stcb,
4645 								    asoc, tp1->whoTo,
4646 								    &tp1->sent_rcv_time,
4647 								    sctp_align_safe_nocopy,
4648 								    SCTP_RTT_FROM_DATA);
4649 								rto_ok = 0;
4650 							}
4651 							if (tp1->whoTo->rto_needed == 0) {
4652 								tp1->whoTo->rto_needed = 1;
4653 							}
4654 							tp1->do_rtt = 0;
4655 						}
4656 					}
4657 					/*
4658 					 * CMT: CUCv2 algorithm. From the
4659 					 * cumack'd TSNs, for each TSN being
4660 					 * acked for the first time, set the
4661 					 * following variables for the
4662 					 * corresp destination.
4663 					 * new_pseudo_cumack will trigger a
4664 					 * cwnd update.
4665 					 * find_(rtx_)pseudo_cumack will
4666 					 * trigger search for the next
4667 					 * expected (rtx-)pseudo-cumack.
4668 					 */
4669 					tp1->whoTo->new_pseudo_cumack = 1;
4670 					tp1->whoTo->find_pseudo_cumack = 1;
4671 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4672 
4673 
4674 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4675 						sctp_log_sack(asoc->last_acked_seq,
4676 						    cum_ack,
4677 						    tp1->rec.data.tsn,
4678 						    0,
4679 						    0,
4680 						    SCTP_LOG_TSN_ACKED);
4681 					}
4682 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4683 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4684 					}
4685 				}
4686 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4687 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4688 #ifdef SCTP_AUDITING_ENABLED
4689 					sctp_audit_log(0xB3,
4690 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4691 #endif
4692 				}
4693 				if (tp1->rec.data.chunk_was_revoked) {
4694 					/* deflate the cwnd */
4695 					tp1->whoTo->cwnd -= tp1->book_size;
4696 					tp1->rec.data.chunk_was_revoked = 0;
4697 				}
4698 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4699 					tp1->sent = SCTP_DATAGRAM_ACKED;
4700 				}
4701 			}
4702 		} else {
4703 			break;
4704 		}
4705 	}
4706 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4707 	/* always set this up to cum-ack */
4708 	asoc->this_sack_highest_gap = last_tsn;
4709 
4710 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4711 
4712 		/*
4713 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4714 		 * to be greater than the cumack. Also reset saw_newack to 0
4715 		 * for all dests.
4716 		 */
4717 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4718 			net->saw_newack = 0;
4719 			net->this_sack_highest_newack = last_tsn;
4720 		}
4721 
4722 		/*
4723 		 * thisSackHighestGap will increase while handling NEW
4724 		 * segments this_sack_highest_newack will increase while
4725 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4726 		 * used for CMT DAC algo. saw_newack will also change.
4727 		 */
4728 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4729 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4730 		    num_seg, num_nr_seg, &rto_ok)) {
4731 			wake_him++;
4732 		}
4733 		/*
4734 		 * validate the biggest_tsn_acked in the gap acks if strict
4735 		 * adherence is wanted.
4736 		 */
4737 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4738 			/*
4739 			 * peer is either confused or we are under attack.
4740 			 * We must abort.
4741 			 */
4742 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4743 			    biggest_tsn_acked, send_s);
4744 			goto hopeless_peer;
4745 		}
4746 	}
4747 	/*******************************************/
4748 	/* cancel ALL T3-send timer if accum moved */
4749 	/*******************************************/
4750 	if (asoc->sctp_cmt_on_off > 0) {
4751 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4752 			if (net->new_pseudo_cumack)
4753 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4754 				    stcb, net,
4755 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4756 
4757 		}
4758 	} else {
4759 		if (accum_moved) {
4760 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4761 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4762 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4763 			}
4764 		}
4765 	}
4766 	/********************************************/
4767 	/* drop the acked chunks from the sentqueue */
4768 	/********************************************/
4769 	asoc->last_acked_seq = cum_ack;
4770 
4771 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4772 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4773 			break;
4774 		}
4775 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4776 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4777 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4778 #ifdef INVARIANTS
4779 			} else {
4780 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4781 #endif
4782 			}
4783 		}
4784 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4785 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4786 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4787 			asoc->trigger_reset = 1;
4788 		}
4789 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4790 		if (PR_SCTP_ENABLED(tp1->flags)) {
4791 			if (asoc->pr_sctp_cnt != 0)
4792 				asoc->pr_sctp_cnt--;
4793 		}
4794 		asoc->sent_queue_cnt--;
4795 		if (tp1->data) {
4796 			/* sa_ignore NO_NULL_CHK */
4797 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4798 			sctp_m_freem(tp1->data);
4799 			tp1->data = NULL;
4800 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4801 				asoc->sent_queue_cnt_removeable--;
4802 			}
4803 		}
4804 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4805 			sctp_log_sack(asoc->last_acked_seq,
4806 			    cum_ack,
4807 			    tp1->rec.data.tsn,
4808 			    0,
4809 			    0,
4810 			    SCTP_LOG_FREE_SENT);
4811 		}
4812 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4813 		wake_him++;
4814 	}
4815 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4816 #ifdef INVARIANTS
4817 		panic("Warning flight size is positive and should be 0");
4818 #else
4819 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4820 		    asoc->total_flight);
4821 #endif
4822 		asoc->total_flight = 0;
4823 	}
4824 	/* sa_ignore NO_NULL_CHK */
4825 	if ((wake_him) && (stcb->sctp_socket)) {
4826 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4827 		struct socket *so;
4828 
4829 #endif
4830 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4831 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4832 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4833 		}
4834 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4835 		so = SCTP_INP_SO(stcb->sctp_ep);
4836 		atomic_add_int(&stcb->asoc.refcnt, 1);
4837 		SCTP_TCB_UNLOCK(stcb);
4838 		SCTP_SOCKET_LOCK(so, 1);
4839 		SCTP_TCB_LOCK(stcb);
4840 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4841 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4842 			/* assoc was freed while we were unlocked */
4843 			SCTP_SOCKET_UNLOCK(so, 1);
4844 			return;
4845 		}
4846 #endif
4847 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4848 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4849 		SCTP_SOCKET_UNLOCK(so, 1);
4850 #endif
4851 	} else {
4852 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4853 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4854 		}
4855 	}
4856 
4857 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4858 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4859 			/* Setup so we will exit RFC2582 fast recovery */
4860 			will_exit_fast_recovery = 1;
4861 		}
4862 	}
4863 	/*
4864 	 * Check for revoked fragments:
4865 	 *
4866 	 * if Previous sack - Had no frags then we can't have any revoked if
4867 	 * Previous sack - Had frag's then - If we now have frags aka
4868 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4869 	 * some of them. else - The peer revoked all ACKED fragments, since
4870 	 * we had some before and now we have NONE.
4871 	 */
4872 
4873 	if (num_seg) {
4874 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4875 		asoc->saw_sack_with_frags = 1;
4876 	} else if (asoc->saw_sack_with_frags) {
4877 		int cnt_revoked = 0;
4878 
4879 		/* Peer revoked all dg's marked or acked */
4880 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4881 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4882 				tp1->sent = SCTP_DATAGRAM_SENT;
4883 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4884 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4885 					    tp1->whoTo->flight_size,
4886 					    tp1->book_size,
4887 					    (uint32_t)(uintptr_t)tp1->whoTo,
4888 					    tp1->rec.data.tsn);
4889 				}
4890 				sctp_flight_size_increase(tp1);
4891 				sctp_total_flight_increase(stcb, tp1);
4892 				tp1->rec.data.chunk_was_revoked = 1;
4893 				/*
4894 				 * To ensure that this increase in
4895 				 * flightsize, which is artificial, does not
4896 				 * throttle the sender, we also increase the
4897 				 * cwnd artificially.
4898 				 */
4899 				tp1->whoTo->cwnd += tp1->book_size;
4900 				cnt_revoked++;
4901 			}
4902 		}
4903 		if (cnt_revoked) {
4904 			reneged_all = 1;
4905 		}
4906 		asoc->saw_sack_with_frags = 0;
4907 	}
4908 	if (num_nr_seg > 0)
4909 		asoc->saw_sack_with_nr_frags = 1;
4910 	else
4911 		asoc->saw_sack_with_nr_frags = 0;
4912 
4913 	/* JRS - Use the congestion control given in the CC module */
4914 	if (ecne_seen == 0) {
4915 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4916 			if (net->net_ack2 > 0) {
4917 				/*
4918 				 * Karn's rule applies to clearing error
4919 				 * count, this is optional.
4920 				 */
4921 				net->error_count = 0;
4922 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4923 					/* addr came good */
4924 					net->dest_state |= SCTP_ADDR_REACHABLE;
4925 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4926 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4927 				}
4928 				if (net == stcb->asoc.primary_destination) {
4929 					if (stcb->asoc.alternate) {
4930 						/*
4931 						 * release the alternate,
4932 						 * primary is good
4933 						 */
4934 						sctp_free_remote_addr(stcb->asoc.alternate);
4935 						stcb->asoc.alternate = NULL;
4936 					}
4937 				}
4938 				if (net->dest_state & SCTP_ADDR_PF) {
4939 					net->dest_state &= ~SCTP_ADDR_PF;
4940 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4941 					    stcb->sctp_ep, stcb, net,
4942 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4943 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4944 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4945 					/* Done with this net */
4946 					net->net_ack = 0;
4947 				}
4948 				/* restore any doubled timers */
4949 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4950 				if (net->RTO < stcb->asoc.minrto) {
4951 					net->RTO = stcb->asoc.minrto;
4952 				}
4953 				if (net->RTO > stcb->asoc.maxrto) {
4954 					net->RTO = stcb->asoc.maxrto;
4955 				}
4956 			}
4957 		}
4958 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4959 	}
4960 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4961 		/* nothing left in-flight */
4962 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4963 			/* stop all timers */
4964 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4965 			    stcb, net,
4966 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4967 			net->flight_size = 0;
4968 			net->partial_bytes_acked = 0;
4969 		}
4970 		asoc->total_flight = 0;
4971 		asoc->total_flight_count = 0;
4972 	}
4973 	/**********************************/
4974 	/* Now what about shutdown issues */
4975 	/**********************************/
4976 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4977 		/* nothing left on sendqueue.. consider done */
4978 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4979 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4980 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4981 		}
4982 		asoc->peers_rwnd = a_rwnd;
4983 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4984 			/* SWS sender side engages */
4985 			asoc->peers_rwnd = 0;
4986 		}
4987 		/* clean up */
4988 		if ((asoc->stream_queue_cnt == 1) &&
4989 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4990 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4991 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4992 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4993 		}
4994 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4995 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4996 		    (asoc->stream_queue_cnt == 1) &&
4997 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4998 			struct mbuf *op_err;
4999 
5000 			*abort_now = 1;
5001 			/* XXX */
5002 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5003 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5004 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5005 			return;
5006 		}
5007 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5008 		    (asoc->stream_queue_cnt == 0)) {
5009 			struct sctp_nets *netp;
5010 
5011 			if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5012 			    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5013 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5014 			}
5015 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5016 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5017 			sctp_stop_timers_for_shutdown(stcb);
5018 			if (asoc->alternate) {
5019 				netp = asoc->alternate;
5020 			} else {
5021 				netp = asoc->primary_destination;
5022 			}
5023 			sctp_send_shutdown(stcb, netp);
5024 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5025 			    stcb->sctp_ep, stcb, netp);
5026 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5027 			    stcb->sctp_ep, stcb, netp);
5028 			return;
5029 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5030 		    (asoc->stream_queue_cnt == 0)) {
5031 			struct sctp_nets *netp;
5032 
5033 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5034 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5035 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5036 			sctp_stop_timers_for_shutdown(stcb);
5037 			if (asoc->alternate) {
5038 				netp = asoc->alternate;
5039 			} else {
5040 				netp = asoc->primary_destination;
5041 			}
5042 			sctp_send_shutdown_ack(stcb, netp);
5043 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5044 			    stcb->sctp_ep, stcb, netp);
5045 			return;
5046 		}
5047 	}
5048 	/*
5049 	 * Now here we are going to recycle net_ack for a different use...
5050 	 * HEADS UP.
5051 	 */
5052 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5053 		net->net_ack = 0;
5054 	}
5055 
5056 	/*
5057 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5058 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5059 	 * automatically ensure that.
5060 	 */
5061 	if ((asoc->sctp_cmt_on_off > 0) &&
5062 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5063 	    (cmt_dac_flag == 0)) {
5064 		this_sack_lowest_newack = cum_ack;
5065 	}
5066 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5067 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5068 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5069 	}
5070 	/* JRS - Use the congestion control given in the CC module */
5071 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5072 
5073 	/* Now are we exiting loss recovery ? */
5074 	if (will_exit_fast_recovery) {
5075 		/* Ok, we must exit fast recovery */
5076 		asoc->fast_retran_loss_recovery = 0;
5077 	}
5078 	if ((asoc->sat_t3_loss_recovery) &&
5079 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5080 		/* end satellite t3 loss recovery */
5081 		asoc->sat_t3_loss_recovery = 0;
5082 	}
5083 	/*
5084 	 * CMT Fast recovery
5085 	 */
5086 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5087 		if (net->will_exit_fast_recovery) {
5088 			/* Ok, we must exit fast recovery */
5089 			net->fast_retran_loss_recovery = 0;
5090 		}
5091 	}
5092 
5093 	/* Adjust and set the new rwnd value */
5094 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5095 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5096 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5097 	}
5098 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5099 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5100 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5101 		/* SWS sender side engages */
5102 		asoc->peers_rwnd = 0;
5103 	}
5104 	if (asoc->peers_rwnd > old_rwnd) {
5105 		win_probe_recovery = 1;
5106 	}
5107 	/*
5108 	 * Now we must setup so we have a timer up for anyone with
5109 	 * outstanding data.
5110 	 */
5111 	done_once = 0;
5112 again:
5113 	j = 0;
5114 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115 		if (win_probe_recovery && (net->window_probe)) {
5116 			win_probe_recovered = 1;
5117 			/*-
5118 			 * Find first chunk that was used with
5119 			 * window probe and clear the event. Put
5120 			 * it back into the send queue as if has
5121 			 * not been sent.
5122 			 */
5123 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5124 				if (tp1->window_probe) {
5125 					sctp_window_probe_recovery(stcb, asoc, tp1);
5126 					break;
5127 				}
5128 			}
5129 		}
5130 		if (net->flight_size) {
5131 			j++;
5132 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5133 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5134 				    stcb->sctp_ep, stcb, net);
5135 			}
5136 			if (net->window_probe) {
5137 				net->window_probe = 0;
5138 			}
5139 		} else {
5140 			if (net->window_probe) {
5141 				/*
5142 				 * In window probes we must assure a timer
5143 				 * is still running there
5144 				 */
5145 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5146 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5147 					    stcb->sctp_ep, stcb, net);
5148 
5149 				}
5150 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5151 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5152 				    stcb, net,
5153 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5154 			}
5155 		}
5156 	}
5157 	if ((j == 0) &&
5158 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5159 	    (asoc->sent_queue_retran_cnt == 0) &&
5160 	    (win_probe_recovered == 0) &&
5161 	    (done_once == 0)) {
5162 		/*
5163 		 * huh, this should not happen unless all packets are
5164 		 * PR-SCTP and marked to skip of course.
5165 		 */
5166 		if (sctp_fs_audit(asoc)) {
5167 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5168 				net->flight_size = 0;
5169 			}
5170 			asoc->total_flight = 0;
5171 			asoc->total_flight_count = 0;
5172 			asoc->sent_queue_retran_cnt = 0;
5173 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5174 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5175 					sctp_flight_size_increase(tp1);
5176 					sctp_total_flight_increase(stcb, tp1);
5177 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5178 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5179 				}
5180 			}
5181 		}
5182 		done_once = 1;
5183 		goto again;
5184 	}
5185 	/*********************************************/
5186 	/* Here we perform PR-SCTP procedures        */
5187 	/* (section 4.2)                             */
5188 	/*********************************************/
5189 	/* C1. update advancedPeerAckPoint */
5190 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5191 		asoc->advanced_peer_ack_point = cum_ack;
5192 	}
5193 	/* C2. try to further move advancedPeerAckPoint ahead */
5194 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5195 		struct sctp_tmit_chunk *lchk;
5196 		uint32_t old_adv_peer_ack_point;
5197 
5198 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5199 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5200 		/* C3. See if we need to send a Fwd-TSN */
5201 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5202 			/*
5203 			 * ISSUE with ECN, see FWD-TSN processing.
5204 			 */
5205 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5206 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5207 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5208 				    old_adv_peer_ack_point);
5209 			}
5210 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5211 				send_forward_tsn(stcb, asoc);
5212 			} else if (lchk) {
5213 				/* try to FR fwd-tsn's that get lost too */
5214 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5215 					send_forward_tsn(stcb, asoc);
5216 				}
5217 			}
5218 		}
5219 		if (lchk) {
5220 			/* Assure a timer is up */
5221 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5222 			    stcb->sctp_ep, stcb, lchk->whoTo);
5223 		}
5224 	}
5225 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5226 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5227 		    a_rwnd,
5228 		    stcb->asoc.peers_rwnd,
5229 		    stcb->asoc.total_flight,
5230 		    stcb->asoc.total_output_queue_size);
5231 	}
5232 }
5233 
5234 void
5235 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5236 {
5237 	/* Copy cum-ack */
5238 	uint32_t cum_ack, a_rwnd;
5239 
5240 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5241 	/* Arrange so a_rwnd does NOT change */
5242 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5243 
5244 	/* Now call the express sack handling */
5245 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5246 }
5247 
5248 static void
5249 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5250     struct sctp_stream_in *strmin)
5251 {
5252 	struct sctp_queued_to_read *control, *ncontrol;
5253 	struct sctp_association *asoc;
5254 	uint32_t mid;
5255 	int need_reasm_check = 0;
5256 
5257 	asoc = &stcb->asoc;
5258 	mid = strmin->last_mid_delivered;
5259 	/*
5260 	 * First deliver anything prior to and including the stream no that
5261 	 * came in.
5262 	 */
5263 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5264 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5265 			/* this is deliverable now */
5266 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5267 				if (control->on_strm_q) {
5268 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5269 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5270 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5271 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5272 #ifdef INVARIANTS
5273 					} else {
5274 						panic("strmin: %p ctl: %p unknown %d",
5275 						    strmin, control, control->on_strm_q);
5276 #endif
5277 					}
5278 					control->on_strm_q = 0;
5279 				}
5280 				/* subtract pending on streams */
5281 				if (asoc->size_on_all_streams >= control->length) {
5282 					asoc->size_on_all_streams -= control->length;
5283 				} else {
5284 #ifdef INVARIANTS
5285 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5286 #else
5287 					asoc->size_on_all_streams = 0;
5288 #endif
5289 				}
5290 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5291 				/* deliver it to at least the delivery-q */
5292 				if (stcb->sctp_socket) {
5293 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5294 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5295 					    control,
5296 					    &stcb->sctp_socket->so_rcv,
5297 					    1, SCTP_READ_LOCK_HELD,
5298 					    SCTP_SO_NOT_LOCKED);
5299 				}
5300 			} else {
5301 				/* Its a fragmented message */
5302 				if (control->first_frag_seen) {
5303 					/*
5304 					 * Make it so this is next to
5305 					 * deliver, we restore later
5306 					 */
5307 					strmin->last_mid_delivered = control->mid - 1;
5308 					need_reasm_check = 1;
5309 					break;
5310 				}
5311 			}
5312 		} else {
5313 			/* no more delivery now. */
5314 			break;
5315 		}
5316 	}
5317 	if (need_reasm_check) {
5318 		int ret;
5319 
5320 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5321 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5322 			/* Restore the next to deliver unless we are ahead */
5323 			strmin->last_mid_delivered = mid;
5324 		}
5325 		if (ret == 0) {
5326 			/* Left the front Partial one on */
5327 			return;
5328 		}
5329 		need_reasm_check = 0;
5330 	}
5331 	/*
5332 	 * now we must deliver things in queue the normal way  if any are
5333 	 * now ready.
5334 	 */
5335 	mid = strmin->last_mid_delivered + 1;
5336 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5337 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5338 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5339 				/* this is deliverable now */
5340 				if (control->on_strm_q) {
5341 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5342 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5343 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5344 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5345 #ifdef INVARIANTS
5346 					} else {
5347 						panic("strmin: %p ctl: %p unknown %d",
5348 						    strmin, control, control->on_strm_q);
5349 #endif
5350 					}
5351 					control->on_strm_q = 0;
5352 				}
5353 				/* subtract pending on streams */
5354 				if (asoc->size_on_all_streams >= control->length) {
5355 					asoc->size_on_all_streams -= control->length;
5356 				} else {
5357 #ifdef INVARIANTS
5358 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5359 #else
5360 					asoc->size_on_all_streams = 0;
5361 #endif
5362 				}
5363 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5364 				/* deliver it to at least the delivery-q */
5365 				strmin->last_mid_delivered = control->mid;
5366 				if (stcb->sctp_socket) {
5367 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5368 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5369 					    control,
5370 					    &stcb->sctp_socket->so_rcv, 1,
5371 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5372 
5373 				}
5374 				mid = strmin->last_mid_delivered + 1;
5375 			} else {
5376 				/* Its a fragmented message */
5377 				if (control->first_frag_seen) {
5378 					/*
5379 					 * Make it so this is next to
5380 					 * deliver
5381 					 */
5382 					strmin->last_mid_delivered = control->mid - 1;
5383 					need_reasm_check = 1;
5384 					break;
5385 				}
5386 			}
5387 		} else {
5388 			break;
5389 		}
5390 	}
5391 	if (need_reasm_check) {
5392 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5393 	}
5394 }
5395 
5396 
5397 
5398 static void
5399 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5400     struct sctp_association *asoc,
5401     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5402 {
5403 	struct sctp_queued_to_read *control;
5404 	struct sctp_stream_in *strm;
5405 	struct sctp_tmit_chunk *chk, *nchk;
5406 	int cnt_removed = 0;
5407 
5408 	/*
5409 	 * For now large messages held on the stream reasm that are complete
5410 	 * will be tossed too. We could in theory do more work to spin
5411 	 * through and stop after dumping one msg aka seeing the start of a
5412 	 * new msg at the head, and call the delivery function... to see if
5413 	 * it can be delivered... But for now we just dump everything on the
5414 	 * queue.
5415 	 */
5416 	strm = &asoc->strmin[stream];
5417 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5418 	if (control == NULL) {
5419 		/* Not found */
5420 		return;
5421 	}
5422 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5423 		return;
5424 	}
5425 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5426 		/* Purge hanging chunks */
5427 		if (!asoc->idata_supported && (ordered == 0)) {
5428 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5429 				break;
5430 			}
5431 		}
5432 		cnt_removed++;
5433 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5434 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5435 			asoc->size_on_reasm_queue -= chk->send_size;
5436 		} else {
5437 #ifdef INVARIANTS
5438 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5439 #else
5440 			asoc->size_on_reasm_queue = 0;
5441 #endif
5442 		}
5443 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5444 		if (chk->data) {
5445 			sctp_m_freem(chk->data);
5446 			chk->data = NULL;
5447 		}
5448 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5449 	}
5450 	if (!TAILQ_EMPTY(&control->reasm)) {
5451 		/* This has to be old data, unordered */
5452 		if (control->data) {
5453 			sctp_m_freem(control->data);
5454 			control->data = NULL;
5455 		}
5456 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5457 		chk = TAILQ_FIRST(&control->reasm);
5458 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5459 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5460 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5461 			    chk, SCTP_READ_LOCK_HELD);
5462 		}
5463 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5464 		return;
5465 	}
5466 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5467 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5468 		if (asoc->size_on_all_streams >= control->length) {
5469 			asoc->size_on_all_streams -= control->length;
5470 		} else {
5471 #ifdef INVARIANTS
5472 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5473 #else
5474 			asoc->size_on_all_streams = 0;
5475 #endif
5476 		}
5477 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5478 		control->on_strm_q = 0;
5479 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5480 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5481 		control->on_strm_q = 0;
5482 #ifdef INVARIANTS
5483 	} else if (control->on_strm_q) {
5484 		panic("strm: %p ctl: %p unknown %d",
5485 		    strm, control, control->on_strm_q);
5486 #endif
5487 	}
5488 	control->on_strm_q = 0;
5489 	if (control->on_read_q == 0) {
5490 		sctp_free_remote_addr(control->whoFrom);
5491 		if (control->data) {
5492 			sctp_m_freem(control->data);
5493 			control->data = NULL;
5494 		}
5495 		sctp_free_a_readq(stcb, control);
5496 	}
5497 }
5498 
5499 void
5500 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5501     struct sctp_forward_tsn_chunk *fwd,
5502     int *abort_flag, struct mbuf *m, int offset)
5503 {
5504 	/* The pr-sctp fwd tsn */
5505 	/*
5506 	 * here we will perform all the data receiver side steps for
5507 	 * processing FwdTSN, as required in by pr-sctp draft:
5508 	 *
5509 	 * Assume we get FwdTSN(x):
5510 	 *
5511 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5512 	 * + others we have 3) examine and update re-ordering queue on
5513 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5514 	 * report where we are.
5515 	 */
5516 	struct sctp_association *asoc;
5517 	uint32_t new_cum_tsn, gap;
5518 	unsigned int i, fwd_sz, m_size;
5519 	uint32_t str_seq;
5520 	struct sctp_stream_in *strm;
5521 	struct sctp_queued_to_read *control, *sv;
5522 
5523 	asoc = &stcb->asoc;
5524 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5525 		SCTPDBG(SCTP_DEBUG_INDATA1,
5526 		    "Bad size too small/big fwd-tsn\n");
5527 		return;
5528 	}
5529 	m_size = (stcb->asoc.mapping_array_size << 3);
5530 	/*************************************************************/
5531 	/* 1. Here we update local cumTSN and shift the bitmap array */
5532 	/*************************************************************/
5533 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5534 
5535 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5536 		/* Already got there ... */
5537 		return;
5538 	}
5539 	/*
5540 	 * now we know the new TSN is more advanced, let's find the actual
5541 	 * gap
5542 	 */
5543 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5544 	asoc->cumulative_tsn = new_cum_tsn;
5545 	if (gap >= m_size) {
5546 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5547 			struct mbuf *op_err;
5548 			char msg[SCTP_DIAG_INFO_LEN];
5549 
5550 			/*
5551 			 * out of range (of single byte chunks in the rwnd I
5552 			 * give out). This must be an attacker.
5553 			 */
5554 			*abort_flag = 1;
5555 			snprintf(msg, sizeof(msg),
5556 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5557 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5558 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5559 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5560 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5561 			return;
5562 		}
5563 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5564 
5565 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5566 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5567 		asoc->highest_tsn_inside_map = new_cum_tsn;
5568 
5569 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5570 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5571 
5572 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5573 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5574 		}
5575 	} else {
5576 		SCTP_TCB_LOCK_ASSERT(stcb);
5577 		for (i = 0; i <= gap; i++) {
5578 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5579 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5580 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5581 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5582 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5583 				}
5584 			}
5585 		}
5586 	}
5587 	/*************************************************************/
5588 	/* 2. Clear up re-assembly queue                             */
5589 	/*************************************************************/
5590 
5591 	/* This is now done as part of clearing up the stream/seq */
5592 	if (asoc->idata_supported == 0) {
5593 		uint16_t sid;
5594 
5595 		/* Flush all the un-ordered data based on cum-tsn */
5596 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5597 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5598 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5599 		}
5600 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5601 	}
5602 	/*******************************************************/
5603 	/* 3. Update the PR-stream re-ordering queues and fix  */
5604 	/* delivery issues as needed.                       */
5605 	/*******************************************************/
5606 	fwd_sz -= sizeof(*fwd);
5607 	if (m && fwd_sz) {
5608 		/* New method. */
5609 		unsigned int num_str;
5610 		uint32_t mid, cur_mid;
5611 		uint16_t sid;
5612 		uint16_t ordered, flags;
5613 		struct sctp_strseq *stseq, strseqbuf;
5614 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5615 
5616 		offset += sizeof(*fwd);
5617 
5618 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5619 		if (asoc->idata_supported) {
5620 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5621 		} else {
5622 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5623 		}
5624 		for (i = 0; i < num_str; i++) {
5625 			if (asoc->idata_supported) {
5626 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5627 				    sizeof(struct sctp_strseq_mid),
5628 				    (uint8_t *)&strseqbuf_m);
5629 				offset += sizeof(struct sctp_strseq_mid);
5630 				if (stseq_m == NULL) {
5631 					break;
5632 				}
5633 				sid = ntohs(stseq_m->sid);
5634 				mid = ntohl(stseq_m->mid);
5635 				flags = ntohs(stseq_m->flags);
5636 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5637 					ordered = 0;
5638 				} else {
5639 					ordered = 1;
5640 				}
5641 			} else {
5642 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5643 				    sizeof(struct sctp_strseq),
5644 				    (uint8_t *)&strseqbuf);
5645 				offset += sizeof(struct sctp_strseq);
5646 				if (stseq == NULL) {
5647 					break;
5648 				}
5649 				sid = ntohs(stseq->sid);
5650 				mid = (uint32_t)ntohs(stseq->ssn);
5651 				ordered = 1;
5652 			}
5653 			/* Convert */
5654 
5655 			/* now process */
5656 
5657 			/*
5658 			 * Ok we now look for the stream/seq on the read
5659 			 * queue where its not all delivered. If we find it
5660 			 * we transmute the read entry into a PDI_ABORTED.
5661 			 */
5662 			if (sid >= asoc->streamincnt) {
5663 				/* screwed up streams, stop!  */
5664 				break;
5665 			}
5666 			if ((asoc->str_of_pdapi == sid) &&
5667 			    (asoc->ssn_of_pdapi == mid)) {
5668 				/*
5669 				 * If this is the one we were partially
5670 				 * delivering now then we no longer are.
5671 				 * Note this will change with the reassembly
5672 				 * re-write.
5673 				 */
5674 				asoc->fragmented_delivery_inprogress = 0;
5675 			}
5676 			strm = &asoc->strmin[sid];
5677 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5678 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5679 			}
5680 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5681 				if ((control->sinfo_stream == sid) &&
5682 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5683 					str_seq = (sid << 16) | (0x0000ffff & mid);
5684 					control->pdapi_aborted = 1;
5685 					sv = stcb->asoc.control_pdapi;
5686 					control->end_added = 1;
5687 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5688 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5689 						if (asoc->size_on_all_streams >= control->length) {
5690 							asoc->size_on_all_streams -= control->length;
5691 						} else {
5692 #ifdef INVARIANTS
5693 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5694 #else
5695 							asoc->size_on_all_streams = 0;
5696 #endif
5697 						}
5698 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5699 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5700 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5701 #ifdef INVARIANTS
5702 					} else if (control->on_strm_q) {
5703 						panic("strm: %p ctl: %p unknown %d",
5704 						    strm, control, control->on_strm_q);
5705 #endif
5706 					}
5707 					control->on_strm_q = 0;
5708 					stcb->asoc.control_pdapi = control;
5709 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5710 					    stcb,
5711 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5712 					    (void *)&str_seq,
5713 					    SCTP_SO_NOT_LOCKED);
5714 					stcb->asoc.control_pdapi = sv;
5715 					break;
5716 				} else if ((control->sinfo_stream == sid) &&
5717 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5718 					/* We are past our victim SSN */
5719 					break;
5720 				}
5721 			}
5722 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5723 				/* Update the sequence number */
5724 				strm->last_mid_delivered = mid;
5725 			}
5726 			/* now kick the stream the new way */
5727 			/* sa_ignore NO_NULL_CHK */
5728 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5729 		}
5730 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5731 	}
5732 	/*
5733 	 * Now slide thing forward.
5734 	 */
5735 	sctp_slide_mapping_arrays(stcb);
5736 }
5737