xref: /freebsd/sys/netinet/sctp_indata.c (revision f096ed981fe534c36ed8662ef307042123e443f3)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
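/*
 * Roughly: rwnd = socket receive space
 *   - (size_on_reasm_queue + cnt_on_reasm_queue * MSIZE)
 *   - (size_on_all_streams + cnt_on_all_streams * MSIZE)
 *   - my_rwnd_control_len,
 * with the full window granted when nothing is buffered at all, and the
 * result clamped up to 1 once control overhead would otherwise swallow
 * the window (the SWS case below).
 */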
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and what we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}

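/*
 * Build the ancillary data (cmsg) mbuf for a received message. Depending
 * on the socket options set on the endpoint, this packs an SCTP_RCVINFO,
 * an SCTP_NXTINFO and/or an SCTP_SNDRCV/SCTP_EXTRCV cmsg into a single
 * MT_DATA mbuf. Returns NULL if the user wants no ancillary data or if no
 * mbuf could be allocated.
 */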
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}

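/*
 * Move a TSN from the renege-able mapping array to the non-renege-able
 * (nr) mapping array once its data has been delivered or queued for
 * delivery, and, if that TSN was the highest in the renege-able map,
 * walk the map backwards to find the new highest.
 */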
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}

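/*
 * Insert a control into the proper (ordered or unordered) stream queue,
 * keeping the queue sorted by MID. Returns 0 on success and -1 if a
 * duplicate MID (or, for old-style unordered data, a second entry) is
 * detected, in which case the caller aborts the association.
 */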
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control entry can be here in
				 * old style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?! Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

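/*
 * A reassembly violation was detected: free the offending chunk, send an
 * ABORT carrying a protocol-violation cause that encodes where (opspot)
 * and on what TSN/SID/FSN/MID it happened, and tear the association down.
 */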
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

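/*
 * Recompute control->length and control->tail_mbuf from the mbuf chain
 * hanging off control->data, pruning zero-length mbufs along the way.
 * If the control already sits on the read queue, the bytes are also
 * accounted to the socket receive buffer.
 */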
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

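/*
 * Append an mbuf chain to the end of control->data using the cached tail
 * pointer, again pruning zero-length mbufs and doing the socket buffer
 * accounting; *added returns the number of bytes taken on.
 */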
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

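/*
 * Clone the read-queue metadata of an existing control into a fresh one
 * (nc), taking a new reference on the source address. Used when a
 * completed old-style unordered message leaves fragments behind that must
 * move to a new control.
 */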
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

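/*
 * Re-arm a control for reuse at the given TSN and, if it was already
 * queued to the socket, pull it back off the endpoint's read queue.
 */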
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If we return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one, even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy is
				 * beyond the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the fragmented SSN's on it
	 * are ready to deliver. If so go ahead and place them on the read
	 * queue. In so placing, if we have hit the end, then we need to
	 * remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd so we must do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
1387 static void
1388 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1389     struct sctp_queued_to_read *control,
1390     struct sctp_tmit_chunk *chk,
1391     int created_control,
1392     int *abort_flag, uint32_t tsn)
1393 {
1394 	uint32_t next_fsn;
1395 	struct sctp_tmit_chunk *at, *nat;
1396 	struct sctp_stream_in *strm;
1397 	int do_wakeup, unordered;
1398 	uint32_t lenadded;
1399 
1400 	strm = &asoc->strmin[control->sinfo_stream];
1401 	/*
1402 	 * For old un-ordered data chunks.
1403 	 */
1404 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1405 		unordered = 1;
1406 	} else {
1407 		unordered = 0;
1408 	}
1409 	/* Must be added to the stream-in queue */
1410 	if (created_control) {
1411 		if (unordered == 0) {
1412 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1413 		}
1414 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1415 			/* Duplicate SSN? */
1416 			sctp_abort_in_reasm(stcb, control, chk,
1417 			    abort_flag,
1418 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1419 			sctp_clean_up_control(stcb, control);
1420 			return;
1421 		}
1422 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1423 			/*
1424 			 * Ok we created this control and now lets validate
1425 			 * that its legal i.e. there is a B bit set, if not
1426 			 * and we have up to the cum-ack then its invalid.
1427 			 */
1428 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1429 				sctp_abort_in_reasm(stcb, control, chk,
1430 				    abort_flag,
1431 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1432 				return;
1433 			}
1434 		}
1435 	}
1436 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1437 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1438 		return;
1439 	}
1440 	/*
1441 	 * Ok we must queue the chunk into the reasembly portion: o if its
1442 	 * the first it goes to the control mbuf. o if its not first but the
1443 	 * next in sequence it goes to the control, and each succeeding one
1444 	 * in order also goes. o if its not in order we place it on the list
1445 	 * in its place.
1446 	 */
1447 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1448 		/* Its the very first one. */
1449 		SCTPDBG(SCTP_DEBUG_XXX,
1450 		    "chunk is a first fsn: %u becomes fsn_included\n",
1451 		    chk->rec.data.fsn);
1452 		if (control->first_frag_seen) {
1453 			/*
1454 			 * Error on senders part, they either sent us two
1455 			 * data chunks with FIRST, or they sent two
1456 			 * un-ordered chunks that were fragmented at the
1457 			 * same time in the same stream.
1458 			 */
1459 			sctp_abort_in_reasm(stcb, control, chk,
1460 			    abort_flag,
1461 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1462 			return;
1463 		}
1464 		control->first_frag_seen = 1;
1465 		control->sinfo_ppid = chk->rec.data.ppid;
1466 		control->sinfo_tsn = chk->rec.data.tsn;
1467 		control->fsn_included = chk->rec.data.fsn;
1468 		control->data = chk->data;
1469 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1470 		chk->data = NULL;
1471 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1472 		sctp_setup_tail_pointer(control);
1473 		asoc->size_on_all_streams += control->length;
1474 	} else {
1475 		/* Place the chunk in our list */
1476 		int inserted = 0;
1477 
1478 		if (control->last_frag_seen == 0) {
1479 			/* Still willing to raise highest FSN seen */
1480 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1481 				SCTPDBG(SCTP_DEBUG_XXX,
1482 				    "We have a new top_fsn: %u\n",
1483 				    chk->rec.data.fsn);
1484 				control->top_fsn = chk->rec.data.fsn;
1485 			}
1486 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1487 				SCTPDBG(SCTP_DEBUG_XXX,
1488 				    "The last fsn is now in place fsn: %u\n",
1489 				    chk->rec.data.fsn);
1490 				control->last_frag_seen = 1;
1491 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1492 					SCTPDBG(SCTP_DEBUG_XXX,
1493 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1494 					    chk->rec.data.fsn,
1495 					    control->top_fsn);
1496 					sctp_abort_in_reasm(stcb, control, chk,
1497 					    abort_flag,
1498 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1499 					return;
1500 				}
1501 			}
1502 			if (asoc->idata_supported || control->first_frag_seen) {
1503 				/*
1504 				 * For IDATA we always check since we know
1505 				 * that the first fragment is 0. For old
1506 				 * DATA we have to receive the first before
1507 				 * we know the first FSN (which is the TSN).
1508 				 */
1509 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1510 					/*
1511 					 * We have already delivered up to
1512 					 * this so its a dup
1513 					 */
1514 					sctp_abort_in_reasm(stcb, control, chk,
1515 					    abort_flag,
1516 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1517 					return;
1518 				}
1519 			}
1520 		} else {
1521 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1522 				/* Second last? huh? */
1523 				SCTPDBG(SCTP_DEBUG_XXX,
1524 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1525 				    chk->rec.data.fsn, control->top_fsn);
1526 				sctp_abort_in_reasm(stcb, control,
1527 				    chk, abort_flag,
1528 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1529 				return;
1530 			}
1531 			if (asoc->idata_supported || control->first_frag_seen) {
1532 				/*
1533 				 * For IDATA we always check since we know
1534 				 * that the first fragment is 0. For old
1535 				 * DATA we have to receive the first before
1536 				 * we know the first FSN (which is the TSN).
1537 				 */
1538 
1539 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1540 					/*
1541 					 * We have already delivered up to
1542 					 * this so its a dup
1543 					 */
1544 					SCTPDBG(SCTP_DEBUG_XXX,
1545 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1546 					    chk->rec.data.fsn, control->fsn_included);
1547 					sctp_abort_in_reasm(stcb, control, chk,
1548 					    abort_flag,
1549 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1550 					return;
1551 				}
1552 			}
1553 			/*
1554 			 * validate not beyond top FSN if we have seen last
1555 			 * one
1556 			 */
1557 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1558 				SCTPDBG(SCTP_DEBUG_XXX,
1559 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1560 				    chk->rec.data.fsn,
1561 				    control->top_fsn);
1562 				sctp_abort_in_reasm(stcb, control, chk,
1563 				    abort_flag,
1564 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1565 				return;
1566 			}
1567 		}
1568 		/*
1569 		 * If we reach here, we need to place the new chunk in the
1570 		 * reassembly for this control.
1571 		 */
1572 		SCTPDBG(SCTP_DEBUG_XXX,
1573 		    "chunk is a not first fsn: %u needs to be inserted\n",
1574 		    chk->rec.data.fsn);
1575 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1576 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1577 				/*
1578 				 * This one in queue is bigger than the new
1579 				 * one, insert the new one before at.
1580 				 */
1581 				SCTPDBG(SCTP_DEBUG_XXX,
1582 				    "Insert it before fsn: %u\n",
1583 				    at->rec.data.fsn);
1584 				asoc->size_on_reasm_queue += chk->send_size;
1585 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1586 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1587 				inserted = 1;
1588 				break;
1589 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1590 				/*
1591 				 * Gak, He sent me a duplicate str seq
1592 				 * number
1593 				 */
1594 				/*
1595 				 * foo bar, I guess I will just free this
1596 				 * new guy, should we abort too? FIX ME
1597 				 * MAYBE? Or it COULD be that the SSN's have
1598 				 * wrapped. Maybe I should compare to TSN
1599 				 * somehow... sigh for now just blow away
1600 				 * the chunk!
1601 				 */
1602 				SCTPDBG(SCTP_DEBUG_XXX,
1603 				    "Duplicate to fsn: %u -- abort\n",
1604 				    at->rec.data.fsn);
1605 				sctp_abort_in_reasm(stcb, control,
1606 				    chk, abort_flag,
1607 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1608 				return;
1609 			}
1610 		}
1611 		if (inserted == 0) {
1612 			/* Goes on the end */
1613 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1614 			    chk->rec.data.fsn);
1615 			asoc->size_on_reasm_queue += chk->send_size;
1616 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1617 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1618 		}
1619 	}
1620 	/*
1621 	 * OK, let's see if we can pull any in-sequence fragments up into the
1622 	 * control structure, if it makes sense.
1623 	 */
1624 	do_wakeup = 0;
1625 	/*
1626 	 * If the first fragment has not been seen there is no sense in
1627 	 * looking.
1628 	 */
1629 	if (control->first_frag_seen) {
1630 		next_fsn = control->fsn_included + 1;
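		/*
		 * Note: the reasm list is kept sorted by FSN, so a single
		 * front-to-back walk splices every fragment whose FSN equals
		 * next_fsn into the control; the first gap ends the scan.
		 */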
1631 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1632 			if (at->rec.data.fsn == next_fsn) {
1633 				/* We can add this one now to the control */
1634 				SCTPDBG(SCTP_DEBUG_XXX,
1635 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1636 				    control, at,
1637 				    at->rec.data.fsn,
1638 				    next_fsn, control->fsn_included);
1639 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1640 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1641 				if (control->on_read_q) {
1642 					do_wakeup = 1;
1643 				} else {
1644 					/*
1645 					 * We only add to the
1646 					 * size-on-all-streams if it's not on
1647 					 * the read queue. The read queue flag
1648 					 * causes an sballoc, so it's accounted
1649 					 * for there.
1650 					 */
1651 					asoc->size_on_all_streams += lenadded;
1652 				}
1653 				next_fsn++;
1654 				if (control->end_added && control->pdapi_started) {
1655 					if (strm->pd_api_started) {
1656 						strm->pd_api_started = 0;
1657 						control->pdapi_started = 0;
1658 					}
1659 					if (control->on_read_q == 0) {
1660 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1661 						    control,
1662 						    &stcb->sctp_socket->so_rcv, control->end_added,
1663 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1664 					}
1665 					break;
1666 				}
1667 			} else {
1668 				break;
1669 			}
1670 		}
1671 	}
1672 	if (do_wakeup) {
1673 		/* Need to wakeup the reader */
1674 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1675 	}
1676 }
1677 
1678 static struct sctp_queued_to_read *
1679 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1680 {
1681 	struct sctp_queued_to_read *control;
1682 
1683 	if (ordered) {
1684 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1685 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1686 				break;
1687 			}
1688 		}
1689 	} else {
1690 		if (idata_supported) {
1691 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1692 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1693 					break;
1694 				}
1695 			}
1696 		} else {
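			/*
			 * Old DATA carries no usable sequence number for
			 * unordered messages, so only one unordered
			 * reassembly per stream can be in progress; the
			 * head of the queue is the candidate.
			 */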
1697 			control = TAILQ_FIRST(&strm->uno_inqueue);
1698 		}
1699 	}
1700 	return (control);
1701 }
1702 
1703 static int
1704 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1705     struct mbuf **m, int offset, int chk_length,
1706     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1707     int *break_flag, int last_chunk, uint8_t chk_type)
1708 {
1709 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1710 	uint32_t tsn, fsn, gap, mid;
1711 	struct mbuf *dmbuf;
1712 	int the_len;
1713 	int need_reasm_check = 0;
1714 	uint16_t sid;
1715 	struct mbuf *op_err;
1716 	char msg[SCTP_DIAG_INFO_LEN];
1717 	struct sctp_queued_to_read *control, *ncontrol;
1718 	uint32_t ppid;
1719 	uint8_t chk_flags;
1720 	struct sctp_stream_reset_list *liste;
1721 	int ordered;
1722 	size_t clen;
1723 	int created_control = 0;
1724 
1725 	if (chk_type == SCTP_IDATA) {
1726 		struct sctp_idata_chunk *chunk, chunk_buf;
1727 
1728 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1729 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1730 		chk_flags = chunk->ch.chunk_flags;
1731 		clen = sizeof(struct sctp_idata_chunk);
1732 		tsn = ntohl(chunk->dp.tsn);
1733 		sid = ntohs(chunk->dp.sid);
1734 		mid = ntohl(chunk->dp.mid);
1735 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1736 			fsn = 0;
1737 			ppid = chunk->dp.ppid_fsn.ppid;
1738 		} else {
1739 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1740 			ppid = 0xffffffff;	/* Use as an invalid value. */
1741 		}
1742 	} else {
1743 		struct sctp_data_chunk *chunk, chunk_buf;
1744 
1745 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1746 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1747 		chk_flags = chunk->ch.chunk_flags;
1748 		clen = sizeof(struct sctp_data_chunk);
1749 		tsn = ntohl(chunk->dp.tsn);
1750 		sid = ntohs(chunk->dp.sid);
1751 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1752 		fsn = tsn;
1753 		ppid = chunk->dp.ppid;
1754 	}
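	/*
	 * At this point the header fields are normalized for both chunk
	 * types: for I-DATA the MID is 32 bits and the FSN is explicit (a
	 * first fragment implies FSN 0 and carries the PPID in the shared
	 * field), while for old DATA the 16-bit SSN serves as the MID and
	 * the TSN doubles as the FSN.
	 */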
1755 	if ((size_t)chk_length == clen) {
1756 		/*
1757 		 * Need to send an abort since we had an empty data chunk.
1758 		 */
1759 		op_err = sctp_generate_no_user_data_cause(tsn);
1760 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1761 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1762 		*abort_flag = 1;
1763 		return (0);
1764 	}
1765 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1766 		asoc->send_sack = 1;
1767 	}
1768 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1769 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1770 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1771 	}
1772 	if (stcb == NULL) {
1773 		return (0);
1774 	}
1775 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1776 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1777 		/* It is a duplicate */
1778 		SCTP_STAT_INCR(sctps_recvdupdata);
1779 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1780 			/* Record a dup for the next outbound sack */
1781 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1782 			asoc->numduptsns++;
1783 		}
1784 		asoc->send_sack = 1;
1785 		return (0);
1786 	}
1787 	/* Calculate the number of TSN's between the base and this TSN */
1788 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
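	/*
	 * Illustrative example of the gap computation (assuming the usual
	 * serial-number wrap handling): with a base of 0xfffffffe and a
	 * tsn of 0x00000001 the gap is 3, i.e. this TSN occupies bit 3
	 * (byte 0) of the mapping arrays.
	 */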
1789 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1790 		/* Can't hold the bit in the mapping at max array, toss it */
1791 		return (0);
1792 	}
1793 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1794 		SCTP_TCB_LOCK_ASSERT(stcb);
1795 		if (sctp_expand_mapping_array(asoc, gap)) {
1796 			/* Can't expand, drop it */
1797 			return (0);
1798 		}
1799 	}
1800 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1801 		*high_tsn = tsn;
1802 	}
1803 	/* See if we have received this one already */
1804 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1805 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1806 		SCTP_STAT_INCR(sctps_recvdupdata);
1807 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1808 			/* Record a dup for the next outbound sack */
1809 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1810 			asoc->numduptsns++;
1811 		}
1812 		asoc->send_sack = 1;
1813 		return (0);
1814 	}
1815 	/*
1816 	 * Check to see about the GONE flag, duplicates would cause a sack
1817 	 * to be sent up above
1818 	 */
1819 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1820 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1821 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1822 		/*
1823 		 * wait a minute, this guy is gone, there is no longer a
1824 		 * receiver. Send peer an ABORT!
1825 		 */
1826 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1827 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		*abort_flag = 1;
1829 		return (0);
1830 	}
1831 	/*
1832 	 * Now before going further we see if there is room. If NOT then we
1833 	 * MAY let one through only IF this TSN is the one we are waiting
1834 	 * for on a partial delivery API.
1835 	 */
1836 
1837 	/* Is the stream valid? */
1838 	if (sid >= asoc->streamincnt) {
1839 		struct sctp_error_invalid_stream *cause;
1840 
1841 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1842 		    0, M_NOWAIT, 1, MT_DATA);
1843 		if (op_err != NULL) {
1844 			/* add some space up front so prepend will work well */
1845 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1846 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1847 			/*
1848 			 * Error causes are just parameters, and this one has two
1849 			 * back-to-back parameter headers: one with the error type
1850 			 * and size, the other with the stream ID and a reserved field.
1851 			 */
1852 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1853 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1854 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1855 			cause->stream_id = htons(sid);
1856 			cause->reserved = htons(0);
1857 			sctp_queue_op_err(stcb, op_err);
1858 		}
1859 		SCTP_STAT_INCR(sctps_badsid);
1860 		SCTP_TCB_LOCK_ASSERT(stcb);
1861 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1862 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1863 			asoc->highest_tsn_inside_nr_map = tsn;
1864 		}
1865 		if (tsn == (asoc->cumulative_tsn + 1)) {
1866 			/* Update cum-ack */
1867 			asoc->cumulative_tsn = tsn;
1868 		}
1869 		return (0);
1870 	}
1871 	/*
1872 	 * If it's a fragmented message, let's see if we can find the control
1873 	 * on the reassembly queues.
1874 	 */
1875 	if ((chk_type == SCTP_IDATA) &&
1876 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1877 	    (fsn == 0)) {
1878 		/*
1879 		 * The first *must* be fsn 0, and other (middle/end) pieces
1880 		 * can *not* be fsn 0. XXX: This can happen in case of a
1881 		 * wrap around. Ignore it for now.
1882 		 */
1883 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1884 		goto err_out;
1885 	}
1886 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1887 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1888 	    chk_flags, control);
1889 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1890 		/* See if we can find the re-assembly entity */
1891 		if (control != NULL) {
1892 			/* We found something, does it belong? */
1893 			if (ordered && (mid != control->mid)) {
1894 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1895 		err_out:
1896 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1897 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1898 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1899 				*abort_flag = 1;
1900 				return (0);
1901 			}
1902 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1903 				/*
1904 				 * We can't have a switched order with an
1905 				 * unordered chunk
1906 				 */
1907 				SCTP_SNPRINTF(msg, sizeof(msg),
1908 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1909 				    tsn);
1910 				goto err_out;
1911 			}
1912 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1913 				/*
1914 				 * We can't have a switched unordered with an
1915 				 * ordered chunk
1916 				 */
1917 				SCTP_SNPRINTF(msg, sizeof(msg),
1918 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1919 				    tsn);
1920 				goto err_out;
1921 			}
1922 		}
1923 	} else {
1924 		/*
1925 		 * It's a complete segment. Let's validate we don't have a
1926 		 * re-assembly going on with the same Stream/Seq (for
1927 		 * ordered) or in the same Stream for unordered.
1928 		 */
1929 		if (control != NULL) {
1930 			if (ordered || asoc->idata_supported) {
1931 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1932 				    chk_flags, mid);
1933 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1934 				goto err_out;
1935 			} else {
1936 				if ((tsn == control->fsn_included + 1) &&
1937 				    (control->end_added == 0)) {
1938 					SCTP_SNPRINTF(msg, sizeof(msg),
1939 					    "Illegal message sequence, missing end for MID: %8.8x",
1940 					    control->fsn_included);
1941 					goto err_out;
1942 				} else {
1943 					control = NULL;
1944 				}
1945 			}
1946 		}
1947 	}
1948 	/* now do the tests */
1949 	if (((asoc->cnt_on_all_streams +
1950 	    asoc->cnt_on_reasm_queue +
1951 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1952 	    (((int)asoc->my_rwnd) <= 0)) {
1953 		/*
1954 		 * When we have NO room in the rwnd we check to make sure
1955 		 * the reader is doing its job...
1956 		 */
1957 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1958 			/* some to read, wake-up */
1959 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1960 			struct socket *so;
1961 
1962 			so = SCTP_INP_SO(stcb->sctp_ep);
1963 			atomic_add_int(&stcb->asoc.refcnt, 1);
1964 			SCTP_TCB_UNLOCK(stcb);
1965 			SCTP_SOCKET_LOCK(so, 1);
1966 			SCTP_TCB_LOCK(stcb);
1967 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1968 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1969 				/* assoc was freed while we were unlocked */
1970 				SCTP_SOCKET_UNLOCK(so, 1);
1971 				return (0);
1972 			}
1973 #endif
1974 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1975 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1976 			SCTP_SOCKET_UNLOCK(so, 1);
1977 #endif
1978 		}
1979 		/* now is it in the mapping array of what we have accepted? */
1980 		if (chk_type == SCTP_DATA) {
1981 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1982 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1983 				/* Nope not in the valid range dump it */
1984 		dump_packet:
1985 				sctp_set_rwnd(stcb, asoc);
1986 				if ((asoc->cnt_on_all_streams +
1987 				    asoc->cnt_on_reasm_queue +
1988 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1989 					SCTP_STAT_INCR(sctps_datadropchklmt);
1990 				} else {
1991 					SCTP_STAT_INCR(sctps_datadroprwnd);
1992 				}
1993 				*break_flag = 1;
1994 				return (0);
1995 			}
1996 		} else {
1997 			if (control == NULL) {
1998 				goto dump_packet;
1999 			}
2000 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
2001 				goto dump_packet;
2002 			}
2003 		}
2004 	}
2005 #ifdef SCTP_ASOCLOG_OF_TSNS
2006 	SCTP_TCB_LOCK_ASSERT(stcb);
2007 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
2008 		asoc->tsn_in_at = 0;
2009 		asoc->tsn_in_wrapped = 1;
2010 	}
2011 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
2012 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
2013 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
2014 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
2015 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
2016 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
2017 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
2018 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
2019 	asoc->tsn_in_at++;
2020 #endif
2021 	/*
2022 	 * Before we continue let's validate that we are not being fooled by
2023 	 * an evil attacker. We can only have Nk chunks based on our TSN
2024 	 * spread allowed by the mapping array N * 8 bits, so there is no
2025 	 * way our stream sequence numbers could have wrapped. We of course
2026 	 * only validate the FIRST fragment so the bit must be set.
2027 	 */
2028 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
2029 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
2030 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2031 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2032 		/* The incoming sseq is behind where we last delivered? */
2033 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2034 		    mid, asoc->strmin[sid].last_mid_delivered);
2035 
2036 		if (asoc->idata_supported) {
2037 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2038 			    asoc->strmin[sid].last_mid_delivered,
2039 			    tsn,
2040 			    sid,
2041 			    mid);
2042 		} else {
2043 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2044 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2045 			    tsn,
2046 			    sid,
2047 			    (uint16_t)mid);
2048 		}
2049 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2050 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2051 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
2052 		*abort_flag = 1;
2053 		return (0);
2054 	}
2055 	if (chk_type == SCTP_IDATA) {
2056 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2057 	} else {
2058 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2059 	}
2060 	if (last_chunk == 0) {
2061 		if (chk_type == SCTP_IDATA) {
2062 			dmbuf = SCTP_M_COPYM(*m,
2063 			    (offset + sizeof(struct sctp_idata_chunk)),
2064 			    the_len, M_NOWAIT);
2065 		} else {
2066 			dmbuf = SCTP_M_COPYM(*m,
2067 			    (offset + sizeof(struct sctp_data_chunk)),
2068 			    the_len, M_NOWAIT);
2069 		}
2070 #ifdef SCTP_MBUF_LOGGING
2071 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2072 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2073 		}
2074 #endif
2075 	} else {
2076 		/* We can steal the last chunk */
2077 		int l_len;
2078 
2079 		dmbuf = *m;
2080 		/* lop off the top part */
2081 		if (chk_type == SCTP_IDATA) {
2082 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2083 		} else {
2084 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2085 		}
2086 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2087 			l_len = SCTP_BUF_LEN(dmbuf);
2088 		} else {
2089 			/*
2090 			 * Need to count up the size; hopefully we do not
2091 			 * hit this too often :-0
2092 			 */
2093 			struct mbuf *lat;
2094 
2095 			l_len = 0;
2096 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2097 				l_len += SCTP_BUF_LEN(lat);
2098 			}
2099 		}
2100 		if (l_len > the_len) {
2101 			/* Trim the trailing padding bytes off the end too */
2102 			m_adj(dmbuf, -(l_len - the_len));
2103 		}
2104 	}
2105 	if (dmbuf == NULL) {
2106 		SCTP_STAT_INCR(sctps_nomem);
2107 		return (0);
2108 	}
2109 	/*
2110 	 * Now no matter what, we need a control, get one if we don't have
2111 	 * one (we may have gotten it above when we found the message was
2112 	 * fragmented).
2113 	 */
2114 	if (control == NULL) {
2115 		sctp_alloc_a_readq(stcb, control);
2116 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2117 		    ppid,
2118 		    sid,
2119 		    chk_flags,
2120 		    NULL, fsn, mid);
2121 		if (control == NULL) {
2122 			SCTP_STAT_INCR(sctps_nomem);
2123 			return (0);
2124 		}
2125 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
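			/*
			 * An unfragmented message: attach the mbuf chain
			 * directly, total up its length, and mark the
			 * control complete so it can go straight to the
			 * read queue.
			 */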
2126 			struct mbuf *mm;
2127 
2128 			control->data = dmbuf;
2129 			control->tail_mbuf = NULL;
2130 			for (mm = control->data; mm; mm = mm->m_next) {
2131 				control->length += SCTP_BUF_LEN(mm);
2132 				if (SCTP_BUF_NEXT(mm) == NULL) {
2133 					control->tail_mbuf = mm;
2134 				}
2135 			}
2136 			control->end_added = 1;
2137 			control->last_frag_seen = 1;
2138 			control->first_frag_seen = 1;
2139 			control->fsn_included = fsn;
2140 			control->top_fsn = fsn;
2141 		}
2142 		created_control = 1;
2143 	}
2144 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2145 	    chk_flags, ordered, mid, control);
2146 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2147 	    TAILQ_EMPTY(&asoc->resetHead) &&
2148 	    ((ordered == 0) ||
2149 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2150 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2151 		/* Candidate for express delivery */
2152 		/*
2153 		 * It's not fragmented, no PD-API is up, nothing is in the
2154 		 * delivery queue, it's un-ordered OR ordered and the next to
2155 		 * deliver AND nothing else is stuck on the stream queue,
2156 		 * and there is room for it in the socket buffer. Let's just
2157 		 * stuff it up the buffer....
2158 		 */
2159 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2160 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2161 			asoc->highest_tsn_inside_nr_map = tsn;
2162 		}
2163 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2164 		    control, mid);
2165 
2166 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2167 		    control, &stcb->sctp_socket->so_rcv,
2168 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2169 
2170 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2171 			/* for ordered, bump what we delivered */
2172 			asoc->strmin[sid].last_mid_delivered++;
2173 		}
2174 		SCTP_STAT_INCR(sctps_recvexpress);
2175 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2176 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2177 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2178 		}
2179 		control = NULL;
2180 		goto finish_express_del;
2181 	}
2182 
2183 	/* Now will we need a chunk too? */
2184 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2185 		sctp_alloc_a_chunk(stcb, chk);
2186 		if (chk == NULL) {
2187 			/* No memory so we drop the chunk */
2188 			SCTP_STAT_INCR(sctps_nomem);
2189 			if (last_chunk == 0) {
2190 				/* we copied it, free the copy */
2191 				sctp_m_freem(dmbuf);
2192 			}
2193 			return (0);
2194 		}
2195 		chk->rec.data.tsn = tsn;
2196 		chk->no_fr_allowed = 0;
2197 		chk->rec.data.fsn = fsn;
2198 		chk->rec.data.mid = mid;
2199 		chk->rec.data.sid = sid;
2200 		chk->rec.data.ppid = ppid;
2201 		chk->rec.data.context = stcb->asoc.context;
2202 		chk->rec.data.doing_fast_retransmit = 0;
2203 		chk->rec.data.rcv_flags = chk_flags;
2204 		chk->asoc = asoc;
2205 		chk->send_size = the_len;
2206 		chk->whoTo = net;
2207 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2208 		    chk,
2209 		    control, mid);
2210 		atomic_add_int(&net->ref_count, 1);
2211 		chk->data = dmbuf;
2212 	}
2213 	/* Set the appropriate TSN mark */
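	/*
	 * Note: with sctp_do_drain disabled the stack will never renege on
	 * received data, so the TSN can be recorded in the non-renegable
	 * (nr) map; otherwise it stays in the regular map and may still be
	 * revoked.
	 */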
2214 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2215 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2216 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2217 			asoc->highest_tsn_inside_nr_map = tsn;
2218 		}
2219 	} else {
2220 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2221 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2222 			asoc->highest_tsn_inside_map = tsn;
2223 		}
2224 	}
2225 	/* Now is it complete (i.e. not fragmented)? */
2226 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2227 		/*
2228 		 * Special check for when streams are resetting. We could be
2229 		 * smarter about this and check the actual stream to see
2230 		 * whether it is being reset; that way we would not create
2231 		 * head-of-line blocking (HOLB) between streams being reset
2232 		 * and those that are not.
2234 		 */
2235 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2236 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2237 			/*
2238 			 * Yep, it's past where we need to reset... go ahead
2239 			 * and queue it.
2240 			 */
2241 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2242 				/* first one on */
2243 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2244 			} else {
2245 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2246 				unsigned char inserted = 0;
2247 
2248 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2249 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2251 						continue;
2252 					} else {
2253 						/* found it */
2254 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2255 						inserted = 1;
2256 						break;
2257 					}
2258 				}
2259 				if (inserted == 0) {
2260 					/*
2261 					 * must be put at end, use prevP
2262 					 * Must be put at the end; every entry
2263 					 * in the queue has a smaller TSN.
2265 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2266 				}
2267 			}
2268 			goto finish_express_del;
2269 		}
2270 		if (chk_flags & SCTP_DATA_UNORDERED) {
2271 			/* queue directly into socket buffer */
2272 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2273 			    control, mid);
2274 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2275 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2276 			    control,
2277 			    &stcb->sctp_socket->so_rcv, 1,
2278 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2280 		} else {
2281 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2282 			    mid);
2283 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2284 			if (*abort_flag) {
2285 				if (last_chunk) {
2286 					*m = NULL;
2287 				}
2288 				return (0);
2289 			}
2290 		}
2291 		goto finish_express_del;
2292 	}
2293 	/* If we reach here its a reassembly */
2294 	need_reasm_check = 1;
2295 	SCTPDBG(SCTP_DEBUG_XXX,
2296 	    "Queue data to stream for reasm control: %p MID: %u\n",
2297 	    control, mid);
2298 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2299 	if (*abort_flag) {
2300 		/*
2301 		 * the assoc is now gone and chk was put onto the reasm
2302 		 * queue, which has all been freed.
2303 		 */
2304 		if (last_chunk) {
2305 			*m = NULL;
2306 		}
2307 		return (0);
2308 	}
2309 finish_express_del:
2310 	/* Here we tidy up things */
2311 	if (tsn == (asoc->cumulative_tsn + 1)) {
2312 		/* Update cum-ack */
2313 		asoc->cumulative_tsn = tsn;
2314 	}
2315 	if (last_chunk) {
2316 		*m = NULL;
2317 	}
2318 	if (ordered) {
2319 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2320 	} else {
2321 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2322 	}
2323 	SCTP_STAT_INCR(sctps_recvdata);
2324 	/* Set it present please */
2325 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2326 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2327 	}
2328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2329 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2330 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2331 	}
2332 	if (need_reasm_check) {
2333 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2334 		need_reasm_check = 0;
2335 	}
2336 	/* check the special flag for stream resets */
2337 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2338 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2339 		/*
2340 		 * We have finished working through the backlogged TSNs; now
2341 		 * it is time to reset streams. 1: call the reset function. 2:
2342 		 * free pending_reply space. 3: distribute any chunks in the
2343 		 * pending_reply_queue.
2344 		 */
2345 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2346 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2347 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2348 		SCTP_FREE(liste, SCTP_M_STRESET);
2349 		/* sa_ignore FREED_MEMORY */
2350 		liste = TAILQ_FIRST(&asoc->resetHead);
2351 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2352 			/* All can be removed */
2353 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2354 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2355 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2356 				if (*abort_flag) {
2357 					return (0);
2358 				}
2359 				if (need_reasm_check) {
2360 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2361 					need_reasm_check = 0;
2362 				}
2363 			}
2364 		} else {
2365 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2366 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2367 					break;
2368 				}
2369 				/*
2370 				 * If control->sinfo_tsn is <= liste->tsn we
2371 				 * can process it, which is the negation of
2372 				 * control->sinfo_tsn > liste->tsn.
2373 				 */
2374 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2375 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2376 				if (*abort_flag) {
2377 					return (0);
2378 				}
2379 				if (need_reasm_check) {
2380 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2381 					need_reasm_check = 0;
2382 				}
2383 			}
2384 		}
2385 	}
2386 	return (1);
2387 }
2388 
2389 static const int8_t sctp_map_lookup_tab[256] = {
2390 	0, 1, 0, 2, 0, 1, 0, 3,
2391 	0, 1, 0, 2, 0, 1, 0, 4,
2392 	0, 1, 0, 2, 0, 1, 0, 3,
2393 	0, 1, 0, 2, 0, 1, 0, 5,
2394 	0, 1, 0, 2, 0, 1, 0, 3,
2395 	0, 1, 0, 2, 0, 1, 0, 4,
2396 	0, 1, 0, 2, 0, 1, 0, 3,
2397 	0, 1, 0, 2, 0, 1, 0, 6,
2398 	0, 1, 0, 2, 0, 1, 0, 3,
2399 	0, 1, 0, 2, 0, 1, 0, 4,
2400 	0, 1, 0, 2, 0, 1, 0, 3,
2401 	0, 1, 0, 2, 0, 1, 0, 5,
2402 	0, 1, 0, 2, 0, 1, 0, 3,
2403 	0, 1, 0, 2, 0, 1, 0, 4,
2404 	0, 1, 0, 2, 0, 1, 0, 3,
2405 	0, 1, 0, 2, 0, 1, 0, 7,
2406 	0, 1, 0, 2, 0, 1, 0, 3,
2407 	0, 1, 0, 2, 0, 1, 0, 4,
2408 	0, 1, 0, 2, 0, 1, 0, 3,
2409 	0, 1, 0, 2, 0, 1, 0, 5,
2410 	0, 1, 0, 2, 0, 1, 0, 3,
2411 	0, 1, 0, 2, 0, 1, 0, 4,
2412 	0, 1, 0, 2, 0, 1, 0, 3,
2413 	0, 1, 0, 2, 0, 1, 0, 6,
2414 	0, 1, 0, 2, 0, 1, 0, 3,
2415 	0, 1, 0, 2, 0, 1, 0, 4,
2416 	0, 1, 0, 2, 0, 1, 0, 3,
2417 	0, 1, 0, 2, 0, 1, 0, 5,
2418 	0, 1, 0, 2, 0, 1, 0, 3,
2419 	0, 1, 0, 2, 0, 1, 0, 4,
2420 	0, 1, 0, 2, 0, 1, 0, 3,
2421 	0, 1, 0, 2, 0, 1, 0, 8
2422 };
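/*
 * Note: for a byte taken from the OR of the two mapping arrays, the table
 * above gives the number of consecutive 1-bits starting at bit 0, i.e. how
 * many in-sequence TSNs that byte contributes before the first gap (a full
 * 0xff byte is handled separately by the caller and counts all 8).
 */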
2423 
2424 
2425 void
2426 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2427 {
2428 	/*
2429 	 * Now we also need to check the mapping array in a couple of ways.
2430 	 * 1) Did we move the cum-ack point?
2431 	 *
2432 	 * When you first glance at this you might think that all entries
2433 	 * that make up the position of the cum-ack would be in the
2434 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2435 	 * deliverable. That's true with one exception: for a fragmented
2436 	 * message we may not deliver the data until some threshold (or all
2437 	 * of it) is in place. So we must OR the nr_mapping_array and
2438 	 * mapping_array to get a true picture of the cum-ack.
2439 	 */
2440 	struct sctp_association *asoc;
2441 	int at;
2442 	uint8_t val;
2443 	int slide_from, slide_end, lgap, distance;
2444 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2445 
2446 	asoc = &stcb->asoc;
2447 
2448 	old_cumack = asoc->cumulative_tsn;
2449 	old_base = asoc->mapping_array_base_tsn;
2450 	old_highest = asoc->highest_tsn_inside_map;
2451 	/*
2452 	 * We could probably improve this a small bit by calculating the
2453 	 * offset of the current cum-ack as the starting point.
2454 	 */
2455 	at = 0;
2456 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2457 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2458 		if (val == 0xff) {
2459 			at += 8;
2460 		} else {
2461 			/* there is a 0 bit */
2462 			at += sctp_map_lookup_tab[val];
2463 			break;
2464 		}
2465 	}
2466 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
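	/*
	 * Note: 'at' counted the TSNs received consecutively from the base,
	 * so the cum-ack is base + at - 1; when at is 0 this wraps back to
	 * base - 1, meaning the base TSN itself has not arrived yet.
	 */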
2467 
2468 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2469 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2470 #ifdef INVARIANTS
2471 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2472 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2473 #else
2474 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2475 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2476 		sctp_print_mapping_array(asoc);
2477 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2478 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2479 		}
2480 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2481 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2482 #endif
2483 	}
2484 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2485 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2486 	} else {
2487 		highest_tsn = asoc->highest_tsn_inside_map;
2488 	}
2489 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2490 		/* The complete array was completed by a single FR */
2491 		/* highest becomes the cum-ack */
2492 		int clr;
2493 #ifdef INVARIANTS
2494 		unsigned int i;
2495 #endif
2496 
2497 		/* clear the array */
2498 		clr = ((at + 7) >> 3);
2499 		if (clr > asoc->mapping_array_size) {
2500 			clr = asoc->mapping_array_size;
2501 		}
2502 		memset(asoc->mapping_array, 0, clr);
2503 		memset(asoc->nr_mapping_array, 0, clr);
2504 #ifdef INVARIANTS
2505 		for (i = 0; i < asoc->mapping_array_size; i++) {
2506 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2507 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2508 				sctp_print_mapping_array(asoc);
2509 			}
2510 		}
2511 #endif
2512 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2513 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2514 	} else if (at >= 8) {
2515 		/* we can slide the mapping array down */
2516 		/* slide_from holds where we hit the first NON 0xff byte */
2517 
2518 		/*
2519 		 * now calculate the ceiling of the move using our highest
2520 		 * TSN value
2521 		 */
2522 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2523 		slide_end = (lgap >> 3);
2524 		if (slide_end < slide_from) {
2525 			sctp_print_mapping_array(asoc);
2526 #ifdef INVARIANTS
2527 			panic("impossible slide");
2528 #else
2529 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2530 			    lgap, slide_end, slide_from, at);
2531 			return;
2532 #endif
2533 		}
2534 		if (slide_end > asoc->mapping_array_size) {
2535 #ifdef INVARIANTS
2536 			panic("would overrun buffer");
2537 #else
2538 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2539 			    asoc->mapping_array_size, slide_end);
2540 			slide_end = asoc->mapping_array_size;
2541 #endif
2542 		}
2543 		distance = (slide_end - slide_from) + 1;
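		/*
		 * Illustrative example: if slide_from is 2 (the first two
		 * bytes were all ones) and the highest TSN lands in byte 5,
		 * distance is 4; bytes 2..5 are copied down to 0..3 below
		 * and mapping_array_base_tsn advances by 2 * 8 = 16 TSNs.
		 */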
2544 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2545 			sctp_log_map(old_base, old_cumack, old_highest,
2546 			    SCTP_MAP_PREPARE_SLIDE);
2547 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2548 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2549 		}
2550 		if (distance + slide_from > asoc->mapping_array_size ||
2551 		    distance < 0) {
2552 			/*
2553 			 * Here we do NOT slide forward the array so that
2554 			 * hopefully when more data comes in to fill it up
2555 			 * we will be able to slide it forward. Really I
2556 			 * don't think this should happen :-0
2557 			 */
2558 
2559 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2560 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2561 				    (uint32_t)asoc->mapping_array_size,
2562 				    SCTP_MAP_SLIDE_NONE);
2563 			}
2564 		} else {
2565 			int ii;
2566 
2567 			for (ii = 0; ii < distance; ii++) {
2568 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2569 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2571 			}
2572 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2573 				asoc->mapping_array[ii] = 0;
2574 				asoc->nr_mapping_array[ii] = 0;
2575 			}
2576 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2577 				asoc->highest_tsn_inside_map += (slide_from << 3);
2578 			}
2579 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2580 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2581 			}
2582 			asoc->mapping_array_base_tsn += (slide_from << 3);
2583 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2584 				sctp_log_map(asoc->mapping_array_base_tsn,
2585 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2586 				    SCTP_MAP_SLIDE_RESULT);
2587 			}
2588 		}
2589 	}
2590 }
2591 
2592 void
2593 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2594 {
2595 	struct sctp_association *asoc;
2596 	uint32_t highest_tsn;
2597 	int is_a_gap;
2598 
2599 	sctp_slide_mapping_arrays(stcb);
2600 	asoc = &stcb->asoc;
2601 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2602 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2603 	} else {
2604 		highest_tsn = asoc->highest_tsn_inside_map;
2605 	}
2606 	/* Is there a gap now? */
2607 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2608 
2609 	/*
2610 	 * Now we need to see if we need to queue a sack or just start the
2611 	 * timer (if allowed).
2612 	 */
2613 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2614 		/*
2615 		 * OK, special case: in the SHUTDOWN-SENT state we make
2616 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2617 		 * a SACK.
2618 		 */
2619 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2620 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2621 			    stcb->sctp_ep, stcb, NULL,
2622 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2623 		}
2624 		sctp_send_shutdown(stcb,
2625 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2626 		if (is_a_gap) {
2627 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2628 		}
2629 	} else {
2630 		/*
2631 		 * CMT DAC algorithm: increase number of packets received
2632 		 * since last ack
2633 		 */
2634 		stcb->asoc.cmt_dac_pkts_rcvd++;
2635 
2636 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2637 							 * SACK */
2638 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2639 							 * longer is one */
2640 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2641 		    (is_a_gap) ||	/* is still a gap */
2642 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2643 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2644 		    ) {
2645 
2647 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2648 			    (stcb->asoc.send_sack == 0) &&
2649 			    (stcb->asoc.numduptsns == 0) &&
2650 			    (stcb->asoc.delayed_ack) &&
2651 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2653 				/*
2654 				 * CMT DAC algorithm: With CMT, delay acks
2655 				 * even in the face of reordering. Therefore,
2656 				 * acks that do not have to be sent for the
2657 				 * above reasons will be delayed. That is,
2658 				 * acks that would have been sent due to gap
2659 				 * reports will be delayed with DAC. Start
2660 				 * the delayed ack timer.
2663 				 */
2664 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2665 				    stcb->sctp_ep, stcb, NULL);
2666 			} else {
2667 				/*
2668 				 * Ok we must build a SACK since the timer
2669 				 * is pending, we got our first packet OR
2670 				 * there are gaps or duplicates.
2671 				 */
2672 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2673 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2674 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2675 			}
2676 		} else {
2677 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2678 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2679 				    stcb->sctp_ep, stcb, NULL);
2680 			}
2681 		}
2682 	}
2683 }
2684 
2685 int
2686 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2687     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2688     struct sctp_nets *net, uint32_t *high_tsn)
2689 {
2690 	struct sctp_chunkhdr *ch, chunk_buf;
2691 	struct sctp_association *asoc;
2692 	int num_chunks = 0;	/* number of control chunks processed */
2693 	int stop_proc = 0;
2694 	int break_flag, last_chunk;
2695 	int abort_flag = 0, was_a_gap;
2696 	struct mbuf *m;
2697 	uint32_t highest_tsn;
2698 	uint16_t chk_length;
2699 
2700 	/* set the rwnd */
2701 	sctp_set_rwnd(stcb, &stcb->asoc);
2702 
2703 	m = *mm;
2704 	SCTP_TCB_LOCK_ASSERT(stcb);
2705 	asoc = &stcb->asoc;
2706 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2707 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2708 	} else {
2709 		highest_tsn = asoc->highest_tsn_inside_map;
2710 	}
2711 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2712 	/*
2713 	 * setup where we got the last DATA packet from for any SACK that
2714 	 * may need to go out. Don't bump the net. This is done ONLY when a
2715 	 * chunk is assigned.
2716 	 */
2717 	asoc->last_data_chunk_from = net;
2718 
2719 	/*-
2720 	 * Now before we proceed we must figure out if this is a wasted
2721 	 * cluster... i.e. it is a small packet sent in and yet the driver
2722 	 * underneath allocated a full cluster for it. If so we must copy it
2723 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2724 	 * with cluster starvation. Note for __Panda__ we don't do this
2725 	 * since it has clusters all the way down to 64 bytes.
2726 	 */
2727 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2728 		/* We only handle mbufs that are singletons, not chains. */
2729 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2730 		if (m) {
2731 			/* OK, let's see if we can copy the data up */
2732 			caddr_t *from, *to;
2733 
2734 			/* get the pointers and copy */
2735 			to = mtod(m, caddr_t *);
2736 			from = mtod((*mm), caddr_t *);
2737 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2738 			/* copy the length and free up the old */
2739 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2740 			sctp_m_freem(*mm);
2741 			/* success, back copy */
2742 			*mm = m;
2743 		} else {
2744 			/* We are in trouble in the mbuf world .. yikes */
2745 			m = *mm;
2746 		}
2747 	}
2748 	/* get pointer to the first chunk header */
2749 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2750 	    sizeof(struct sctp_chunkhdr),
2751 	    (uint8_t *)&chunk_buf);
2752 	if (ch == NULL) {
2753 		return (1);
2754 	}
2755 	/*
2756 	 * process all DATA chunks...
2757 	 */
2758 	*high_tsn = asoc->cumulative_tsn;
2759 	break_flag = 0;
2760 	asoc->data_pkts_seen++;
2761 	while (stop_proc == 0) {
2762 		/* validate chunk length */
2763 		chk_length = ntohs(ch->chunk_length);
2764 		if (length - *offset < chk_length) {
2765 			/* all done, mutilated chunk */
2766 			stop_proc = 1;
2767 			continue;
2768 		}
2769 		if ((asoc->idata_supported == 1) &&
2770 		    (ch->chunk_type == SCTP_DATA)) {
2771 			struct mbuf *op_err;
2772 			char msg[SCTP_DIAG_INFO_LEN];
2773 
2774 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2775 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2776 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2777 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2778 			return (2);
2779 		}
2780 		if ((asoc->idata_supported == 0) &&
2781 		    (ch->chunk_type == SCTP_IDATA)) {
2782 			struct mbuf *op_err;
2783 			char msg[SCTP_DIAG_INFO_LEN];
2784 
2785 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2786 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2787 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2788 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2789 			return (2);
2790 		}
2791 		if ((ch->chunk_type == SCTP_DATA) ||
2792 		    (ch->chunk_type == SCTP_IDATA)) {
2793 			uint16_t clen;
2794 
2795 			if (ch->chunk_type == SCTP_DATA) {
2796 				clen = sizeof(struct sctp_data_chunk);
2797 			} else {
2798 				clen = sizeof(struct sctp_idata_chunk);
2799 			}
2800 			if (chk_length < clen) {
2801 				/*
2802 				 * Need to send an abort since we had an
2803 				 * invalid data chunk.
2804 				 */
2805 				struct mbuf *op_err;
2806 				char msg[SCTP_DIAG_INFO_LEN];
2807 
2808 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2809 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2810 				    chk_length);
2811 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2812 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2813 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2814 				return (2);
2815 			}
2816 #ifdef SCTP_AUDITING_ENABLED
2817 			sctp_audit_log(0xB1, 0);
2818 #endif
2819 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2820 				last_chunk = 1;
2821 			} else {
2822 				last_chunk = 0;
2823 			}
2824 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2825 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2826 			    last_chunk, ch->chunk_type)) {
2827 				num_chunks++;
2828 			}
2829 			if (abort_flag)
2830 				return (2);
2831 
2832 			if (break_flag) {
2833 				/*
2834 				 * Set because of out of rwnd space and no
2835 				 * drop rep space left.
2836 				 */
2837 				stop_proc = 1;
2838 				continue;
2839 			}
2840 		} else {
2841 			/* not a data chunk in the data region */
2842 			switch (ch->chunk_type) {
2843 			case SCTP_INITIATION:
2844 			case SCTP_INITIATION_ACK:
2845 			case SCTP_SELECTIVE_ACK:
2846 			case SCTP_NR_SELECTIVE_ACK:
2847 			case SCTP_HEARTBEAT_REQUEST:
2848 			case SCTP_HEARTBEAT_ACK:
2849 			case SCTP_ABORT_ASSOCIATION:
2850 			case SCTP_SHUTDOWN:
2851 			case SCTP_SHUTDOWN_ACK:
2852 			case SCTP_OPERATION_ERROR:
2853 			case SCTP_COOKIE_ECHO:
2854 			case SCTP_COOKIE_ACK:
2855 			case SCTP_ECN_ECHO:
2856 			case SCTP_ECN_CWR:
2857 			case SCTP_SHUTDOWN_COMPLETE:
2858 			case SCTP_AUTHENTICATION:
2859 			case SCTP_ASCONF_ACK:
2860 			case SCTP_PACKET_DROPPED:
2861 			case SCTP_STREAM_RESET:
2862 			case SCTP_FORWARD_CUM_TSN:
2863 			case SCTP_ASCONF:
2864 				{
2865 					/*
2866 					 * Now, what do we do with KNOWN
2867 					 * chunks that are NOT in the right
2868 					 * place?
2869 					 *
2870 					 * For now, I do nothing but ignore
2871 					 * them. We may later want to add
2872 					 * sysctl stuff to switch out and do
2873 					 * either an ABORT() or possibly
2874 					 * process them.
2875 					 */
2876 					struct mbuf *op_err;
2877 					char msg[SCTP_DIAG_INFO_LEN];
2878 
2879 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2880 					    ch->chunk_type);
2881 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2882 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2883 					return (2);
2884 				}
2885 			default:
2886 				/*
2887 				 * Unknown chunk type: use bit rules after
2888 				 * checking length
2889 				 */
2890 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
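				/*
				 * Note: per RFC 4960, Section 3.2, bit 0x40
				 * of an unrecognized chunk type means "report
				 * it in an ERROR chunk" and bit 0x80 means
				 * "skip it and continue processing"; with
				 * 0x80 clear the rest of the packet is
				 * discarded.
				 */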
2891 					/*
2892 					 * Need to send an abort since we
2893 					 * had an invalid chunk.
2894 					 */
2895 					struct mbuf *op_err;
2896 					char msg[SCTP_DIAG_INFO_LEN];
2897 
2898 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2899 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2900 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2901 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2902 					return (2);
2903 				}
2904 				if (ch->chunk_type & 0x40) {
2905 					/* Add an error report to the queue */
2906 					struct mbuf *op_err;
2907 					struct sctp_gen_error_cause *cause;
2908 
2909 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2910 					    0, M_NOWAIT, 1, MT_DATA);
2911 					if (op_err != NULL) {
2912 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2913 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2914 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2915 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2916 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2917 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2918 							sctp_queue_op_err(stcb, op_err);
2919 						} else {
2920 							sctp_m_freem(op_err);
2921 						}
2922 					}
2923 				}
2924 				if ((ch->chunk_type & 0x80) == 0) {
2925 					/* discard the rest of this packet */
2926 					stop_proc = 1;
2927 				}	/* else skip this bad chunk and
2928 					 * continue... */
2929 				break;
2930 			}	/* switch of chunk type */
2931 		}
2932 		*offset += SCTP_SIZE32(chk_length);
2933 		if ((*offset >= length) || stop_proc) {
2934 			/* no more data left in the mbuf chain */
2935 			stop_proc = 1;
2936 			continue;
2937 		}
2938 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2939 		    sizeof(struct sctp_chunkhdr),
2940 		    (uint8_t *)&chunk_buf);
2941 		if (ch == NULL) {
2942 			*offset = length;
2943 			stop_proc = 1;
2944 			continue;
2945 		}
2946 	}
2947 	if (break_flag) {
2948 		/*
2949 		 * we need to report rwnd overrun drops.
2950 		 */
2951 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2952 	}
2953 	if (num_chunks) {
2954 		/*
2955 		 * Did we get data? If so, update the time for auto-close and
2956 		 * give peer credit for being alive.
2957 		 */
2958 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2959 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2960 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2961 			    stcb->asoc.overall_error_count,
2962 			    0,
2963 			    SCTP_FROM_SCTP_INDATA,
2964 			    __LINE__);
2965 		}
2966 		stcb->asoc.overall_error_count = 0;
2967 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2968 	}
2969 	/* If we are in SHUTDOWN-SENT, assure that we ack right away. */
2970 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2972 		stcb->asoc.send_sack = 1;
2973 	}
2974 	/* Start a sack timer or QUEUE a SACK for sending */
2975 	sctp_sack_check(stcb, was_a_gap);
2976 	return (0);
2977 }
2978 
2979 static int
2980 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2981     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2982     int *num_frs,
2983     uint32_t *biggest_newly_acked_tsn,
2984     uint32_t *this_sack_lowest_newack,
2985     int *rto_ok)
2986 {
2987 	struct sctp_tmit_chunk *tp1;
2988 	unsigned int theTSN;
2989 	int j, wake_him = 0, circled = 0;
2990 
2991 	/* Recover the tp1 we last saw */
2992 	tp1 = *p_tp1;
2993 	if (tp1 == NULL) {
2994 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2995 	}
2996 	for (j = frag_strt; j <= frag_end; j++) {
2997 		theTSN = j + last_tsn;
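		/*
		 * Note: Gap Ack Block boundaries are offsets relative to the
		 * SACK's cumulative TSN ack (last_tsn), so theTSN is the
		 * absolute TSN acked by this iteration.
		 */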
2998 		while (tp1) {
2999 			if (tp1->rec.data.doing_fast_retransmit)
3000 				(*num_frs) += 1;
3001 
3002 			/*-
3003 			 * CMT: CUCv2 algorithm. For each TSN being
3004 			 * processed from the sent queue, track the
3005 			 * next expected pseudo-cumack, or
3006 			 * rtx_pseudo_cumack, if required. Separate
3007 			 * cumack trackers for first transmissions,
3008 			 * and retransmissions.
3009 			 */
3010 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3011 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
3012 			    (tp1->snd_count == 1)) {
3013 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
3014 				tp1->whoTo->find_pseudo_cumack = 0;
3015 			}
3016 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3017 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3018 			    (tp1->snd_count > 1)) {
3019 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3020 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3021 			}
3022 			if (tp1->rec.data.tsn == theTSN) {
3023 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3024 					/*-
3025 					 * must be held until
3026 					 * cum-ack passes
3027 					 */
3028 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3029 						/*-
3030 						 * If it is less than RESEND, it is
3031 						 * now no longer in flight.
3032 						 * Higher values may already be set
3033 						 * via previous Gap Ack Blocks...
3034 						 * i.e. ACKED or RESEND.
3035 						 */
3036 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3037 						    *biggest_newly_acked_tsn)) {
3038 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3039 						}
3040 						/*-
3041 						 * CMT: SFR algo (and HTNA) - set
3042 						 * saw_newack to 1 for dest being
3043 						 * newly acked. update
3044 						 * this_sack_highest_newack if
3045 						 * appropriate.
3046 						 */
3047 						if (tp1->rec.data.chunk_was_revoked == 0)
3048 							tp1->whoTo->saw_newack = 1;
3049 
3050 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3051 						    tp1->whoTo->this_sack_highest_newack)) {
3052 							tp1->whoTo->this_sack_highest_newack =
3053 							    tp1->rec.data.tsn;
3054 						}
3055 						/*-
3056 						 * CMT DAC algo: also update
3057 						 * this_sack_lowest_newack
3058 						 */
3059 						if (*this_sack_lowest_newack == 0) {
3060 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3061 								sctp_log_sack(*this_sack_lowest_newack,
3062 								    last_tsn,
3063 								    tp1->rec.data.tsn,
3064 								    0,
3065 								    0,
3066 								    SCTP_LOG_TSN_ACKED);
3067 							}
3068 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3069 						}
3070 						/*-
3071 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3072 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3073 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3074 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3075 						 * Separate pseudo_cumack trackers for first transmissions and
3076 						 * retransmissions.
3077 						 */
3078 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3079 							if (tp1->rec.data.chunk_was_revoked == 0) {
3080 								tp1->whoTo->new_pseudo_cumack = 1;
3081 							}
3082 							tp1->whoTo->find_pseudo_cumack = 1;
3083 						}
3084 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3085 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3086 						}
3087 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3088 							if (tp1->rec.data.chunk_was_revoked == 0) {
3089 								tp1->whoTo->new_pseudo_cumack = 1;
3090 							}
3091 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3092 						}
3093 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3094 							sctp_log_sack(*biggest_newly_acked_tsn,
3095 							    last_tsn,
3096 							    tp1->rec.data.tsn,
3097 							    frag_strt,
3098 							    frag_end,
3099 							    SCTP_LOG_TSN_ACKED);
3100 						}
3101 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3102 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3103 							    tp1->whoTo->flight_size,
3104 							    tp1->book_size,
3105 							    (uint32_t)(uintptr_t)tp1->whoTo,
3106 							    tp1->rec.data.tsn);
3107 						}
3108 						sctp_flight_size_decrease(tp1);
3109 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3110 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3111 							    tp1);
3112 						}
3113 						sctp_total_flight_decrease(stcb, tp1);
3114 
3115 						tp1->whoTo->net_ack += tp1->send_size;
3116 						if (tp1->snd_count < 2) {
3117 							/*-
3118 							 * True non-retransmitted chunk
3119 							 */
3120 							tp1->whoTo->net_ack2 += tp1->send_size;
3121 
3122 							/*-
3123 							 * update RTO too ?
3124 							 */
3125 							if (tp1->do_rtt) {
3126 								if (*rto_ok &&
3127 								    sctp_calculate_rto(stcb,
3128 								    &stcb->asoc,
3129 								    tp1->whoTo,
3130 								    &tp1->sent_rcv_time,
3131 								    SCTP_RTT_FROM_DATA)) {
3132 									*rto_ok = 0;
3133 								}
3134 								if (tp1->whoTo->rto_needed == 0) {
3135 									tp1->whoTo->rto_needed = 1;
3136 								}
3137 								tp1->do_rtt = 0;
3138 							}
3139 						}
3140 
3141 					}
3142 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3143 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3144 						    stcb->asoc.this_sack_highest_gap)) {
3145 							stcb->asoc.this_sack_highest_gap =
3146 							    tp1->rec.data.tsn;
3147 						}
3148 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3149 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3150 #ifdef SCTP_AUDITING_ENABLED
3151 							sctp_audit_log(0xB2,
3152 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3153 #endif
3154 						}
3155 					}
3156 					/*-
3157 					 * All chunks NOT UNSENT fall through here and are marked
3158 					 * (but leave PR-SCTP ones that are marked to skip alone)
3159 					 */
3160 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3161 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3162 						tp1->sent = SCTP_DATAGRAM_MARKED;
3163 					}
3164 					if (tp1->rec.data.chunk_was_revoked) {
3165 						/* deflate the cwnd */
3166 						tp1->whoTo->cwnd -= tp1->book_size;
3167 						tp1->rec.data.chunk_was_revoked = 0;
3168 					}
3169 					/* NR Sack code here */
3170 					if (nr_sacking &&
3171 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3172 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3173 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3174 #ifdef INVARIANTS
3175 						} else {
3176 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3177 #endif
3178 						}
3179 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3180 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3181 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3182 							stcb->asoc.trigger_reset = 1;
3183 						}
3184 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3185 						if (tp1->data) {
3186 							/*
3187 							 * sa_ignore
3188 							 * NO_NULL_CHK
3189 							 */
3190 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3191 							sctp_m_freem(tp1->data);
3192 							tp1->data = NULL;
3193 						}
3194 						wake_him++;
3195 					}
3196 				}
3197 				break;
3198 			}	/* if (tp1->tsn == theTSN) */
3199 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3200 				break;
3201 			}
3202 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3203 			if ((tp1 == NULL) && (circled == 0)) {
3204 				circled++;
3205 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3206 			}
3207 		}		/* end while (tp1) */
3208 		if (tp1 == NULL) {
3209 			circled = 0;
3210 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3211 		}
3212 		/* In case the fragments were not in order we must reset */
3213 	}			/* end for (j = fragStart */
3214 	*p_tp1 = tp1;
3215 	return (wake_him);	/* Return value only used for nr-sack */
3216 }
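
/*
 * Illustrative sketch, not used by the code above: the SCTP_TSN_GT()
 * and SCTP_TSN_GE() comparisons throughout this file are serial number
 * arithmetic (RFC 1982) on 32-bit TSNs, so ordering stays correct when
 * the TSN space wraps.  A minimal stand-alone equivalent, with a
 * hypothetical name:
 */
static inline int
example_tsn_gt(uint32_t a, uint32_t b)
{
	/* "a is newer than b" iff the signed distance from b to a is > 0. */
	return (((int32_t)(a - b)) > 0);
}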
3217 
3218 
3219 static int
3220 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3221     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3222     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3223     int num_seg, int num_nr_seg, int *rto_ok)
3224 {
3225 	struct sctp_gap_ack_block *frag, block;
3226 	struct sctp_tmit_chunk *tp1;
3227 	int i;
3228 	int num_frs = 0;
3229 	int chunk_freed;
3230 	int non_revocable;
3231 	uint16_t frag_strt, frag_end, prev_frag_end;
3232 
3233 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3234 	prev_frag_end = 0;
3235 	chunk_freed = 0;
3236 
3237 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3238 		if (i == num_seg) {
3239 			prev_frag_end = 0;
3240 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3241 		}
3242 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3243 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3244 		*offset += sizeof(block);
3245 		if (frag == NULL) {
3246 			return (chunk_freed);
3247 		}
3248 		frag_strt = ntohs(frag->start);
3249 		frag_end = ntohs(frag->end);
3250 
3251 		if (frag_strt > frag_end) {
3252 			/* This gap report is malformed, skip it. */
3253 			continue;
3254 		}
3255 		if (frag_strt <= prev_frag_end) {
3256 			/* This gap report is not in order, so restart. */
3257 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3258 		}
3259 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3260 			*biggest_tsn_acked = last_tsn + frag_end;
3261 		}
3262 		if (i < num_seg) {
3263 			non_revocable = 0;
3264 		} else {
3265 			non_revocable = 1;
3266 		}
3267 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3268 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3269 		    this_sack_lowest_newack, rto_ok)) {
3270 			chunk_freed = 1;
3271 		}
3272 		prev_frag_end = frag_end;
3273 	}
3274 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3275 		if (num_frs)
3276 			sctp_log_fr(*biggest_tsn_acked,
3277 			    *biggest_newly_acked_tsn,
3278 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3279 	}
3280 	return (chunk_freed);
3281 }
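
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * logic): each gap-ack block carries 16-bit start/end offsets relative
 * to the SACK's cumulative TSN ack, so the absolute TSN range a block
 * covers is derived as below.  This is the arithmetic
 * sctp_handle_segments() applies per block before walking the sent
 * queue.
 */
static inline void
example_gap_block_range(uint32_t cum_ack, uint16_t frag_strt,
    uint16_t frag_end, uint32_t *first_tsn, uint32_t *last_tsn)
{
	/* A well-formed block satisfies frag_strt <= frag_end. */
	*first_tsn = cum_ack + frag_strt;	/* lowest TSN the block acks */
	*last_tsn = cum_ack + frag_end;		/* highest TSN the block acks */
}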
3282 
3283 static void
3284 sctp_check_for_revoked(struct sctp_tcb *stcb,
3285     struct sctp_association *asoc, uint32_t cumack,
3286     uint32_t biggest_tsn_acked)
3287 {
3288 	struct sctp_tmit_chunk *tp1;
3289 
3290 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3291 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3292 			/*
3293 			 * ok this guy is either ACKED or MARKED. If it is
3294 			 * ACKED it has been previously acked but not this
3295 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3296 			 * again.
3297 			 */
3298 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3299 				break;
3300 			}
3301 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3302 				/* it has been revoked */
3303 				tp1->sent = SCTP_DATAGRAM_SENT;
3304 				tp1->rec.data.chunk_was_revoked = 1;
3305 				/*
3306 				 * We must add this stuff back in to assure
3307 				 * timers and such get started.
3308 				 */
3309 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3310 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3311 					    tp1->whoTo->flight_size,
3312 					    tp1->book_size,
3313 					    (uint32_t)(uintptr_t)tp1->whoTo,
3314 					    tp1->rec.data.tsn);
3315 				}
3316 				sctp_flight_size_increase(tp1);
3317 				sctp_total_flight_increase(stcb, tp1);
3318 				/*
3319 				 * We inflate the cwnd to compensate for our
3320 				 * artificial inflation of the flight_size.
3321 				 */
3322 				tp1->whoTo->cwnd += tp1->book_size;
3323 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3324 					sctp_log_sack(asoc->last_acked_seq,
3325 					    cumack,
3326 					    tp1->rec.data.tsn,
3327 					    0,
3328 					    0,
3329 					    SCTP_LOG_TSN_REVOKED);
3330 				}
3331 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3332 				/* it has been re-acked in this SACK */
3333 				tp1->sent = SCTP_DATAGRAM_ACKED;
3334 			}
3335 		}
3336 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3337 			break;
3338 	}
3339 }
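
/*
 * Illustrative sketch (hypothetical enum and function, not part of the
 * original logic): for TSNs above the cumack but at or below the
 * biggest TSN acked, the revocation pass above is a two-way state
 * transition.  Chunks the peer acked earlier but left out of this SACK
 * are revoked back to SENT (and re-enter the flight); chunks covered
 * again this time move from MARKED to ACKED.
 */
enum example_chunk_state { EX_SENT, EX_MARKED, EX_ACKED };

static enum example_chunk_state
example_revoke_transition(enum example_chunk_state cur)
{
	switch (cur) {
	case EX_ACKED:
		return (EX_SENT);	/* previously acked, not re-acked: revoked */
	case EX_MARKED:
		return (EX_ACKED);	/* acked again by this SACK */
	default:
		return (cur);
	}
}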
3340 
3341 
3342 static void
3343 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3344     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3345 {
3346 	struct sctp_tmit_chunk *tp1;
3347 	int strike_flag = 0;
3348 	struct timeval now;
3349 	int tot_retrans = 0;
3350 	uint32_t sending_seq;
3351 	struct sctp_nets *net;
3352 	int num_dests_sacked = 0;
3353 
3354 	/*
3355 	 * select the sending_seq, this is either the next thing ready to be
3356 	 * sent but not transmitted, OR, the next seq we assign.
3357 	 */
3358 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3359 	if (tp1 == NULL) {
3360 		sending_seq = asoc->sending_seq;
3361 	} else {
3362 		sending_seq = tp1->rec.data.tsn;
3363 	}
3364 
3365 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3366 	if ((asoc->sctp_cmt_on_off > 0) &&
3367 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3368 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3369 			if (net->saw_newack)
3370 				num_dests_sacked++;
3371 		}
3372 	}
3373 	if (stcb->asoc.prsctp_supported) {
3374 		(void)SCTP_GETTIME_TIMEVAL(&now);
3375 	}
3376 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3377 		strike_flag = 0;
3378 		if (tp1->no_fr_allowed) {
3379 			/* this one had a timeout or something */
3380 			continue;
3381 		}
3382 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3383 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3384 				sctp_log_fr(biggest_tsn_newly_acked,
3385 				    tp1->rec.data.tsn,
3386 				    tp1->sent,
3387 				    SCTP_FR_LOG_CHECK_STRIKE);
3388 		}
3389 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3390 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3391 			/* done */
3392 			break;
3393 		}
3394 		if (stcb->asoc.prsctp_supported) {
3395 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3396 				/* Is it expired? */
3397 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3398 					/* Yes so drop it */
3399 					if (tp1->data != NULL) {
3400 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3401 						    SCTP_SO_NOT_LOCKED);
3402 					}
3403 					continue;
3404 				}
3405 			}
3406 
3407 		}
3408 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3409 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3410 			/* we are beyond the tsn in the sack */
3411 			break;
3412 		}
3413 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3414 			/* either a RESEND, ACKED, or MARKED */
3415 			/* skip */
3416 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3417 				/* Continue striking FWD-TSN chunks */
3418 				tp1->rec.data.fwd_tsn_cnt++;
3419 			}
3420 			continue;
3421 		}
3422 		/*
3423 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3424 		 */
3425 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3426 			/*
3427 			 * No new acks were received for data sent to this
3428 			 * dest. Therefore, according to the SFR algo for
3429 			 * CMT, no data sent to this dest can be marked for
3430 			 * FR using this SACK.
3431 			 */
3432 			continue;
3433 		} else if (tp1->whoTo &&
3434 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3435 			    tp1->whoTo->this_sack_highest_newack) &&
3436 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3437 			/*
3438 			 * CMT: New acks were received for data sent to
3439 			 * this dest. But no new acks were seen for data
3440 			 * sent after tp1. Therefore, according to the SFR
3441 			 * algo for CMT, tp1 cannot be marked for FR using
3442 			 * this SACK. This step covers part of the DAC algo
3443 			 * and the HTNA algo as well.
3444 			 */
3445 			continue;
3446 		}
3447 		/*
3448 		 * Here we check to see if we have already done a FR
3449 		 * and if so we see if the biggest TSN we saw in the sack is
3450 		 * smaller than the recovery point. If so we don't strike
3451 		 * the tsn... otherwise we CAN strike the TSN.
3452 		 */
3453 		/*
3454 		 * @@@ JRI: Check for CMT if (accum_moved &&
3455 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3456 		 * 0)) {
3457 		 */
3458 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3459 			/*
3460 			 * Strike the TSN if in fast-recovery and cum-ack
3461 			 * moved.
3462 			 */
3463 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3464 				sctp_log_fr(biggest_tsn_newly_acked,
3465 				    tp1->rec.data.tsn,
3466 				    tp1->sent,
3467 				    SCTP_FR_LOG_STRIKE_CHUNK);
3468 			}
3469 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3470 				tp1->sent++;
3471 			}
3472 			if ((asoc->sctp_cmt_on_off > 0) &&
3473 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3474 				/*
3475 				 * CMT DAC algorithm: If SACK flag is set to
3476 				 * 0, then lowest_newack test will not pass
3477 				 * because it would have been set to the
3478 				 * cumack earlier. If the chunk is not already
3479 				 * marked for retransmission, this is not a
3480 				 * mixed SACK, and tp1 is not between two
3481 				 * sacked TSNs, then mark it by one more. NOTE
3482 				 * that we mark one additional time since the
3483 				 * SACK DAC flag indicates that two packets
3484 				 * have been received after this missing TSN.
3485 				 */
3486 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3487 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3488 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3489 						sctp_log_fr(16 + num_dests_sacked,
3490 						    tp1->rec.data.tsn,
3491 						    tp1->sent,
3492 						    SCTP_FR_LOG_STRIKE_CHUNK);
3493 					}
3494 					tp1->sent++;
3495 				}
3496 			}
3497 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3498 		    (asoc->sctp_cmt_on_off == 0)) {
3499 			/*
3500 			 * For those that have done a FR we must take
3501 			 * special consideration if we strike, i.e. the
3502 			 * biggest_newly_acked must be higher than the
3503 			 * sending_seq at the time we did the FR.
3504 			 */
3505 			if (
3506 #ifdef SCTP_FR_TO_ALTERNATE
3507 			/*
3508 			 * If FR's go to new networks, then we must only do
3509 			 * this for singly homed asoc's. However if the FR's
3510 			 * go to the same network (Armando's work) then it's
3511 			 * ok to FR multiple times.
3512 			 */
3513 			    (asoc->numnets < 2)
3514 #else
3515 			    (1)
3516 #endif
3517 			    ) {
3518 
3519 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3520 				    tp1->rec.data.fast_retran_tsn)) {
3521 					/*
3522 					 * Strike the TSN, since this ack is
3523 					 * beyond where things were when we
3524 					 * did a FR.
3525 					 */
3526 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3527 						sctp_log_fr(biggest_tsn_newly_acked,
3528 						    tp1->rec.data.tsn,
3529 						    tp1->sent,
3530 						    SCTP_FR_LOG_STRIKE_CHUNK);
3531 					}
3532 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3533 						tp1->sent++;
3534 					}
3535 					strike_flag = 1;
3536 					if ((asoc->sctp_cmt_on_off > 0) &&
3537 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3538 						/*
3539 						 * CMT DAC algorithm: If
3540 						 * SACK flag is set to 0,
3541 						 * then lowest_newack test
3542 						 * will not pass because it
3543 						 * would have been set to
3544 						 * the cumack earlier. If
3545 						 * the chunk is not already
3546 						 * marked for retransmission,
3547 						 * this is not a mixed SACK,
3548 						 * and tp1 is not between two
3549 						 * sacked TSNs, then mark it
3550 						 * by one more. NOTE that we
3551 						 * mark one additional time
3552 						 * since the SACK DAC flag
3553 						 * indicates that two packets
3554 						 * have been received after
3555 						 * this missing TSN.
3556 						 */
3557 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3558 						    (num_dests_sacked == 1) &&
3559 						    SCTP_TSN_GT(this_sack_lowest_newack,
3560 						    tp1->rec.data.tsn)) {
3561 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3562 								sctp_log_fr(32 + num_dests_sacked,
3563 								    tp1->rec.data.tsn,
3564 								    tp1->sent,
3565 								    SCTP_FR_LOG_STRIKE_CHUNK);
3566 							}
3567 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3568 								tp1->sent++;
3569 							}
3570 						}
3571 					}
3572 				}
3573 			}
3574 			/*
3575 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3576 			 * algo covers HTNA.
3577 			 */
3578 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3579 		    biggest_tsn_newly_acked)) {
3580 			/*
3581 			 * We don't strike these: this is the HTNA
3582 			 * algorithm, i.e. we don't strike if our TSN is
3583 			 * larger than the Highest TSN Newly Acked.
3584 			 */
3585 			;
3586 		} else {
3587 			/* Strike the TSN */
3588 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3589 				sctp_log_fr(biggest_tsn_newly_acked,
3590 				    tp1->rec.data.tsn,
3591 				    tp1->sent,
3592 				    SCTP_FR_LOG_STRIKE_CHUNK);
3593 			}
3594 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3595 				tp1->sent++;
3596 			}
3597 			if ((asoc->sctp_cmt_on_off > 0) &&
3598 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3599 				/*
3600 				 * CMT DAC algorithm: If SACK flag is set to
3601 				 * 0, then lowest_newack test will not pass
3602 				 * because it would have been set to the
3603 				 * cumack earlier. If the chunk is not already
3604 				 * marked for retransmission, this is not a
3605 				 * mixed SACK, and tp1 is not between two
3606 				 * sacked TSNs, then mark it by one more. NOTE
3607 				 * that we mark one additional time since the
3608 				 * SACK DAC flag indicates that two packets
3609 				 * have been received after this missing TSN.
3610 				 */
3611 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3612 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3613 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3614 						sctp_log_fr(48 + num_dests_sacked,
3615 						    tp1->rec.data.tsn,
3616 						    tp1->sent,
3617 						    SCTP_FR_LOG_STRIKE_CHUNK);
3618 					}
3619 					tp1->sent++;
3620 				}
3621 			}
3622 		}
3623 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3624 			struct sctp_nets *alt;
3625 
3626 			/* fix counts and things */
3627 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3628 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3629 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3630 				    tp1->book_size,
3631 				    (uint32_t)(uintptr_t)tp1->whoTo,
3632 				    tp1->rec.data.tsn);
3633 			}
3634 			if (tp1->whoTo) {
3635 				tp1->whoTo->net_ack++;
3636 				sctp_flight_size_decrease(tp1);
3637 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3638 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3639 					    tp1);
3640 				}
3641 			}
3642 
3643 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3644 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3645 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3646 			}
3647 			/* add back to the rwnd */
3648 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3649 
3650 			/* remove from the total flight */
3651 			sctp_total_flight_decrease(stcb, tp1);
3652 
3653 			if ((stcb->asoc.prsctp_supported) &&
3654 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3655 				/*
3656 				 * Has it been retransmitted tv_sec times? -
3657 				 * we store the retran count there.
3658 				 */
3659 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3660 					/* Yes, so drop it */
3661 					if (tp1->data != NULL) {
3662 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3663 						    SCTP_SO_NOT_LOCKED);
3664 					}
3665 					/* Make sure to flag we had a FR */
3666 					if (tp1->whoTo != NULL) {
3667 						tp1->whoTo->net_ack++;
3668 					}
3669 					continue;
3670 				}
3671 			}
3672 			/*
3673 			 * SCTP_PRINTF("OK, we are now ready to FR this
3674 			 * guy\n");
3675 			 */
3676 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3677 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3678 				    0, SCTP_FR_MARKED);
3679 			}
3680 			if (strike_flag) {
3681 				/* This is a subsequent FR */
3682 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3683 			}
3684 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3685 			if (asoc->sctp_cmt_on_off > 0) {
3686 				/*
3687 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3688 				 * If CMT is being used, then pick dest with
3689 				 * largest ssthresh for any retransmission.
3690 				 */
3691 				tp1->no_fr_allowed = 1;
3692 				alt = tp1->whoTo;
3693 				/* sa_ignore NO_NULL_CHK */
3694 				if (asoc->sctp_cmt_pf > 0) {
3695 					/*
3696 					 * JRS 5/18/07 - If CMT PF is on,
3697 					 * use the PF version of
3698 					 * find_alt_net()
3699 					 */
3700 					alt = sctp_find_alternate_net(stcb, alt, 2);
3701 				} else {
3702 					/*
3703 					 * JRS 5/18/07 - If only CMT is on,
3704 					 * use the CMT version of
3705 					 * find_alt_net()
3706 					 */
3707 					/* sa_ignore NO_NULL_CHK */
3708 					alt = sctp_find_alternate_net(stcb, alt, 1);
3709 				}
3710 				if (alt == NULL) {
3711 					alt = tp1->whoTo;
3712 				}
3713 				/*
3714 				 * CUCv2: If a different dest is picked for
3715 				 * the retransmission, then new
3716 				 * (rtx-)pseudo_cumack needs to be tracked
3717 				 * for orig dest. Let CUCv2 track new (rtx-)
3718 				 * pseudo-cumack always.
3719 				 */
3720 				if (tp1->whoTo) {
3721 					tp1->whoTo->find_pseudo_cumack = 1;
3722 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3723 				}
3724 
3725 			} else {	/* CMT is OFF */
3726 
3727 #ifdef SCTP_FR_TO_ALTERNATE
3728 				/* Can we find an alternate? */
3729 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3730 #else
3731 				/*
3732 				 * default behavior is to NOT retransmit
3733 				 * FR's to an alternate. Armando Caro's
3734 				 * paper details why.
3735 				 */
3736 				alt = tp1->whoTo;
3737 #endif
3738 			}
3739 
3740 			tp1->rec.data.doing_fast_retransmit = 1;
3741 			tot_retrans++;
3742 			/* mark the sending seq for possible subsequent FR's */
3743 			/*
3744 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3745 			 * (uint32_t)tpi->rec.data.tsn);
3746 			 */
3747 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3748 				/*
3749 				 * If the send queue is empty, sending_seq
3750 				 * is the next sequence number that will be
3751 				 * assigned, so it marks one past the last
3752 				 * TSN we sent.
3753 				 */
3754 				tp1->rec.data.fast_retran_tsn = sending_seq;
3755 			} else {
3756 				/*
3757 				 * If there are chunks on the send queue
3758 				 * (unsent data that has made it from the
3759 				 * stream queues but not out the door), we
3760 				 * take the first one, which will have the
3761 				 * lowest TSN, as the marker of the last
3762 				 * TSN we sent.
3763 				 */
3764 				struct sctp_tmit_chunk *ttt;
3765 
3766 				ttt = TAILQ_FIRST(&asoc->send_queue);
3767 				tp1->rec.data.fast_retran_tsn =
3768 				    ttt->rec.data.tsn;
3769 			}
3770 
3771 			if (tp1->do_rtt) {
3772 				/*
3773 				 * this guy had an RTO calculation pending on
3774 				 * it, cancel it
3775 				 */
3776 				if ((tp1->whoTo != NULL) &&
3777 				    (tp1->whoTo->rto_needed == 0)) {
3778 					tp1->whoTo->rto_needed = 1;
3779 				}
3780 				tp1->do_rtt = 0;
3781 			}
3782 			if (alt != tp1->whoTo) {
3783 				/* yes, there is an alternate. */
3784 				sctp_free_remote_addr(tp1->whoTo);
3785 				/* sa_ignore FREED_MEMORY */
3786 				tp1->whoTo = alt;
3787 				atomic_add_int(&alt->ref_count, 1);
3788 			}
3789 		}
3790 	}
3791 }
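
/*
 * Illustrative sketch (hypothetical constant and function, not part of
 * the original logic): the marking above is a "strike" scheme.  Each
 * SACK that implies a chunk was missed bumps its strike count, and
 * once the count reaches the resend threshold the chunk is queued for
 * fast retransmission.  Under the CMT DAC rule a qualifying chunk can
 * take one extra strike, since the DAC flag says two packets arrived
 * beyond the hole.
 */
#define	EX_STRIKE_THRESHOLD	3	/* assumed strikes before FR */

static int
example_strike(int *strikes, int dac_extra_applies)
{
	if (*strikes < EX_STRIKE_THRESHOLD)
		(*strikes)++;
	if (dac_extra_applies && *strikes < EX_STRIKE_THRESHOLD)
		(*strikes)++;	/* DAC: mark one additional time */
	return (*strikes >= EX_STRIKE_THRESHOLD);	/* ready for FR? */
}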
3792 
3793 struct sctp_tmit_chunk *
3794 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3795     struct sctp_association *asoc)
3796 {
3797 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3798 	struct timeval now;
3799 	int now_filled = 0;
3800 
3801 	if (asoc->prsctp_supported == 0) {
3802 		return (NULL);
3803 	}
3804 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3805 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3806 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3807 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3808 			/* no chance to advance, out of here */
3809 			break;
3810 		}
3811 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3812 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3813 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3814 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3815 				    asoc->advanced_peer_ack_point,
3816 				    tp1->rec.data.tsn, 0, 0);
3817 			}
3818 		}
3819 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3820 			/*
3821 			 * We can't fwd-tsn past any that are reliable aka
3822 			 * retransmitted until the asoc fails.
3823 			 */
3824 			break;
3825 		}
3826 		if (!now_filled) {
3827 			(void)SCTP_GETTIME_TIMEVAL(&now);
3828 			now_filled = 1;
3829 		}
3830 		/*
3831 		 * Now we have a chunk which is marked for another
3832 		 * retransmission to a PR-stream, but it may have run out
3833 		 * of its chances already OR has been marked to skip now.
3834 		 * Can we skip it if it's a resend?
3835 		 */
3836 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3837 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3838 			/*
3839 			 * Now is this one marked for resend and its time is
3840 			 * now up?
3841 			 */
3842 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3843 				/* Yes so drop it */
3844 				if (tp1->data) {
3845 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3846 					    1, SCTP_SO_NOT_LOCKED);
3847 				}
3848 			} else {
3849 				/*
3850 				 * No, we are done when we hit one marked
3851 				 * for resend whose time has not expired.
3852 				 */
3853 				break;
3854 			}
3855 		}
3856 		/*
3857 		 * Ok now if this chunk is marked to drop it we can clean up
3858 		 * the chunk, advance our peer ack point and we can check
3859 		 * the next chunk.
3860 		 */
3861 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3862 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3863 			/* the advanced peer ack point goes forward */
3864 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3865 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3866 				a_adv = tp1;
3867 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3868 				/* No update but we do save the chk */
3869 				a_adv = tp1;
3870 			}
3871 		} else {
3872 			/*
3873 			 * If it is still in RESEND we can advance no
3874 			 * further
3875 			 */
3876 			break;
3877 		}
3878 	}
3879 	return (a_adv);
3880 }
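
/*
 * Illustrative sketch (hypothetical function, not part of the original
 * logic): stripped of the kernel types, the scan above is the RFC 3758
 * "Advanced.Peer.Ack.Point" rule: walk the sent queue in TSN order,
 * move the ack point over chunks that were abandoned (or nr-acked),
 * and stop at the first chunk that must still be delivered reliably.
 */
static uint32_t
example_advance_ack_point(uint32_t adv_point, const uint32_t *tsn,
    const int *abandoned, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (!abandoned[i])
			break;		/* reliable chunk: cannot skip past it */
		if (((int32_t)(tsn[i] - adv_point)) > 0)
			adv_point = tsn[i];	/* serial-arithmetic forward move */
	}
	return (adv_point);
}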
3881 
3882 static int
3883 sctp_fs_audit(struct sctp_association *asoc)
3884 {
3885 	struct sctp_tmit_chunk *chk;
3886 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3887 	int ret;
3888 #ifndef INVARIANTS
3889 	int entry_flight, entry_cnt;
3890 #endif
3891 
3892 	ret = 0;
3893 #ifndef INVARIANTS
3894 	entry_flight = asoc->total_flight;
3895 	entry_cnt = asoc->total_flight_count;
3896 #endif
3897 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3898 		return (0);
3899 
3900 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3901 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3902 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3903 			    chk->rec.data.tsn,
3904 			    chk->send_size,
3905 			    chk->snd_count);
3906 			inflight++;
3907 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3908 			resend++;
3909 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3910 			inbetween++;
3911 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3912 			above++;
3913 		} else {
3914 			acked++;
3915 		}
3916 	}
3917 
3918 	if ((inflight > 0) || (inbetween > 0)) {
3919 #ifdef INVARIANTS
3920 		panic("Flight size-express incorrect? \n");
3921 #else
3922 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3923 		    entry_flight, entry_cnt);
3924 
3925 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3926 		    inflight, inbetween, resend, above, acked);
3927 		ret = 1;
3928 #endif
3929 	}
3930 	return (ret);
3931 }
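
/*
 * Illustrative sketch (hypothetical function, not part of the original
 * logic): the audit above buckets every chunk on the sent queue by its
 * sent state and flags an inconsistency whenever anything is still
 * counted as in flight, or sits between RESEND and ACKED, at a point
 * where the flight should have drained.  As a plain predicate:
 */
static int
example_flight_consistent(const int *sent_state, int cnt,
    int resend_state, int acked_state)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (sent_state[i] < resend_state)
			return (0);	/* still counted as in flight */
		if (sent_state[i] > resend_state &&
		    sent_state[i] < acked_state)
			return (0);	/* stuck in an in-between marking state */
	}
	return (1);	/* nothing unexpectedly in flight */
}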
3932 
3933 
3934 static void
3935 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3936     struct sctp_association *asoc,
3937     struct sctp_tmit_chunk *tp1)
3938 {
3939 	tp1->window_probe = 0;
3940 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3941 		/* TSNs skipped; we do NOT move back. */
3942 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3943 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3944 		    tp1->book_size,
3945 		    (uint32_t)(uintptr_t)tp1->whoTo,
3946 		    tp1->rec.data.tsn);
3947 		return;
3948 	}
3949 	/* First setup this by shrinking flight */
3950 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3951 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3952 		    tp1);
3953 	}
3954 	sctp_flight_size_decrease(tp1);
3955 	sctp_total_flight_decrease(stcb, tp1);
3956 	/* Now mark for resend */
3957 	tp1->sent = SCTP_DATAGRAM_RESEND;
3958 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3959 
3960 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3961 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3962 		    tp1->whoTo->flight_size,
3963 		    tp1->book_size,
3964 		    (uint32_t)(uintptr_t)tp1->whoTo,
3965 		    tp1->rec.data.tsn);
3966 	}
3967 }
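
/*
 * Illustrative sketch (hypothetical function, not part of the original
 * logic): a window-probe chunk is the single chunk sent into a
 * zero-sized peer window.  When a SACK reopens the window, the
 * recovery above leaves acked (or already freed) probes alone and
 * pulls any still-outstanding probe out of the flight, marking it for
 * retransmission.
 */
static int
example_probe_needs_requeue(int sent_state, int acked_state, int has_data)
{
	if (sent_state >= acked_state || !has_data)
		return (0);	/* skipped or done: do not move it back */
	return (1);		/* shrink flight, mark chunk for resend */
}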
3968 
3969 void
3970 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3971     uint32_t rwnd, int *abort_now, int ecne_seen)
3972 {
3973 	struct sctp_nets *net;
3974 	struct sctp_association *asoc;
3975 	struct sctp_tmit_chunk *tp1, *tp2;
3976 	uint32_t old_rwnd;
3977 	int win_probe_recovery = 0;
3978 	int win_probe_recovered = 0;
3979 	int j, done_once = 0;
3980 	int rto_ok = 1;
3981 	uint32_t send_s;
3982 
3983 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3984 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3985 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3986 	}
3987 	SCTP_TCB_LOCK_ASSERT(stcb);
3988 #ifdef SCTP_ASOCLOG_OF_TSNS
3989 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3990 	stcb->asoc.cumack_log_at++;
3991 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3992 		stcb->asoc.cumack_log_at = 0;
3993 	}
3994 #endif
3995 	asoc = &stcb->asoc;
3996 	old_rwnd = asoc->peers_rwnd;
3997 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3998 		/* old ack */
3999 		return;
4000 	} else if (asoc->last_acked_seq == cumack) {
4001 		/* Window update sack */
4002 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4003 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4004 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4005 			/* SWS sender side engages */
4006 			asoc->peers_rwnd = 0;
4007 		}
4008 		if (asoc->peers_rwnd > old_rwnd) {
4009 			goto again;
4010 		}
4011 		return;
4012 	}
4013 
4014 	/* First setup for CC stuff */
4015 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4016 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
4017 			/* Drag along the window_tsn for cwr's */
4018 			net->cwr_window_tsn = cumack;
4019 		}
4020 		net->prev_cwnd = net->cwnd;
4021 		net->net_ack = 0;
4022 		net->net_ack2 = 0;
4023 
4024 		/*
4025 		 * CMT: Reset CUC and Fast recovery algo variables before
4026 		 * SACK processing
4027 		 */
4028 		net->new_pseudo_cumack = 0;
4029 		net->will_exit_fast_recovery = 0;
4030 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4031 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4032 		}
4033 	}
4034 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4035 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4036 		    sctpchunk_listhead);
4037 		send_s = tp1->rec.data.tsn + 1;
4038 	} else {
4039 		send_s = asoc->sending_seq;
4040 	}
4041 	if (SCTP_TSN_GE(cumack, send_s)) {
4042 		struct mbuf *op_err;
4043 		char msg[SCTP_DIAG_INFO_LEN];
4044 
4045 		*abort_now = 1;
4046 		/* XXX */
4047 		SCTP_SNPRINTF(msg, sizeof(msg),
4048 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4049 		    cumack, send_s);
4050 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4051 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4052 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4053 		return;
4054 	}
4055 	asoc->this_sack_highest_gap = cumack;
4056 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4057 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4058 		    stcb->asoc.overall_error_count,
4059 		    0,
4060 		    SCTP_FROM_SCTP_INDATA,
4061 		    __LINE__);
4062 	}
4063 	stcb->asoc.overall_error_count = 0;
4064 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4065 		/* process the new consecutive TSN first */
4066 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4067 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4068 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4069 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4070 				}
4071 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4072 					/*
4073 					 * If it is less than ACKED, it is
4074 					 * now no longer in flight. Higher
4075 					 * values may occur during marking.
4076 					 */
4077 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4078 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4079 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4080 							    tp1->whoTo->flight_size,
4081 							    tp1->book_size,
4082 							    (uint32_t)(uintptr_t)tp1->whoTo,
4083 							    tp1->rec.data.tsn);
4084 						}
4085 						sctp_flight_size_decrease(tp1);
4086 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4087 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4088 							    tp1);
4089 						}
4090 						/* sa_ignore NO_NULL_CHK */
4091 						sctp_total_flight_decrease(stcb, tp1);
4092 					}
4093 					tp1->whoTo->net_ack += tp1->send_size;
4094 					if (tp1->snd_count < 2) {
4095 						/*
4096 						 * True non-retransmitted
4097 						 * chunk
4098 						 */
4099 						tp1->whoTo->net_ack2 +=
4100 						    tp1->send_size;
4101 
4102 						/* update RTO too? */
4103 						if (tp1->do_rtt) {
4104 							if (rto_ok &&
4105 							    sctp_calculate_rto(stcb,
4106 							    &stcb->asoc,
4107 							    tp1->whoTo,
4108 							    &tp1->sent_rcv_time,
4109 							    SCTP_RTT_FROM_DATA)) {
4110 								rto_ok = 0;
4111 							}
4112 							if (tp1->whoTo->rto_needed == 0) {
4113 								tp1->whoTo->rto_needed = 1;
4114 							}
4115 							tp1->do_rtt = 0;
4116 						}
4117 					}
4118 					/*
4119 					 * CMT: CUCv2 algorithm. From the
4120 					 * cumack'd TSNs, for each TSN being
4121 					 * acked for the first time, set the
4122 					 * following variables for the
4123 					 * corresp destination.
4124 					 * new_pseudo_cumack will trigger a
4125 					 * cwnd update.
4126 					 * find_(rtx_)pseudo_cumack will
4127 					 * trigger search for the next
4128 					 * expected (rtx-)pseudo-cumack.
4129 					 */
4130 					tp1->whoTo->new_pseudo_cumack = 1;
4131 					tp1->whoTo->find_pseudo_cumack = 1;
4132 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4133 
4134 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4135 						/* sa_ignore NO_NULL_CHK */
4136 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4137 					}
4138 				}
4139 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4140 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4141 				}
4142 				if (tp1->rec.data.chunk_was_revoked) {
4143 					/* deflate the cwnd */
4144 					tp1->whoTo->cwnd -= tp1->book_size;
4145 					tp1->rec.data.chunk_was_revoked = 0;
4146 				}
4147 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4148 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4149 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4150 #ifdef INVARIANTS
4151 					} else {
4152 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4153 #endif
4154 					}
4155 				}
4156 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4157 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4158 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4159 					asoc->trigger_reset = 1;
4160 				}
4161 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4162 				if (tp1->data) {
4163 					/* sa_ignore NO_NULL_CHK */
4164 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4165 					sctp_m_freem(tp1->data);
4166 					tp1->data = NULL;
4167 				}
4168 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4169 					sctp_log_sack(asoc->last_acked_seq,
4170 					    cumack,
4171 					    tp1->rec.data.tsn,
4172 					    0,
4173 					    0,
4174 					    SCTP_LOG_FREE_SENT);
4175 				}
4176 				asoc->sent_queue_cnt--;
4177 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4178 			} else {
4179 				break;
4180 			}
4181 		}
4182 
4183 	}
4184 	/* sa_ignore NO_NULL_CHK */
4185 	if (stcb->sctp_socket) {
4186 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4187 		struct socket *so;
4188 
4189 #endif
4190 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4191 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4192 			/* sa_ignore NO_NULL_CHK */
4193 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4194 		}
4195 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4196 		so = SCTP_INP_SO(stcb->sctp_ep);
4197 		atomic_add_int(&stcb->asoc.refcnt, 1);
4198 		SCTP_TCB_UNLOCK(stcb);
4199 		SCTP_SOCKET_LOCK(so, 1);
4200 		SCTP_TCB_LOCK(stcb);
4201 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4202 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4203 			/* assoc was freed while we were unlocked */
4204 			SCTP_SOCKET_UNLOCK(so, 1);
4205 			return;
4206 		}
4207 #endif
4208 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4209 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4210 		SCTP_SOCKET_UNLOCK(so, 1);
4211 #endif
4212 	} else {
4213 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4214 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4215 		}
4216 	}
4217 
4218 	/* JRS - Use the congestion control given in the CC module */
4219 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4220 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4221 			if (net->net_ack2 > 0) {
4222 				/*
4223 				 * Karn's rule applies to clearing error
4224 				 * count; this is optional.
4225 				 */
4226 				net->error_count = 0;
4227 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4228 					/* addr came good */
4229 					net->dest_state |= SCTP_ADDR_REACHABLE;
4230 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4231 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4232 				}
4233 				if (net == stcb->asoc.primary_destination) {
4234 					if (stcb->asoc.alternate) {
4235 						/*
4236 						 * release the alternate,
4237 						 * primary is good
4238 						 */
4239 						sctp_free_remote_addr(stcb->asoc.alternate);
4240 						stcb->asoc.alternate = NULL;
4241 					}
4242 				}
4243 				if (net->dest_state & SCTP_ADDR_PF) {
4244 					net->dest_state &= ~SCTP_ADDR_PF;
4245 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4246 					    stcb->sctp_ep, stcb, net,
4247 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4248 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4249 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4250 					/* Done with this net */
4251 					net->net_ack = 0;
4252 				}
4253 				/* restore any doubled timers */
4254 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4255 				if (net->RTO < stcb->asoc.minrto) {
4256 					net->RTO = stcb->asoc.minrto;
4257 				}
4258 				if (net->RTO > stcb->asoc.maxrto) {
4259 					net->RTO = stcb->asoc.maxrto;
4260 				}
4261 			}
4262 		}
4263 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4264 	}
4265 	asoc->last_acked_seq = cumack;
4266 
4267 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4268 		/* nothing left in-flight */
4269 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4270 			net->flight_size = 0;
4271 			net->partial_bytes_acked = 0;
4272 		}
4273 		asoc->total_flight = 0;
4274 		asoc->total_flight_count = 0;
4275 	}
4276 
4277 	/* RWND update */
4278 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4279 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4280 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4281 		/* SWS sender side engages */
4282 		asoc->peers_rwnd = 0;
4283 	}
4284 	if (asoc->peers_rwnd > old_rwnd) {
4285 		win_probe_recovery = 1;
4286 	}
4287 	/* Now assure a timer where data is queued at */
4288 again:
4289 	j = 0;
4290 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4291 		if (win_probe_recovery && (net->window_probe)) {
4292 			win_probe_recovered = 1;
4293 			/*
4294 			 * Find first chunk that was used with window probe
4295 			 * and clear its sent state.
4296 			 */
4297 			/* sa_ignore FREED_MEMORY */
4298 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4299 				if (tp1->window_probe) {
4300 					/* move back to data send queue */
4301 					sctp_window_probe_recovery(stcb, asoc, tp1);
4302 					break;
4303 				}
4304 			}
4305 		}
4306 		if (net->flight_size) {
4307 			j++;
4308 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4309 			if (net->window_probe) {
4310 				net->window_probe = 0;
4311 			}
4312 		} else {
4313 			if (net->window_probe) {
4314 				/*
4315 				 * In window probes we must assure a timer
4316 				 * is still running there
4317 				 */
4318 				net->window_probe = 0;
4319 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4320 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4321 				}
4322 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4323 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4324 				    stcb, net,
4325 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4326 			}
4327 		}
4328 	}
4329 	if ((j == 0) &&
4330 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4331 	    (asoc->sent_queue_retran_cnt == 0) &&
4332 	    (win_probe_recovered == 0) &&
4333 	    (done_once == 0)) {
4334 		/*
4335 		 * huh, this should not happen unless all packets are
4336 		 * PR-SCTP and marked to skip of course.
4337 		 * PR-SCTP and marked to skip, of course.
4338 		if (sctp_fs_audit(asoc)) {
4339 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4340 				net->flight_size = 0;
4341 			}
4342 			asoc->total_flight = 0;
4343 			asoc->total_flight_count = 0;
4344 			asoc->sent_queue_retran_cnt = 0;
4345 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4346 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4347 					sctp_flight_size_increase(tp1);
4348 					sctp_total_flight_increase(stcb, tp1);
4349 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4350 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4351 				}
4352 			}
4353 		}
4354 		done_once = 1;
4355 		goto again;
4356 	}
4357 	/**********************************/
4358 	/* Now what about shutdown issues */
4359 	/**********************************/
4360 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4361 		/* nothing left on the send queue... consider done */
4362 		/* clean up */
4363 		if ((asoc->stream_queue_cnt == 1) &&
4364 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4365 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4366 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4367 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4368 		}
4369 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4370 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4371 		    (asoc->stream_queue_cnt == 1) &&
4372 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4373 			struct mbuf *op_err;
4374 
4375 			*abort_now = 1;
4376 			/* XXX */
4377 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4378 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4379 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4380 			return;
4381 		}
4382 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4383 		    (asoc->stream_queue_cnt == 0)) {
4384 			struct sctp_nets *netp;
4385 
4386 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4387 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4388 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4389 			}
4390 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4391 			sctp_stop_timers_for_shutdown(stcb);
4392 			if (asoc->alternate) {
4393 				netp = asoc->alternate;
4394 			} else {
4395 				netp = asoc->primary_destination;
4396 			}
4397 			sctp_send_shutdown(stcb, netp);
4398 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4399 			    stcb->sctp_ep, stcb, netp);
4400 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4401 			    stcb->sctp_ep, stcb, NULL);
4402 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4403 		    (asoc->stream_queue_cnt == 0)) {
4404 			struct sctp_nets *netp;
4405 
4406 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4407 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4408 			sctp_stop_timers_for_shutdown(stcb);
4409 			if (asoc->alternate) {
4410 				netp = asoc->alternate;
4411 			} else {
4412 				netp = asoc->primary_destination;
4413 			}
4414 			sctp_send_shutdown_ack(stcb, netp);
4415 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4416 			    stcb->sctp_ep, stcb, netp);
4417 		}
4418 	}
4419 	/*********************************************/
4420 	/* Here we perform PR-SCTP procedures        */
4421 	/* (section 4.2)                             */
4422 	/*********************************************/
4423 	/* C1. update advancedPeerAckPoint */
4424 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4425 		asoc->advanced_peer_ack_point = cumack;
4426 	}
4427 	/* PR-Sctp issues need to be addressed too */
4428 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4429 		struct sctp_tmit_chunk *lchk;
4430 		uint32_t old_adv_peer_ack_point;
4431 
4432 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4433 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4434 		/* C3. See if we need to send a Fwd-TSN */
4435 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4436 			/*
4437 			 * ISSUE with ECN, see FWD-TSN processing.
4438 			 */
4439 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4440 				send_forward_tsn(stcb, asoc);
4441 			} else if (lchk) {
4442 				/* try to FR fwd-tsn's that get lost too */
4443 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4444 					send_forward_tsn(stcb, asoc);
4445 				}
4446 			}
4447 		}
4448 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4449 			if (lchk->whoTo != NULL) {
4450 				break;
4451 			}
4452 		}
4453 		if (lchk != NULL) {
4454 			/* Assure a timer is up */
4455 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4456 			    stcb->sctp_ep, stcb, lchk->whoTo);
4457 		}
4458 	}
4459 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4460 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4461 		    rwnd,
4462 		    stcb->asoc.peers_rwnd,
4463 		    stcb->asoc.total_flight,
4464 		    stcb->asoc.total_output_queue_size);
4465 	}
4466 }
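
/*
 * Illustrative sketch (hypothetical function, not part of the original
 * logic): both SACK paths refresh the peer's window the same way.  The
 * advertised rwnd is reduced by the bytes still in flight plus a
 * per-chunk overhead allowance (floored at zero, as in
 * sctp_sbspace_sub()), and a window smaller than the sender-side SWS
 * threshold is treated as closed to avoid silly window syndrome.
 */
static uint32_t
example_peers_rwnd(uint32_t advertised_rwnd, uint32_t total_flight,
    uint32_t flight_cnt, uint32_t per_chunk_oh, uint32_t sws_threshold)
{
	uint32_t in_use, rwnd;

	in_use = total_flight + flight_cnt * per_chunk_oh;
	rwnd = (advertised_rwnd > in_use) ? (advertised_rwnd - in_use) : 0;
	if (rwnd < sws_threshold)
		rwnd = 0;	/* SWS avoidance: do not nibble at the window */
	return (rwnd);
}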
4467 
4468 void
4469 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4470     struct sctp_tcb *stcb,
4471     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4472     int *abort_now, uint8_t flags,
4473     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4474 {
4475 	struct sctp_association *asoc;
4476 	struct sctp_tmit_chunk *tp1, *tp2;
4477 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4478 	uint16_t wake_him = 0;
4479 	uint32_t send_s = 0;
4480 	long j;
4481 	int accum_moved = 0;
4482 	int will_exit_fast_recovery = 0;
4483 	uint32_t a_rwnd, old_rwnd;
4484 	int win_probe_recovery = 0;
4485 	int win_probe_recovered = 0;
4486 	struct sctp_nets *net = NULL;
4487 	int done_once;
4488 	int rto_ok = 1;
4489 	uint8_t reneged_all = 0;
4490 	uint8_t cmt_dac_flag;
4491 
4492 	/*
4493 	 * we take any chance we can to service our queues since we cannot
4494 	 * get awoken when the socket is read from :<
4495 	 */
4496 	/*
4497 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4498 	 * old sack, if so discard. 2) If there is nothing left in the send
4499 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4500 	 * too, update any rwnd change and verify no timers are running,
4501 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4502 	 * moved; process these first and note that it moved. 4) Process any
4503 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4504 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4505 	 * sync up flightsizes and things, stop all timers and also check
4506 	 * for shutdown_pending state. If so then go ahead and send off the
4507 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4508 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4509 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4510 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4511 	 * if in shutdown_recv state.
4512 	 */
4513 	SCTP_TCB_LOCK_ASSERT(stcb);
4514 	/* CMT DAC algo */
4515 	this_sack_lowest_newack = 0;
4516 	SCTP_STAT_INCR(sctps_slowpath_sack);
4517 	last_tsn = cum_ack;
4518 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4519 #ifdef SCTP_ASOCLOG_OF_TSNS
4520 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4521 	stcb->asoc.cumack_log_at++;
4522 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4523 		stcb->asoc.cumack_log_at = 0;
4524 	}
4525 #endif
4526 	a_rwnd = rwnd;
4527 
4528 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4529 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4530 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4531 	}
4532 
4533 	old_rwnd = stcb->asoc.peers_rwnd;
4534 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4535 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4536 		    stcb->asoc.overall_error_count,
4537 		    0,
4538 		    SCTP_FROM_SCTP_INDATA,
4539 		    __LINE__);
4540 	}
4541 	stcb->asoc.overall_error_count = 0;
4542 	asoc = &stcb->asoc;
4543 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4544 		sctp_log_sack(asoc->last_acked_seq,
4545 		    cum_ack,
4546 		    0,
4547 		    num_seg,
4548 		    num_dup,
4549 		    SCTP_LOG_NEW_SACK);
4550 	}
4551 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4552 		uint16_t i;
4553 		uint32_t *dupdata, dblock;
4554 
4555 		for (i = 0; i < num_dup; i++) {
4556 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4557 			    sizeof(uint32_t), (uint8_t *)&dblock);
4558 			if (dupdata == NULL) {
4559 				break;
4560 			}
4561 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4562 		}
4563 	}
4564 	/* reality check */
4565 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4566 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4567 		    sctpchunk_listhead);
4568 		send_s = tp1->rec.data.tsn + 1;
4569 	} else {
4570 		tp1 = NULL;
4571 		send_s = asoc->sending_seq;
4572 	}
4573 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4574 		struct mbuf *op_err;
4575 		char msg[SCTP_DIAG_INFO_LEN];
4576 
4577 		/*
4578 		 * no way, we have not even sent this TSN out yet. Peer is
4579 		 * hopelessly messed up with us.
4580 		 */
4581 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4582 		    cum_ack, send_s);
4583 		if (tp1) {
4584 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4585 			    tp1->rec.data.tsn, (void *)tp1);
4586 		}
4587 hopeless_peer:
4588 		*abort_now = 1;
4589 		/* XXX */
4590 		SCTP_SNPRINTF(msg, sizeof(msg),
4591 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4592 		    cum_ack, send_s);
4593 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4594 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4595 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4596 		return;
4597 	}
4598 	/**********************/
4599 	/* 1) check the range */
4600 	/**********************/
4601 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4602 		/* acking something behind */
4603 		return;
4604 	}
4605 
4606 	/* update the Rwnd of the peer */
4607 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4608 	    TAILQ_EMPTY(&asoc->send_queue) &&
4609 	    (asoc->stream_queue_cnt == 0)) {
4610 		/* nothing left on send/sent and strmq */
4611 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4612 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4613 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4614 		}
4615 		asoc->peers_rwnd = a_rwnd;
4616 		if (asoc->sent_queue_retran_cnt) {
4617 			asoc->sent_queue_retran_cnt = 0;
4618 		}
4619 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4620 			/* SWS sender side engages */
4621 			asoc->peers_rwnd = 0;
4622 		}
4623 		/* stop any timers */
4624 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4625 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4626 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4627 			net->partial_bytes_acked = 0;
4628 			net->flight_size = 0;
4629 		}
4630 		asoc->total_flight = 0;
4631 		asoc->total_flight_count = 0;
4632 		return;
4633 	}
4634 	/*
4635 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4636 	 * things. The total byte count acked is tracked in netAckSz AND
4637 	 * netAck2 is used to track the total bytes acked that are
4638 	 * unambiguous and were never retransmitted. We track these on a per
4639 	 * destination address basis.
4640 	 */
4641 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4642 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4643 			/* Drag along the window_tsn for cwr's */
4644 			net->cwr_window_tsn = cum_ack;
4645 		}
4646 		net->prev_cwnd = net->cwnd;
4647 		net->net_ack = 0;
4648 		net->net_ack2 = 0;
4649 
4650 		/*
4651 		 * CMT: Reset CUC and Fast recovery algo variables before
4652 		 * SACK processing
4653 		 */
4654 		net->new_pseudo_cumack = 0;
4655 		net->will_exit_fast_recovery = 0;
4656 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4657 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4658 		}
4659 
4660 		/*
4661 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4662 		 * to be greater than the cumack. Also reset saw_newack to 0
4663 		 * for all dests.
4664 		 */
4665 		net->saw_newack = 0;
4666 		net->this_sack_highest_newack = last_tsn;
4667 	}
4668 	/* process the new consecutive TSN first */
4669 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4670 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4671 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4672 				accum_moved = 1;
4673 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4674 					/*
4675 					 * If it is less than ACKED, it is
4676 					 * now no longer in flight. Higher
4677 					 * values may occur during marking.
4678 					 */
4679 					if ((tp1->whoTo->dest_state &
4680 					    SCTP_ADDR_UNCONFIRMED) &&
4681 					    (tp1->snd_count < 2)) {
4682 						/*
4683 						 * If there was no retran
4684 						 * and the address is
4685 						 * un-confirmed and we sent
4686 						 * there and are now
4687 						 * sacked... it's confirmed,
4688 						 * mark it so.
4689 						 */
4690 						tp1->whoTo->dest_state &=
4691 						    ~SCTP_ADDR_UNCONFIRMED;
4692 					}
4693 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4694 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4695 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4696 							    tp1->whoTo->flight_size,
4697 							    tp1->book_size,
4698 							    (uint32_t)(uintptr_t)tp1->whoTo,
4699 							    tp1->rec.data.tsn);
4700 						}
4701 						sctp_flight_size_decrease(tp1);
4702 						sctp_total_flight_decrease(stcb, tp1);
4703 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4704 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4705 							    tp1);
4706 						}
4707 					}
4708 					tp1->whoTo->net_ack += tp1->send_size;
4709 
4710 					/* CMT SFR and DAC algos */
4711 					this_sack_lowest_newack = tp1->rec.data.tsn;
4712 					tp1->whoTo->saw_newack = 1;
4713 
4714 					if (tp1->snd_count < 2) {
4715 						/*
4716 						 * True non-retransmitted
4717 						 * chunk
4718 						 */
4719 						tp1->whoTo->net_ack2 +=
4720 						    tp1->send_size;
4721 
4722 						/* update RTO too? */
4723 						if (tp1->do_rtt) {
4724 							if (rto_ok &&
4725 							    sctp_calculate_rto(stcb,
4726 							    &stcb->asoc,
4727 							    tp1->whoTo,
4728 							    &tp1->sent_rcv_time,
4729 							    SCTP_RTT_FROM_DATA)) {
4730 								rto_ok = 0;
4731 							}
4732 							if (tp1->whoTo->rto_needed == 0) {
4733 								tp1->whoTo->rto_needed = 1;
4734 							}
4735 							tp1->do_rtt = 0;
4736 						}
4737 					}
4738 					/*
4739 					 * CMT: CUCv2 algorithm. From the
4740 					 * cumack'd TSNs, for each TSN being
4741 					 * acked for the first time, set the
4742 					 * following variables for the
4743 					 * corresp destination.
4744 					 * new_pseudo_cumack will trigger a
4745 					 * cwnd update.
4746 					 * find_(rtx_)pseudo_cumack will
4747 					 * trigger search for the next
4748 					 * expected (rtx-)pseudo-cumack.
4749 					 */
4750 					tp1->whoTo->new_pseudo_cumack = 1;
4751 					tp1->whoTo->find_pseudo_cumack = 1;
4752 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4753 
4754 
4755 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4756 						sctp_log_sack(asoc->last_acked_seq,
4757 						    cum_ack,
4758 						    tp1->rec.data.tsn,
4759 						    0,
4760 						    0,
4761 						    SCTP_LOG_TSN_ACKED);
4762 					}
4763 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4764 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4765 					}
4766 				}
4767 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4768 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4769 #ifdef SCTP_AUDITING_ENABLED
4770 					sctp_audit_log(0xB3,
4771 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4772 #endif
4773 				}
4774 				if (tp1->rec.data.chunk_was_revoked) {
4775 					/* deflate the cwnd */
4776 					tp1->whoTo->cwnd -= tp1->book_size;
4777 					tp1->rec.data.chunk_was_revoked = 0;
4778 				}
4779 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4780 					tp1->sent = SCTP_DATAGRAM_ACKED;
4781 				}
4782 			}
4783 		} else {
4784 			break;
4785 		}
4786 	}
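	/*
	 * A minimal sketch of the serial-number comparison used above
	 * (assumption: this mirrors the SCTP_TSN_GT/GE macros only in
	 * spirit; RFC 1982 arithmetic on 32-bit TSNs):
	 *
	 *	gt = (a != b) && ((uint32_t)(a - b) < (1U << 31));
	 *	ge = (a == b) || gt;
	 *
	 * so the loop stops at the first TSN logically above last_tsn,
	 * even across 32-bit wrap-around.
	 */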
4787 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4788 	/* always set this up to cum-ack */
4789 	asoc->this_sack_highest_gap = last_tsn;
4790 
4791 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4792 
4793 		/*
4794 		 * this_sack_highest_gap will increase while handling NEW
4795 		 * segments. this_sack_highest_newack will increase while
4796 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4797 		 * used for the CMT DAC algorithm. saw_newack will also change.
4798 		 */
4799 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4800 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4801 		    num_seg, num_nr_seg, &rto_ok)) {
4802 			wake_him++;
4803 		}
4804 		/*
4805 		 * validate the biggest_tsn_acked in the gap acks if strict
4806 		 * adherence is wanted.
4807 		 */
4808 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4809 			/*
4810 			 * peer is either confused or we are under attack.
4811 			 * We must abort.
4812 			 */
4813 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4814 			    biggest_tsn_acked, send_s);
4815 			goto hopeless_peer;
4816 		}
4817 	}
4818 	/********************************************/
4819 	/* cancel ALL T3-send timers if accum moved */
4820 	/********************************************/
4821 	if (asoc->sctp_cmt_on_off > 0) {
4822 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4823 			if (net->new_pseudo_cumack)
4824 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4825 				    stcb, net,
4826 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4827 
4828 		}
4829 	} else {
4830 		if (accum_moved) {
4831 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4832 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4833 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4834 			}
4835 		}
4836 	}
4837 	/*********************************************/
4838 	/* drop the acked chunks from the sent queue */
4839 	/*********************************************/
4840 	asoc->last_acked_seq = cum_ack;
4841 
4842 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4843 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4844 			break;
4845 		}
4846 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4847 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4848 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4849 #ifdef INVARIANTS
4850 			} else {
4851 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4852 #endif
4853 			}
4854 		}
4855 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4856 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4857 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4858 			asoc->trigger_reset = 1;
4859 		}
4860 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4861 		if (PR_SCTP_ENABLED(tp1->flags)) {
4862 			if (asoc->pr_sctp_cnt != 0)
4863 				asoc->pr_sctp_cnt--;
4864 		}
4865 		asoc->sent_queue_cnt--;
4866 		if (tp1->data) {
4867 			/* sa_ignore NO_NULL_CHK */
4868 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4869 			sctp_m_freem(tp1->data);
4870 			tp1->data = NULL;
4871 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4872 				asoc->sent_queue_cnt_removeable--;
4873 			}
4874 		}
4875 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4876 			sctp_log_sack(asoc->last_acked_seq,
4877 			    cum_ack,
4878 			    tp1->rec.data.tsn,
4879 			    0,
4880 			    0,
4881 			    SCTP_LOG_FREE_SENT);
4882 		}
4883 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4884 		wake_him++;
4885 	}
4886 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4887 #ifdef INVARIANTS
4888 		panic("Warning flight size is positive and should be 0");
4889 #else
4890 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4891 		    asoc->total_flight);
4892 #endif
4893 		asoc->total_flight = 0;
4894 	}
4895 
4896 	/* sa_ignore NO_NULL_CHK */
4897 	if ((wake_him) && (stcb->sctp_socket)) {
4898 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4899 		struct socket *so;
4900 
4901 #endif
4902 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4903 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4904 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4905 		}
4906 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4907 		so = SCTP_INP_SO(stcb->sctp_ep);
4908 		atomic_add_int(&stcb->asoc.refcnt, 1);
4909 		SCTP_TCB_UNLOCK(stcb);
4910 		SCTP_SOCKET_LOCK(so, 1);
4911 		SCTP_TCB_LOCK(stcb);
4912 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4913 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4914 			/* assoc was freed while we were unlocked */
4915 			SCTP_SOCKET_UNLOCK(so, 1);
4916 			return;
4917 		}
4918 #endif
4919 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4920 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4921 		SCTP_SOCKET_UNLOCK(so, 1);
4922 #endif
4923 	} else {
4924 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4925 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4926 		}
4927 	}
4928 
4929 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4930 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4931 			/* Setup so we will exit RFC2582 fast recovery */
4932 			will_exit_fast_recovery = 1;
4933 		}
4934 	}
4935 	/*
4936 	 * Check for revoked fragments:
4937 	 *
4938 	 * If the previous SACK had no gap reports, nothing can have been
4939 	 * revoked. If it had gap reports, then: if this SACK also has gaps
4940 	 * (num_seg > 0), call sctp_check_for_revoked() to tell whether the
4941 	 * peer revoked some of them; otherwise the peer revoked all ACKED
4942 	 * fragments, since we had some before and now we have NONE.
4943 	 */
4944 
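	/*
	 * Example (illustrative): if the previous SACK's gap reports acked
	 * TSNs above the cum-ack and this SACK reports no gaps at all, the
	 * peer has revoked those fragments; the loop below moves them back
	 * to SCTP_DATAGRAM_SENT and re-inflates flight size and cwnd.
	 */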
4945 	if (num_seg) {
4946 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4947 		asoc->saw_sack_with_frags = 1;
4948 	} else if (asoc->saw_sack_with_frags) {
4949 		int cnt_revoked = 0;
4950 
4951 		/* Peer revoked all datagrams marked or acked */
4952 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4953 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4954 				tp1->sent = SCTP_DATAGRAM_SENT;
4955 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4956 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4957 					    tp1->whoTo->flight_size,
4958 					    tp1->book_size,
4959 					    (uint32_t)(uintptr_t)tp1->whoTo,
4960 					    tp1->rec.data.tsn);
4961 				}
4962 				sctp_flight_size_increase(tp1);
4963 				sctp_total_flight_increase(stcb, tp1);
4964 				tp1->rec.data.chunk_was_revoked = 1;
4965 				/*
4966 				 * To ensure that this increase in
4967 				 * flightsize, which is artificial, does not
4968 				 * throttle the sender, we also increase the
4969 				 * cwnd artificially.
4970 				 */
4971 				tp1->whoTo->cwnd += tp1->book_size;
4972 				cnt_revoked++;
4973 			}
4974 		}
4975 		if (cnt_revoked) {
4976 			reneged_all = 1;
4977 		}
4978 		asoc->saw_sack_with_frags = 0;
4979 	}
4980 	if (num_nr_seg > 0)
4981 		asoc->saw_sack_with_nr_frags = 1;
4982 	else
4983 		asoc->saw_sack_with_nr_frags = 0;
4984 
4985 	/* JRS - Use the congestion control given in the CC module */
4986 	if (ecne_seen == 0) {
4987 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4988 			if (net->net_ack2 > 0) {
4989 				/*
4990 				 * Karn's rule applies to clearing the
4991 				 * error count; this is optional.
4992 				 */
4993 				net->error_count = 0;
4994 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4995 					/* addr came good */
4996 					net->dest_state |= SCTP_ADDR_REACHABLE;
4997 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4998 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4999 				}
5000 
5001 				if (net == stcb->asoc.primary_destination) {
5002 					if (stcb->asoc.alternate) {
5003 						/*
5004 						 * release the alternate,
5005 						 * primary is good
5006 						 */
5007 						sctp_free_remote_addr(stcb->asoc.alternate);
5008 						stcb->asoc.alternate = NULL;
5009 					}
5010 				}
5011 
5012 				if (net->dest_state & SCTP_ADDR_PF) {
5013 					net->dest_state &= ~SCTP_ADDR_PF;
5014 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5015 					    stcb->sctp_ep, stcb, net,
5016 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5017 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5018 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5019 					/* Done with this net */
5020 					net->net_ack = 0;
5021 				}
5022 				/* restore any doubled timers */
5023 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5024 				if (net->RTO < stcb->asoc.minrto) {
5025 					net->RTO = stcb->asoc.minrto;
5026 				}
5027 				if (net->RTO > stcb->asoc.maxrto) {
5028 					net->RTO = stcb->asoc.maxrto;
5029 				}
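				/*
				 * Sketch (assumption: lastsa/lastsv hold
				 * the scaled SRTT/RTTVAR pair in the
				 * spirit of RFC 6298), so the computation
				 * above is roughly
				 *
				 *	RTO = SRTT + 4 * RTTVAR
				 *
				 * clamped to [minrto, maxrto].
				 */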
5030 			}
5031 		}
5032 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5033 	}
5034 
5035 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5036 		/* nothing left in-flight */
5037 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5038 			/* stop all timers */
5039 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5040 			    stcb, net,
5041 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5042 			net->flight_size = 0;
5043 			net->partial_bytes_acked = 0;
5044 		}
5045 		asoc->total_flight = 0;
5046 		asoc->total_flight_count = 0;
5047 	}
5048 
5049 	/**********************************/
5050 	/* Now what about shutdown issues */
5051 	/**********************************/
5052 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5053 		/* nothing left on the send queue... consider done */
5054 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5055 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5056 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5057 		}
5058 		asoc->peers_rwnd = a_rwnd;
5059 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5060 			/* SWS sender side engages */
5061 			asoc->peers_rwnd = 0;
5062 		}
5063 		/* clean up */
5064 		if ((asoc->stream_queue_cnt == 1) &&
5065 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5066 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5067 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5068 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5069 		}
5070 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5071 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5072 		    (asoc->stream_queue_cnt == 1) &&
5073 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5074 			struct mbuf *op_err;
5075 
5076 			*abort_now = 1;
5077 			/* XXX */
5078 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5079 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_34;
5080 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5081 			return;
5082 		}
5083 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5084 		    (asoc->stream_queue_cnt == 0)) {
5085 			struct sctp_nets *netp;
5086 
5087 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5088 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5089 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5090 			}
5091 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5092 			sctp_stop_timers_for_shutdown(stcb);
5093 			if (asoc->alternate) {
5094 				netp = asoc->alternate;
5095 			} else {
5096 				netp = asoc->primary_destination;
5097 			}
5098 			sctp_send_shutdown(stcb, netp);
5099 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5100 			    stcb->sctp_ep, stcb, netp);
5101 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5102 			    stcb->sctp_ep, stcb, NULL);
5103 			return;
5104 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5105 		    (asoc->stream_queue_cnt == 0)) {
5106 			struct sctp_nets *netp;
5107 
5108 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5109 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5110 			sctp_stop_timers_for_shutdown(stcb);
5111 			if (asoc->alternate) {
5112 				netp = asoc->alternate;
5113 			} else {
5114 				netp = asoc->primary_destination;
5115 			}
5116 			sctp_send_shutdown_ack(stcb, netp);
5117 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5118 			    stcb->sctp_ep, stcb, netp);
5119 			return;
5120 		}
5121 	}
5122 	/*
5123 	 * Now we are going to recycle net_ack for a different use...
5124 	 * HEADS UP.
5125 	 */
5126 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5127 		net->net_ack = 0;
5128 	}
5129 
5130 	/*
5131 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5132 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5133 	 * automatically ensure that.
5134 	 */
5135 	if ((asoc->sctp_cmt_on_off > 0) &&
5136 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5137 	    (cmt_dac_flag == 0)) {
5138 		this_sack_lowest_newack = cum_ack;
5139 	}
5140 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5141 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5142 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5143 	}
5144 	/* JRS - Use the congestion control given in the CC module */
5145 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5146 
5147 	/* Now are we exiting loss recovery ? */
5148 	if (will_exit_fast_recovery) {
5149 		/* Ok, we must exit fast recovery */
5150 		asoc->fast_retran_loss_recovery = 0;
5151 	}
5152 	if ((asoc->sat_t3_loss_recovery) &&
5153 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5154 		/* end satellite t3 loss recovery */
5155 		asoc->sat_t3_loss_recovery = 0;
5156 	}
5157 	/*
5158 	 * CMT Fast recovery
5159 	 */
5160 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5161 		if (net->will_exit_fast_recovery) {
5162 			/* Ok, we must exit fast recovery */
5163 			net->fast_retran_loss_recovery = 0;
5164 		}
5165 	}
5166 
5167 	/* Adjust and set the new rwnd value */
5168 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5169 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5170 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5171 	}
5172 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5173 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5174 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5175 		/* SWS sender side engages */
5176 		asoc->peers_rwnd = 0;
5177 	}
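	/*
	 * A minimal sketch of the bookkeeping above (hypothetical names,
	 * not part of this file): the advertised window is reduced by the
	 * bytes in flight plus a per-chunk overhead allowance, floored at
	 * zero, and forced to zero below the SWS threshold:
	 *
	 *	uint32_t used = flight + flight_cnt * overhead;
	 *	uint32_t rwnd = (a_rwnd > used) ? (a_rwnd - used) : 0;
	 *
	 *	if (rwnd < sws_sender)
	 *		rwnd = 0;
	 */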
5178 	if (asoc->peers_rwnd > old_rwnd) {
5179 		win_probe_recovery = 1;
5180 	}
5181 
5182 	/*
5183 	 * Now we must set things up so we have a timer up for anyone with
5184 	 * outstanding data.
5185 	 */
5186 	done_once = 0;
5187 again:
5188 	j = 0;
5189 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5190 		if (win_probe_recovery && (net->window_probe)) {
5191 			win_probe_recovered = 1;
5192 			/*-
5193 			 * Find the first chunk that was used for a
5194 			 * window probe and clear the event. Put
5195 			 * it back into the send queue as if it has
5196 			 * not been sent.
5197 			 */
5198 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5199 				if (tp1->window_probe) {
5200 					sctp_window_probe_recovery(stcb, asoc, tp1);
5201 					break;
5202 				}
5203 			}
5204 		}
5205 		if (net->flight_size) {
5206 			j++;
5207 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5208 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5209 				    stcb->sctp_ep, stcb, net);
5210 			}
5211 			if (net->window_probe) {
5212 				net->window_probe = 0;
5213 			}
5214 		} else {
5215 			if (net->window_probe) {
5216 				/*
5217 				 * For window probes we must ensure a timer
5218 				 * is still running there.
5219 				 */
5220 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5221 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5222 					    stcb->sctp_ep, stcb, net);
5223 
5224 				}
5225 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5226 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5227 				    stcb, net,
5228 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_35);
5229 			}
5230 		}
5231 	}
5232 	if ((j == 0) &&
5233 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5234 	    (asoc->sent_queue_retran_cnt == 0) &&
5235 	    (win_probe_recovered == 0) &&
5236 	    (done_once == 0)) {
5237 		/*
5238 		 * Huh, this should not happen unless all packets are
5239 		 * PR-SCTP and marked to be skipped, of course.
5240 		 */
5241 		if (sctp_fs_audit(asoc)) {
5242 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5243 				net->flight_size = 0;
5244 			}
5245 			asoc->total_flight = 0;
5246 			asoc->total_flight_count = 0;
5247 			asoc->sent_queue_retran_cnt = 0;
5248 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5249 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5250 					sctp_flight_size_increase(tp1);
5251 					sctp_total_flight_increase(stcb, tp1);
5252 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5253 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5254 				}
5255 			}
5256 		}
5257 		done_once = 1;
5258 		goto again;
5259 	}
5260 	/*********************************************/
5261 	/* Here we perform PR-SCTP procedures        */
5262 	/* (section 4.2)                             */
5263 	/*********************************************/
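	/*
	 * Example (per RFC 3758): if cum_ack is 100 and abandoned PR-SCTP
	 * TSNs 101-103 sit at the head of the sent queue, the advanced
	 * peer ack point moves to 103 and a FORWARD-TSN asks the peer to
	 * skip ahead to it.
	 */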
5264 	/* C1. update advancedPeerAckPoint */
5265 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5266 		asoc->advanced_peer_ack_point = cum_ack;
5267 	}
5268 	/* C2. try to further move advancedPeerAckPoint ahead */
5269 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5270 		struct sctp_tmit_chunk *lchk;
5271 		uint32_t old_adv_peer_ack_point;
5272 
5273 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5274 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5275 		/* C3. See if we need to send a Fwd-TSN */
5276 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5277 			/*
5278 			 * ISSUE with ECN, see FWD-TSN processing.
5279 			 */
5280 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5281 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5282 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5283 				    old_adv_peer_ack_point);
5284 			}
5285 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5286 				send_forward_tsn(stcb, asoc);
5287 			} else if (lchk) {
5288 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5289 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5290 					send_forward_tsn(stcb, asoc);
5291 				}
5292 			}
5293 		}
5294 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5295 			if (lchk->whoTo != NULL) {
5296 				break;
5297 			}
5298 		}
5299 		if (lchk != NULL) {
5300 			/* Ensure a timer is up */
5301 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5302 			    stcb->sctp_ep, stcb, lchk->whoTo);
5303 		}
5304 	}
5305 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5306 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5307 		    a_rwnd,
5308 		    stcb->asoc.peers_rwnd,
5309 		    stcb->asoc.total_flight,
5310 		    stcb->asoc.total_output_queue_size);
5311 	}
5312 }
5313 
5314 void
5315 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5316 {
5317 	/* Copy cum-ack */
5318 	uint32_t cum_ack, a_rwnd;
5319 
5320 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5321 	/* Arrange so a_rwnd does NOT change */
5322 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
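	/*
	 * The SACK handler recomputes peers_rwnd as (roughly) a_rwnd minus
	 * the bytes in flight, so passing peers_rwnd + total_flight leaves
	 * peers_rwnd unchanged.
	 */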
5323 
5324 	/* Now call the express sack handling */
5325 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5326 }
5327 
5328 static void
5329 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5330     struct sctp_stream_in *strmin)
5331 {
5332 	struct sctp_queued_to_read *control, *ncontrol;
5333 	struct sctp_association *asoc;
5334 	uint32_t mid;
5335 	int need_reasm_check = 0;
5336 
5337 	asoc = &stcb->asoc;
5338 	mid = strmin->last_mid_delivered;
5339 	/*
5340 	 * First deliver anything prior to and including the message ID that
5341 	 * came in.
5342 	 */
5343 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5344 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5345 			/* this is deliverable now */
5346 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5347 				if (control->on_strm_q) {
5348 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5349 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5350 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5351 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5352 #ifdef INVARIANTS
5353 					} else {
5354 						panic("strmin: %p ctl: %p unknown %d",
5355 						    strmin, control, control->on_strm_q);
5356 #endif
5357 					}
5358 					control->on_strm_q = 0;
5359 				}
5360 				/* subtract pending on streams */
5361 				if (asoc->size_on_all_streams >= control->length) {
5362 					asoc->size_on_all_streams -= control->length;
5363 				} else {
5364 #ifdef INVARIANTS
5365 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5366 #else
5367 					asoc->size_on_all_streams = 0;
5368 #endif
5369 				}
5370 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5371 				/* deliver it to at least the delivery-q */
5372 				if (stcb->sctp_socket) {
5373 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5374 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5375 					    control,
5376 					    &stcb->sctp_socket->so_rcv,
5377 					    1, SCTP_READ_LOCK_HELD,
5378 					    SCTP_SO_NOT_LOCKED);
5379 				}
5380 			} else {
5381 				/* It's a fragmented message */
5382 				if (control->first_frag_seen) {
5383 					/*
5384 					 * Make this the next one to
5385 					 * deliver; we restore it later
5386 					 */
5387 					strmin->last_mid_delivered = control->mid - 1;
5388 					need_reasm_check = 1;
5389 					break;
5390 				}
5391 			}
5392 		} else {
5393 			/* no more delivery now. */
5394 			break;
5395 		}
5396 	}
5397 	if (need_reasm_check) {
5398 		int ret;
5399 
5400 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5401 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5402 			/* Restore the next to deliver unless we are ahead */
5403 			strmin->last_mid_delivered = mid;
5404 		}
5405 		if (ret == 0) {
5406 			/* Left the front partial one on */
5407 			return;
5408 		}
5409 		need_reasm_check = 0;
5410 	}
5411 	/*
5412 	 * Now we must deliver things in the queue the normal way, if any
5413 	 * are now ready.
5414 	 */
5415 	mid = strmin->last_mid_delivered + 1;
5416 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5417 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5418 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5419 				/* this is deliverable now */
5420 				if (control->on_strm_q) {
5421 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5422 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5423 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5424 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5425 #ifdef INVARIANTS
5426 					} else {
5427 						panic("strmin: %p ctl: %p unknown %d",
5428 						    strmin, control, control->on_strm_q);
5429 #endif
5430 					}
5431 					control->on_strm_q = 0;
5432 				}
5433 				/* subtract pending on streams */
5434 				if (asoc->size_on_all_streams >= control->length) {
5435 					asoc->size_on_all_streams -= control->length;
5436 				} else {
5437 #ifdef INVARIANTS
5438 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5439 #else
5440 					asoc->size_on_all_streams = 0;
5441 #endif
5442 				}
5443 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5444 				/* deliver it to at least the delivery-q */
5445 				strmin->last_mid_delivered = control->mid;
5446 				if (stcb->sctp_socket) {
5447 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5448 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5449 					    control,
5450 					    &stcb->sctp_socket->so_rcv, 1,
5451 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5452 
5453 				}
5454 				mid = strmin->last_mid_delivered + 1;
5455 			} else {
5456 				/* It's a fragmented message */
5457 				if (control->first_frag_seen) {
5458 					/*
5459 					 * Make this the next one to
5460 					 * deliver
5461 					 */
5462 					strmin->last_mid_delivered = control->mid - 1;
5463 					need_reasm_check = 1;
5464 					break;
5465 				}
5466 			}
5467 		} else {
5468 			break;
5469 		}
5470 	}
5471 	if (need_reasm_check) {
5472 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5473 	}
5474 }
5475 
5476 
5477 
5478 static void
5479 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5480     struct sctp_association *asoc,
5481     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5482 {
5483 	struct sctp_queued_to_read *control;
5484 	struct sctp_stream_in *strm;
5485 	struct sctp_tmit_chunk *chk, *nchk;
5486 	int cnt_removed = 0;
5487 
5488 	/*
5489 	 * For now, large messages held on the stream reassembly queue that
5490 	 * are complete will be tossed too. We could in theory do more work
5491 	 * to spin through and stop after dumping one message, i.e. on
5492 	 * seeing the start of a new message at the head, and call the
5493 	 * delivery function... to see if it can be delivered... But for now
5494 	 * we just dump everything on the queue.
5495 	 */
5496 	strm = &asoc->strmin[stream];
5497 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5498 	if (control == NULL) {
5499 		/* Not found */
5500 		return;
5501 	}
5502 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5503 		return;
5504 	}
5505 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5506 		/* Purge hanging chunks */
5507 		if (!asoc->idata_supported && (ordered == 0)) {
5508 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5509 				break;
5510 			}
5511 		}
5512 		cnt_removed++;
5513 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5514 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5515 			asoc->size_on_reasm_queue -= chk->send_size;
5516 		} else {
5517 #ifdef INVARIANTS
5518 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5519 #else
5520 			asoc->size_on_reasm_queue = 0;
5521 #endif
5522 		}
5523 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5524 		if (chk->data) {
5525 			sctp_m_freem(chk->data);
5526 			chk->data = NULL;
5527 		}
5528 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5529 	}
5530 	if (!TAILQ_EMPTY(&control->reasm)) {
5531 		/* This has to be old data, unordered */
5532 		if (control->data) {
5533 			sctp_m_freem(control->data);
5534 			control->data = NULL;
5535 		}
5536 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5537 		chk = TAILQ_FIRST(&control->reasm);
5538 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5539 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5540 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5541 			    chk, SCTP_READ_LOCK_HELD);
5542 		}
5543 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5544 		return;
5545 	}
5546 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5547 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5548 		if (asoc->size_on_all_streams >= control->length) {
5549 			asoc->size_on_all_streams -= control->length;
5550 		} else {
5551 #ifdef INVARIANTS
5552 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5553 #else
5554 			asoc->size_on_all_streams = 0;
5555 #endif
5556 		}
5557 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5558 		control->on_strm_q = 0;
5559 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5560 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5561 		control->on_strm_q = 0;
5562 #ifdef INVARIANTS
5563 	} else if (control->on_strm_q) {
5564 		panic("strm: %p ctl: %p unknown %d",
5565 		    strm, control, control->on_strm_q);
5566 #endif
5567 	}
5568 	control->on_strm_q = 0;
5569 	if (control->on_read_q == 0) {
5570 		sctp_free_remote_addr(control->whoFrom);
5571 		if (control->data) {
5572 			sctp_m_freem(control->data);
5573 			control->data = NULL;
5574 		}
5575 		sctp_free_a_readq(stcb, control);
5576 	}
5577 }
5578 
5579 void
5580 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5581     struct sctp_forward_tsn_chunk *fwd,
5582     int *abort_flag, struct mbuf *m, int offset)
5583 {
5584 	/* The pr-sctp fwd tsn */
5585 	/*
5586 	 * Here we will perform all the data receiver side steps for
5587 	 * processing a FORWARD-TSN, as required by the PR-SCTP draft.
5588 	 *
5589 	 * Assume we get FwdTSN(x):
5590 	 * 1) update local cumTSN to x
5591 	 * 2) try to further advance cumTSN to x + others we have
5592 	 * 3) examine and update the re-ordering queue on pr-in-streams
5593 	 * 4) clean up the re-assembly queue
5594 	 * 5) send a SACK to report where we are
5595 	 */
5596 	struct sctp_association *asoc;
5597 	uint32_t new_cum_tsn, gap;
5598 	unsigned int i, fwd_sz, m_size;
5599 	uint32_t str_seq;
5600 	struct sctp_stream_in *strm;
5601 	struct sctp_queued_to_read *control, *sv;
5602 
5603 	asoc = &stcb->asoc;
5604 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5605 		SCTPDBG(SCTP_DEBUG_INDATA1,
5606 		    "Bad size too small/big fwd-tsn\n");
5607 		return;
5608 	}
5609 	m_size = (stcb->asoc.mapping_array_size << 3);
5610 	/*************************************************************/
5611 	/* 1. Here we update local cumTSN and shift the bitmap array */
5612 	/*************************************************************/
5613 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5614 
5615 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5616 		/* Already got there ... */
5617 		return;
5618 	}
5619 	/*
5620 	 * now we know the new TSN is more advanced, let's find the actual
5621 	 * gap
5622 	 */
5623 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5624 	asoc->cumulative_tsn = new_cum_tsn;
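	/*
	 * Sketch of the gap computation (assumption: equivalent in spirit
	 * to the macro above): with 32-bit TSNs, the unsigned difference
	 *
	 *	gap = new_cum_tsn - mapping_array_base_tsn;
	 *
	 * gives the bit offset into the mapping array, even across
	 * wrap-around.
	 */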
5625 	if (gap >= m_size) {
5626 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5627 			struct mbuf *op_err;
5628 			char msg[SCTP_DIAG_INFO_LEN];
5629 
5630 			/*
5631 			 * out of range (of single byte chunks in the rwnd I
5632 			 * give out). This must be an attacker.
5633 			 */
5634 			*abort_flag = 1;
5635 			SCTP_SNPRINTF(msg, sizeof(msg),
5636 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5637 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5638 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5639 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_36;
5640 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5641 			return;
5642 		}
5643 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5644 
5645 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5646 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5647 		asoc->highest_tsn_inside_map = new_cum_tsn;
5648 
5649 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5650 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5651 
5652 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5653 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5654 		}
5655 	} else {
5656 		SCTP_TCB_LOCK_ASSERT(stcb);
5657 		for (i = 0; i <= gap; i++) {
5658 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5659 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5660 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5661 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5662 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5663 				}
5664 			}
5665 		}
5666 	}
5667 	/*************************************************************/
5668 	/* 2. Clear up re-assembly queue                             */
5669 	/*************************************************************/
5670 
5671 	/* This is now done as part of clearing up the stream/seq */
5672 	if (asoc->idata_supported == 0) {
5673 		uint16_t sid;
5674 
5675 		/* Flush all the un-ordered data based on cum-tsn */
5676 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5677 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5678 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5679 		}
5680 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5681 	}
5682 	/*******************************************************/
5683 	/* 3. Update the PR-stream re-ordering queues and fix  */
5684 	/* delivery issues as needed.                          */
5685 	/*******************************************************/
5686 	fwd_sz -= sizeof(*fwd);
5687 	if (m && fwd_sz) {
5688 		/* New method. */
5689 		unsigned int num_str;
5690 		uint32_t mid, cur_mid;
5691 		uint16_t sid;
5692 		uint16_t ordered, flags;
5693 		struct sctp_strseq *stseq, strseqbuf;
5694 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5695 
5696 		offset += sizeof(*fwd);
5697 
5698 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5699 		if (asoc->idata_supported) {
5700 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5701 		} else {
5702 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5703 		}
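		/*
		 * Each entry is either a 4-byte { sid, ssn } pair (both
		 * 16-bit) for legacy DATA, or an 8-byte { sid, flags, mid }
		 * entry (16/16/32-bit) when I-DATA is in use, as read below;
		 * num_str is the remaining payload divided by the entry size.
		 */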
5704 		for (i = 0; i < num_str; i++) {
5705 			if (asoc->idata_supported) {
5706 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5707 				    sizeof(struct sctp_strseq_mid),
5708 				    (uint8_t *)&strseqbuf_m);
5709 				offset += sizeof(struct sctp_strseq_mid);
5710 				if (stseq_m == NULL) {
5711 					break;
5712 				}
5713 				sid = ntohs(stseq_m->sid);
5714 				mid = ntohl(stseq_m->mid);
5715 				flags = ntohs(stseq_m->flags);
5716 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5717 					ordered = 0;
5718 				} else {
5719 					ordered = 1;
5720 				}
5721 			} else {
5722 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5723 				    sizeof(struct sctp_strseq),
5724 				    (uint8_t *)&strseqbuf);
5725 				offset += sizeof(struct sctp_strseq);
5726 				if (stseq == NULL) {
5727 					break;
5728 				}
5729 				sid = ntohs(stseq->sid);
5730 				mid = (uint32_t)ntohs(stseq->ssn);
5731 				ordered = 1;
5732 			}
5733 			/* Convert */
5734 
5735 			/* now process */
5736 
5737 			/*
5738 			 * OK, we now look for the stream/seq on the read
5739 			 * queue where it's not all delivered. If we find it,
5740 			 * we transmute the read entry into a PDI_ABORTED.
5741 			 */
5742 			if (sid >= asoc->streamincnt) {
5743 				/* screwed up streams, stop!  */
5744 				break;
5745 			}
5746 			if ((asoc->str_of_pdapi == sid) &&
5747 			    (asoc->ssn_of_pdapi == mid)) {
5748 				/*
5749 				 * If this is the one we are partially
5750 				 * delivering now, then we no longer are.
5751 				 * Note this will change with the reassembly
5752 				 * re-write.
5753 				 */
5754 				asoc->fragmented_delivery_inprogress = 0;
5755 			}
5756 			strm = &asoc->strmin[sid];
5757 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5758 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5759 			}
5760 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5761 				if ((control->sinfo_stream == sid) &&
5762 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
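					/*
					 * Pack sid and the low 16 bits of
					 * mid into one word for the partial-
					 * delivery-aborted notification below.
					 */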
5763 					str_seq = (sid << 16) | (0x0000ffff & mid);
5764 					control->pdapi_aborted = 1;
5765 					sv = stcb->asoc.control_pdapi;
5766 					control->end_added = 1;
5767 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5768 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5769 						if (asoc->size_on_all_streams >= control->length) {
5770 							asoc->size_on_all_streams -= control->length;
5771 						} else {
5772 #ifdef INVARIANTS
5773 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5774 #else
5775 							asoc->size_on_all_streams = 0;
5776 #endif
5777 						}
5778 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5779 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5780 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5781 #ifdef INVARIANTS
5782 					} else if (control->on_strm_q) {
5783 						panic("strm: %p ctl: %p unknown %d",
5784 						    strm, control, control->on_strm_q);
5785 #endif
5786 					}
5787 					control->on_strm_q = 0;
5788 					stcb->asoc.control_pdapi = control;
5789 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5790 					    stcb,
5791 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5792 					    (void *)&str_seq,
5793 					    SCTP_SO_NOT_LOCKED);
5794 					stcb->asoc.control_pdapi = sv;
5795 					break;
5796 				} else if ((control->sinfo_stream == sid) &&
5797 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5798 					/* We are past our victim SSN */
5799 					break;
5800 				}
5801 			}
5802 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5803 				/* Update the sequence number */
5804 				strm->last_mid_delivered = mid;
5805 			}
5806 			/* now kick the stream the new way */
5807 			/* sa_ignore NO_NULL_CHK */
5808 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5809 		}
5810 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5811 	}
5812 	/*
5813 	 * Now slide things forward.
5814 	 */
5815 	sctp_slide_mapping_arrays(stcb);
5816 }
5817