xref: /freebsd/sys/netinet/sctp_indata.c (revision 640235e2c2ba32947f7c59d168437ffa1280f1e6)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a SACK into the chunk queue (if I have data to
56  * send, that is, and will be sending it) for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-to-many socket, since
84 	 * the sb_cc is the count that everyone has put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * association's data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on the socket queue and that we
102 	 * still hold for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* what is the overhead of all these rwnd's */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 	 * even if it is 0. SWS avoidance engaged.
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
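
/*
 * Illustrative, non-compiled sketch of the window arithmetic performed by
 * sctp_calc_rwnd() above, restated over plain integers.  All names here are
 * hypothetical: example_sat_sub() stands in for sctp_sbspace_sub(), and
 * "msize" models the per-chunk MSIZE mbuf-accounting charge.
 */
#if 0
static uint32_t
example_sat_sub(uint32_t a, uint32_t b)
{
	return ((a > b) ? (a - b) : 0);
}

static uint32_t
example_calc_rwnd(uint32_t sb_space, uint32_t reasm_bytes, uint32_t reasm_cnt,
    uint32_t strm_bytes, uint32_t strm_cnt, uint32_t ctrl_len, uint32_t msize)
{
	uint32_t calc = sb_space;

	/* Charge data held for the app plus per-chunk mbuf overhead. */
	calc = example_sat_sub(calc, reasm_bytes + reasm_cnt * msize);
	calc = example_sat_sub(calc, strm_bytes + strm_cnt * msize);
	if (calc == 0)
		return (0);
	/* Charge the rwnd control overhead. */
	calc = example_sat_sub(calc, ctrl_len);
	/* Avoid advertising a silly window: clamp small windows to 1. */
	if (calc < ctrl_len)
		calc = 1;
	return (calc);
}
#endif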
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t stream_no,
134     uint32_t stream_seq, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = stream_no;
145 	read_queue_e->sinfo_ssn = stream_seq;
146 	read_queue_e->sinfo_flags = (flags << 8);
147 	read_queue_e->sinfo_ppid = ppid;
148 	read_queue_e->sinfo_context = context;
149 	read_queue_e->sinfo_tsn = tsn;
150 	read_queue_e->sinfo_cumtsn = tsn;
151 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
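
/*
 * Illustrative, non-compiled sketch of the cmsg layout built above: each
 * element occupies CMSG_SPACE() bytes (aligned header plus padded data),
 * cmsg_len records CMSG_LEN(), and the next header begins CMSG_SPACE()
 * past the current one.  example_fill_rcvinfo() is a hypothetical
 * user-space analogue; it assumes <sys/socket.h>, <string.h> and the
 * SCTP UIO definitions are available.
 */
#if 0
static size_t
example_fill_rcvinfo(char *buf, size_t buflen, const struct sctp_rcvinfo *rcv)
{
	size_t need = CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	struct cmsghdr *cmh;

	if (buflen < need)
		return (0);
	memset(buf, 0, need);	/* no uninitialized padding is leaked */
	cmh = (struct cmsghdr *)buf;
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_RCVINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
	memcpy(CMSG_DATA(cmh), rcv, sizeof(struct sctp_rcvinfo));
	/* A second element (e.g. SCTP_NXTINFO) would start at buf + need. */
	return (need);
}
#endif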
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
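
/*
 * Illustrative, non-compiled sketch of the bookkeeping above.  A TSN is
 * converted to a bit offset ("gap") from mapping_array_base_tsn, and moving
 * it from the renege-able map to the non-renege-able map clears one bit and
 * sets the other.  example_tsn_gt() restates the RFC 1982 serial comparison
 * behind SCTP_TSN_GT(); the helper names are hypothetical.
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
	return (((a < b) && ((b - a) > (1U << 31))) ||
	    ((a > b) && ((a - b) < (1U << 31))));
}

static void
example_move_to_nr_map(uint8_t *map, uint8_t *nr_map, uint32_t base_tsn,
    uint32_t tsn)
{
	uint32_t gap = tsn - base_tsn;	/* unsigned wrap yields the gap */

	nr_map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
	map[gap >> 3] &= (uint8_t)~(1 << (gap & 0x07));
}
#endif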
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t bits, unordered;
337 
338 	bits = (control->sinfo_flags >> 8);
339 	unordered = bits & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/*
345 				 * Only one message can be queued here in
346 				 * old style -- abort
347 				 */
348 				return (-1);
349 			}
350 			TAILQ_INSERT_TAIL(q, control, next_instrm);
351 			control->on_strm_q = SCTP_ON_UNORDERED;
352 			return (0);
353 		}
354 	} else {
355 		q = &strm->inqueue;
356 	}
357 	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
359 	}
360 	if (TAILQ_EMPTY(q)) {
361 		/* Empty queue */
362 		TAILQ_INSERT_HEAD(q, control, next_instrm);
363 		if (unordered) {
364 			control->on_strm_q = SCTP_ON_UNORDERED;
365 		} else {
366 			control->on_strm_q = SCTP_ON_ORDERED;
367 		}
368 		return (0);
369 	} else {
370 		TAILQ_FOREACH(at, q, next_instrm) {
371 			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
372 				/*
373 				 * one in queue is bigger than the new one,
374 				 * insert before this one
375 				 */
376 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
377 				if (unordered) {
378 					control->on_strm_q = SCTP_ON_UNORDERED;
379 				} else {
380 					control->on_strm_q = SCTP_ON_ORDERED;
381 				}
382 				break;
383 			} else if (at->msg_id == control->msg_id) {
384 				/*
385 				 * Gak, he sent me a duplicate msg_id
386 				 * number? Return -1 to abort.
387 				 */
388 				return (-1);
389 			} else {
390 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
391 					/*
392 					 * We are at the end, insert it
393 					 * after this one
394 					 */
395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 						sctp_log_strm_del(control, at,
397 						    SCTP_STR_LOG_FROM_INSERT_TL);
398 					}
399 					TAILQ_INSERT_AFTER(q,
400 					    at, control, next_instrm);
401 					if (unordered) {
402 						control->on_strm_q = SCTP_ON_UNORDERED;
403 					} else {
404 						control->on_strm_q = SCTP_ON_ORDERED;
405 					}
406 					break;
407 				}
408 			}
409 		}
410 	}
411 	return (0);
412 }
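
/*
 * Illustrative, non-compiled sketch of the insertion policy above: walk the
 * queue, insert before the first entry whose msg_id is serially larger,
 * reject duplicates, and append at the tail otherwise.  The types and
 * example_msgid_gt() are hypothetical stand-ins for the real structures
 * and SCTP_MSGID_GT().
 */
#if 0
struct example_entry {
	TAILQ_ENTRY(example_entry) link;
	uint32_t msg_id;
};
TAILQ_HEAD(example_head, example_entry);

static int
example_msgid_gt(uint32_t a, uint32_t b)
{
	return (((a < b) && ((b - a) > (1U << 31))) ||
	    ((a > b) && ((a - b) < (1U << 31))));
}

static int
example_place_in_stream(struct example_head *q, struct example_entry *nent)
{
	struct example_entry *at;

	TAILQ_FOREACH(at, q, link) {
		if (example_msgid_gt(at->msg_id, nent->msg_id)) {
			TAILQ_INSERT_BEFORE(at, nent, link);
			return (0);
		}
		if (at->msg_id == nent->msg_id)
			return (-1);	/* duplicate: caller aborts */
	}
	TAILQ_INSERT_TAIL(q, nent, link);
	return (0);
}
#endif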
413 
414 static void
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416     struct sctp_queued_to_read *control,
417     struct sctp_tmit_chunk *chk,
418     int *abort_flag, int opspot)
419 {
420 	char msg[SCTP_DIAG_INFO_LEN];
421 	struct mbuf *oper;
422 
423 	if (stcb->asoc.idata_supported) {
424 		snprintf(msg, sizeof(msg),
425 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
426 		    opspot,
427 		    control->fsn_included,
428 		    chk->rec.data.TSN_seq,
429 		    chk->rec.data.stream_number,
430 		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
431 	} else {
432 		snprintf(msg, sizeof(msg),
433 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
434 		    opspot,
435 		    control->fsn_included,
436 		    chk->rec.data.TSN_seq,
437 		    chk->rec.data.stream_number,
438 		    chk->rec.data.fsn_num,
439 		    (uint16_t) chk->rec.data.stream_seq);
440 	}
441 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
442 	sctp_m_freem(chk->data);
443 	chk->data = NULL;
444 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
447 	*abort_flag = 1;
448 }
449 
450 static void
451 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
452 {
453 	/*
454 	 * The control could not be placed and must be cleaned.
455 	 */
456 	struct sctp_tmit_chunk *chk, *nchk;
457 
458 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
460 		if (chk->data)
461 			sctp_m_freem(chk->data);
462 		chk->data = NULL;
463 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
464 	}
465 	sctp_free_a_readq(stcb, control);
466 }
467 
468 /*
469  * Queue the chunk either right into the socket buffer if it is the next one
470  * to go, OR put it in the correct place in the delivery queue.  If we do
471  * append to the so_buf, keep doing so until we hit one that is out of
472  * order, as long as the controls entered are non-fragmented.
473  */
474 static void
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476     struct sctp_stream_in *strm,
477     struct sctp_association *asoc,
478     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 {
480 	/*
481 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 	 * all the data in one stream this could happen quite rapidly. One
483 	 * could use the TSN to keep track of things, but this scheme breaks
484 	 * down in the other type of stream usage that could occur. Send a
485 	 * single msg to stream 0, send 4Billion messages to stream 1, now
486 	 * send a message to stream 0. You have a situation where the TSN
487 	 * has wrapped but not in the stream. Is this worth worrying about,
488 	 * or should we just change our queue sort at the bottom to be by
489 	 * TSN?
490 	 *
491 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 	 * assignment this could happen... and I don't see how this would be
494 	 * a violation. So for now I am undecided and will leave the sort by
495 	 * SSN alone. Maybe a hybrid approach is the answer.
496 	 *
497 	 */
498 	struct sctp_queued_to_read *at;
499 	int queue_needed;
500 	uint32_t nxt_todel;
501 	struct mbuf *op_err;
502 	char msg[SCTP_DIAG_INFO_LEN];
503 
504 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
506 	}
507 	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 		/* The incoming sseq is behind where we last delivered? */
509 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 		    control->sinfo_ssn, strm->last_sequence_delivered);
511 protocol_error:
512 		/*
513 		 * throw it in the stream so it gets cleaned up in
514 		 * association destruction
515 		 */
516 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 		    strm->last_sequence_delivered, control->sinfo_tsn,
519 		    control->sinfo_stream, control->sinfo_ssn);
520 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
523 		*abort_flag = 1;
524 		return;
525 
526 	}
527 	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
528 		goto protocol_error;
529 	}
530 	queue_needed = 1;
531 	asoc->size_on_all_streams += control->length;
532 	sctp_ucount_incr(asoc->cnt_on_all_streams);
533 	nxt_todel = strm->last_sequence_delivered + 1;
534 	if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
536 		struct socket *so;
537 
538 		so = SCTP_INP_SO(stcb->sctp_ep);
539 		atomic_add_int(&stcb->asoc.refcnt, 1);
540 		SCTP_TCB_UNLOCK(stcb);
541 		SCTP_SOCKET_LOCK(so, 1);
542 		SCTP_TCB_LOCK(stcb);
543 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 			SCTP_SOCKET_UNLOCK(so, 1);
546 			return;
547 		}
548 #endif
549 		/* can be delivered right away? */
550 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
552 		}
553 		/* EY it won't be queued if it could be delivered directly */
554 		queue_needed = 0;
555 		asoc->size_on_all_streams -= control->length;
556 		sctp_ucount_decr(asoc->cnt_on_all_streams);
557 		strm->last_sequence_delivered++;
558 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 		sctp_add_to_readq(stcb->sctp_ep, stcb,
560 		    control,
561 		    &stcb->sctp_socket->so_rcv, 1,
562 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
563 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
564 			/* all delivered */
565 			nxt_todel = strm->last_sequence_delivered + 1;
566 			if ((nxt_todel == control->sinfo_ssn) &&
567 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 				asoc->size_on_all_streams -= control->length;
569 				sctp_ucount_decr(asoc->cnt_on_all_streams);
570 				if (control->on_strm_q == SCTP_ON_ORDERED) {
571 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
572 #ifdef INVARIANTS
573 				} else {
574 					panic("Huh control: %p is on_strm_q: %d",
575 					    control, control->on_strm_q);
576 #endif
577 				}
578 				control->on_strm_q = 0;
579 				strm->last_sequence_delivered++;
580 				/*
581 				 * We ignore the return of deliver_data here
582 				 * since we always can hold the chunk on the
583 				 * d-queue. And we have a finite number that
584 				 * can be delivered from the strq.
585 				 */
586 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 					sctp_log_strm_del(control, NULL,
588 					    SCTP_STR_LOG_FROM_IMMED_DEL);
589 				}
590 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 				sctp_add_to_readq(stcb->sctp_ep, stcb,
592 				    control,
593 				    &stcb->sctp_socket->so_rcv, 1,
594 				    SCTP_READ_LOCK_NOT_HELD,
595 				    SCTP_SO_LOCKED);
596 				continue;
597 			} else if (nxt_todel == control->sinfo_ssn) {
598 				*need_reasm = 1;
599 			}
600 			break;
601 		}
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 		SCTP_SOCKET_UNLOCK(so, 1);
604 #endif
605 	}
606 	if (queue_needed) {
607 		/*
608 		 * Ok, we did not deliver this guy, find the correct place
609 		 * to put it on the queue.
610 		 */
611 		if (sctp_place_control_in_stream(strm, asoc, control)) {
612 			snprintf(msg, sizeof(msg),
613 			    "Queue to str msg_id: %u duplicate",
614 			    control->msg_id);
615 			sctp_clean_up_control(stcb, control);
616 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
619 			*abort_flag = 1;
620 		}
621 	}
622 }
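
/*
 * Illustrative, non-compiled sketch of the delivery rule above: a message
 * whose SSN equals last_sequence_delivered + 1 bypasses the queue, and the
 * in-stream queue is then drained while the next expected, non-fragmented
 * message sits at its head.  Types and example_deliver() are hypothetical.
 */
#if 0
struct example_msg {
	TAILQ_ENTRY(example_msg) link;
	uint16_t ssn;
	int not_fragmented;
};
TAILQ_HEAD(example_msgq, example_msg);
struct example_strm {
	struct example_msgq inqueue;
	uint16_t last_delivered;
};

static void example_deliver(struct example_msg *);	/* hand-off to reader */

static void
example_drain_in_order(struct example_strm *strm)
{
	struct example_msg *e;

	while (((e = TAILQ_FIRST(&strm->inqueue)) != NULL) &&
	    (e->ssn == (uint16_t)(strm->last_delivered + 1)) &&
	    e->not_fragmented) {
		TAILQ_REMOVE(&strm->inqueue, e, link);
		strm->last_delivered++;
		example_deliver(e);
	}
}
#endif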
623 
624 
625 static void
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
627 {
628 	struct mbuf *m, *prev = NULL;
629 	struct sctp_tcb *stcb;
630 
631 	stcb = control->stcb;
632 	control->held_length = 0;
633 	control->length = 0;
634 	m = control->data;
635 	while (m) {
636 		if (SCTP_BUF_LEN(m) == 0) {
637 			/* Skip mbufs with NO length */
638 			if (prev == NULL) {
639 				/* First one */
640 				control->data = sctp_m_free(m);
641 				m = control->data;
642 			} else {
643 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 				m = SCTP_BUF_NEXT(prev);
645 			}
646 			if (m == NULL) {
647 				control->tail_mbuf = prev;
648 			}
649 			continue;
650 		}
651 		prev = m;
652 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 		if (control->on_read_q) {
654 			/*
655 			 * On read queue so we must increment the SB stuff,
656 			 * we assume caller has done any locks of SB.
657 			 */
658 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
659 		}
660 		m = SCTP_BUF_NEXT(m);
661 	}
662 	if (prev) {
663 		control->tail_mbuf = prev;
664 	}
665 }
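
/*
 * Illustrative, non-compiled sketch of the chain walk above: zero-length
 * mbufs are unlinked in place (m_free() returns the next mbuf), the total
 * length is accumulated, and the last non-empty mbuf becomes the tail.
 * example_trim_chain() is a hypothetical stand-alone analogue.
 */
#if 0
static struct mbuf *
example_trim_chain(struct mbuf **head, uint32_t *total)
{
	struct mbuf *m = *head, *prev = NULL;

	*total = 0;
	while (m != NULL) {
		if (m->m_len == 0) {
			/* Unlink the empty mbuf and advance. */
			if (prev == NULL)
				m = *head = m_free(m);
			else
				m = prev->m_next = m_free(m);
			continue;
		}
		*total += m->m_len;
		prev = m;
		m = m->m_next;
	}
	return (prev);	/* new tail, or NULL if the chain emptied */
}
#endif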
666 
667 static void
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
669 {
670 	struct mbuf *prev = NULL;
671 	struct sctp_tcb *stcb;
672 
673 	stcb = control->stcb;
674 	if (stcb == NULL) {
675 #ifdef INVARIANTS
676 		panic("Control broken");
677 #else
678 		return;
679 #endif
680 	}
681 	if (control->tail_mbuf == NULL) {
682 		/* TSNH */
683 		control->data = m;
684 		sctp_setup_tail_pointer(control);
685 		return;
686 	}
687 	control->tail_mbuf->m_next = m;
688 	while (m) {
689 		if (SCTP_BUF_LEN(m) == 0) {
690 			/* Skip mbufs with NO length */
691 			if (prev == NULL) {
692 				/* First one */
693 				control->tail_mbuf->m_next = sctp_m_free(m);
694 				m = control->tail_mbuf->m_next;
695 			} else {
696 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 				m = SCTP_BUF_NEXT(prev);
698 			}
699 			if (m == NULL) {
700 				control->tail_mbuf = prev;
701 			}
702 			continue;
703 		}
704 		prev = m;
705 		if (control->on_read_q) {
706 			/*
707 			 * On read queue so we must increment the SB stuff,
708 			 * we assume caller has done any locks of SB.
709 			 */
710 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
711 		}
712 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 		m = SCTP_BUF_NEXT(m);
714 	}
715 	if (prev) {
716 		control->tail_mbuf = prev;
717 	}
718 }
719 
720 static void
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
722 {
723 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
724 	nc->sinfo_stream = control->sinfo_stream;
725 	nc->sinfo_ssn = control->sinfo_ssn;
726 	TAILQ_INIT(&nc->reasm);
727 	nc->top_fsn = control->top_fsn;
728 	nc->msg_id = control->msg_id;
729 	nc->sinfo_flags = control->sinfo_flags;
730 	nc->sinfo_ppid = control->sinfo_ppid;
731 	nc->sinfo_context = control->sinfo_context;
732 	nc->fsn_included = 0xffffffff;
733 	nc->sinfo_tsn = control->sinfo_tsn;
734 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 	nc->whoFrom = control->whoFrom;
737 	atomic_add_int(&nc->whoFrom->ref_count, 1);
738 	nc->stcb = control->stcb;
739 	nc->port_from = control->port_from;
740 }
741 
742 static void
743 sctp_reset_a_control(struct sctp_queued_to_read *control,
744     struct sctp_inpcb *inp, uint32_t tsn)
745 {
746 	control->fsn_included = tsn;
747 	if (control->on_read_q) {
748 		/*
749 		 * We have to purge it from there, hopefully this will work
750 		 * :-)
751 		 */
752 		TAILQ_REMOVE(&inp->read_queue, control, next);
753 		control->on_read_q = 0;
754 	}
755 }
756 
757 static int
758 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
759     struct sctp_association *asoc,
760     struct sctp_stream_in *strm,
761     struct sctp_queued_to_read *control,
762     uint32_t pd_point,
763     int inp_read_lock_held)
764 {
765 	/*
766 	 * Special handling for the old un-ordered data chunk. All the
767 	 * chunks/TSN's go to msg_id 0. So we have to do the old style
768 	 * watching to see if we have it all. If you return one, no other
769 	 * control entries on the un-ordered queue will be looked at. In
770 	 * theory there should be no other entries in reality, unless the
771 	 * guy is sending both unordered NDATA and unordered DATA...
772 	 */
773 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
774 	uint32_t fsn;
775 	struct sctp_queued_to_read *nc;
776 	int cnt_added;
777 
778 	if (control->first_frag_seen == 0) {
779 		/* Nothing we can do, we have not seen the first piece yet */
780 		return (1);
781 	}
782 	/* Collapse any we can */
783 	cnt_added = 0;
784 restart:
785 	fsn = control->fsn_included + 1;
786 	/* Now what can we add? */
787 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
788 		if (chk->rec.data.fsn_num == fsn) {
789 			/* Ok lets add it */
790 			sctp_alloc_a_readq(stcb, nc);
791 			if (nc == NULL) {
792 				break;
793 			}
794 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
795 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
796 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
797 			fsn++;
798 			cnt_added++;
799 			chk = NULL;
800 			if (control->end_added) {
801 				/* We are done */
802 				if (!TAILQ_EMPTY(&control->reasm)) {
803 					/*
804 					 * Ok we have to move anything left
805 					 * on the control queue to a new
806 					 * control.
807 					 */
808 					sctp_build_readq_entry_from_ctl(nc, control);
809 					tchk = TAILQ_FIRST(&control->reasm);
810 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
811 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
812 						nc->first_frag_seen = 1;
813 						nc->fsn_included = tchk->rec.data.fsn_num;
814 						nc->data = tchk->data;
815 						nc->sinfo_ppid = tchk->rec.data.payloadtype;
816 						nc->sinfo_tsn = tchk->rec.data.TSN_seq;
817 						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
818 						tchk->data = NULL;
819 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
820 						sctp_setup_tail_pointer(nc);
821 						tchk = TAILQ_FIRST(&control->reasm);
822 					}
823 					/* Spin the rest onto the queue */
824 					while (tchk) {
825 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
826 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
827 						tchk = TAILQ_FIRST(&control->reasm);
828 					}
829 					/*
830 					 * Now let's add it to the queue
831 					 * after removing control
832 					 */
833 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
834 					nc->on_strm_q = SCTP_ON_UNORDERED;
835 					if (control->on_strm_q) {
836 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
837 						control->on_strm_q = 0;
838 					}
839 				}
840 				if (control->pdapi_started) {
841 					strm->pd_api_started = 0;
842 					control->pdapi_started = 0;
843 				}
844 				if (control->on_strm_q) {
845 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
846 					control->on_strm_q = 0;
847 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
848 				}
849 				if (control->on_read_q == 0) {
850 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
851 					    &stcb->sctp_socket->so_rcv, control->end_added,
852 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
853 				}
854 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
855 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
856 					/*
857 					 * Switch to the new guy and
858 					 * continue
859 					 */
860 					control = nc;
861 					goto restart;
862 				} else {
863 					if (nc->on_strm_q == 0) {
864 						sctp_free_a_readq(stcb, nc);
865 					}
866 				}
867 				return (1);
868 			} else {
869 				sctp_free_a_readq(stcb, nc);
870 			}
871 		} else {
872 			/* Can't add more */
873 			break;
874 		}
875 	}
876 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
877 		strm->pd_api_started = 1;
878 		control->pdapi_started = 1;
879 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
880 		    &stcb->sctp_socket->so_rcv, control->end_added,
881 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
882 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
883 		return (0);
884 	} else {
885 		return (1);
886 	}
887 }
888 
889 static void
890 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
891     struct sctp_association *asoc,
892     struct sctp_queued_to_read *control,
893     struct sctp_tmit_chunk *chk,
894     int *abort_flag)
895 {
896 	struct sctp_tmit_chunk *at;
897 	int inserted;
898 
899 	/*
900 	 * Here we need to place the chunk into the control structure sorted
901 	 * in the correct order.
902 	 */
903 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
904 		/* It's the very first one. */
905 		SCTPDBG(SCTP_DEBUG_XXX,
906 		    "chunk is a first fsn: %u becomes fsn_included\n",
907 		    chk->rec.data.fsn_num);
908 		if (control->first_frag_seen) {
909 			/*
910 			 * In old un-ordered we can reassemble multiple
911 			 * messages on one control, as long as the next
912 			 * FIRST is greater than the old first (TSN, i.e.
913 			 * FSN, wise).
914 			 */
915 			struct mbuf *tdata;
916 			uint32_t tmp;
917 
918 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
919 				/*
920 				 * Easy case: the start of a new guy beyond
921 				 * the lowest
922 				 */
923 				goto place_chunk;
924 			}
925 			if ((chk->rec.data.fsn_num == control->fsn_included) ||
926 			    (control->pdapi_started)) {
927 				/*
928 				 * Ok, this should not happen; if it does, we
929 				 * started the pd-api on the higher TSN
930 				 * (since the equals part is a TSN failure
931 				 * it must be that).
932 				 *
933 				 * We are completely hosed in that case since I
934 				 * have no way to recover. This really will
935 				 * only happen if we can get more TSN's
936 				 * higher before the pd-api-point.
937 				 */
938 				sctp_abort_in_reasm(stcb, control, chk,
939 				    abort_flag,
940 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
941 
942 				return;
943 			}
944 			/*
945 			 * Ok we have two firsts and the one we just got is
946 			 * smaller than the one we previously placed.. yuck!
947 			 * We must swap them out.
948 			 */
949 			/* swap the mbufs */
950 			tdata = control->data;
951 			control->data = chk->data;
952 			chk->data = tdata;
953 			/* Save the lengths */
954 			chk->send_size = control->length;
955 			/* Recompute length of control and tail pointer */
956 			sctp_setup_tail_pointer(control);
957 			/* Fix the FSN included */
958 			tmp = control->fsn_included;
959 			control->fsn_included = chk->rec.data.fsn_num;
960 			chk->rec.data.fsn_num = tmp;
961 			/* Fix the TSN included */
962 			tmp = control->sinfo_tsn;
963 			control->sinfo_tsn = chk->rec.data.TSN_seq;
964 			chk->rec.data.TSN_seq = tmp;
965 			/* Fix the PPID included */
966 			tmp = control->sinfo_ppid;
967 			control->sinfo_ppid = chk->rec.data.payloadtype;
968 			chk->rec.data.payloadtype = tmp;
969 			/* Fix tail pointer */
970 			goto place_chunk;
971 		}
972 		control->first_frag_seen = 1;
973 		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
974 		control->sinfo_tsn = chk->rec.data.TSN_seq;
975 		control->sinfo_ppid = chk->rec.data.payloadtype;
976 		control->data = chk->data;
977 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
978 		chk->data = NULL;
979 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
980 		sctp_setup_tail_pointer(control);
981 		return;
982 	}
983 place_chunk:
984 	inserted = 0;
985 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
986 		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
987 			/*
988 			 * This one in queue is bigger than the new one,
989 			 * insert the new one before at.
990 			 */
991 			asoc->size_on_reasm_queue += chk->send_size;
992 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
993 			inserted = 1;
994 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
995 			break;
996 		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
997 			/*
998 			 * They sent a duplicate fsn number. This really
999 			 * should not happen since the FSN is a TSN and it
1000 			 * should have been dropped earlier.
1001 			 */
1002 			sctp_abort_in_reasm(stcb, control, chk,
1003 			    abort_flag,
1004 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1005 			return;
1006 		}
1007 	}
1008 	if (inserted == 0) {
1009 		/* It's at the end */
1010 		asoc->size_on_reasm_queue += chk->send_size;
1011 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1012 		control->top_fsn = chk->rec.data.fsn_num;
1013 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1014 	}
1015 }
1016 
1017 static int
1018 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1019     struct sctp_stream_in *strm, int inp_read_lock_held)
1020 {
1021 	/*
1022 	 * Given a stream, strm, see if any of the SSN's on it that are
1023 	 * fragmented are ready to deliver. If so go ahead and place them on
1024 	 * the read queue. In so placing, if we have hit the end, then we
1025 	 * need to remove them from the stream's queue.
1026 	 */
1027 	struct sctp_queued_to_read *control, *nctl = NULL;
1028 	uint32_t next_to_del;
1029 	uint32_t pd_point;
1030 	int ret = 0;
1031 
1032 	if (stcb->sctp_socket) {
1033 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1034 		    stcb->sctp_ep->partial_delivery_point);
1035 	} else {
1036 		pd_point = stcb->sctp_ep->partial_delivery_point;
1037 	}
1038 	control = TAILQ_FIRST(&strm->uno_inqueue);
1039 
1040 	if ((control) &&
1041 	    (asoc->idata_supported == 0)) {
1042 		/* Special handling needed for "old" data format */
1043 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1044 			goto done_un;
1045 		}
1046 	}
1047 	if (strm->pd_api_started) {
1048 		/* Can't add more */
1049 		return (0);
1050 	}
1051 	while (control) {
1052 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1053 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1054 		nctl = TAILQ_NEXT(control, next_instrm);
1055 		if (control->end_added) {
1056 			/* We just put the last bit on */
1057 			if (control->on_strm_q) {
1058 #ifdef INVARIANTS
1059 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1060 					panic("Huh control: %p on_q: %d -- not unordered?",
1061 					    control, control->on_strm_q);
1062 				}
1063 #endif
1064 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1065 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1066 				control->on_strm_q = 0;
1067 			}
1068 			if (control->on_read_q == 0) {
1069 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1070 				    control,
1071 				    &stcb->sctp_socket->so_rcv, control->end_added,
1072 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1073 			}
1074 		} else {
1075 			/* Can we do a PD-API for this un-ordered guy? */
1076 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1077 				strm->pd_api_started = 1;
1078 				control->pdapi_started = 1;
1079 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1080 				    control,
1081 				    &stcb->sctp_socket->so_rcv, control->end_added,
1082 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1083 
1084 				break;
1085 			}
1086 		}
1087 		control = nctl;
1088 	}
1089 done_un:
1090 	control = TAILQ_FIRST(&strm->inqueue);
1091 	if (strm->pd_api_started) {
1092 		/* Can't add more */
1093 		return (0);
1094 	}
1095 	if (control == NULL) {
1096 		return (ret);
1097 	}
1098 	if (strm->last_sequence_delivered == control->sinfo_ssn) {
1099 		/*
1100 		 * Ok, the guy at the top was being partially delivered and
1101 		 * has completed, so we remove it. Note the pd_api flag was
1102 		 * taken off when the chunk was merged on in
1103 		 * sctp_queue_data_for_reasm below.
1104 		 */
1105 		nctl = TAILQ_NEXT(control, next_instrm);
1106 		SCTPDBG(SCTP_DEBUG_XXX,
1107 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1108 		    control, control->end_added, control->sinfo_ssn,
1109 		    control->top_fsn, control->fsn_included,
1110 		    strm->last_sequence_delivered);
1111 		if (control->end_added) {
1112 			if (control->on_strm_q) {
1113 #ifdef INVARIANTS
1114 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1115 					panic("Huh control: %p on_q: %d -- not ordered?",
1116 					    control, control->on_strm_q);
1117 				}
1118 #endif
1119 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1120 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1121 				control->on_strm_q = 0;
1122 			}
1123 			if (strm->pd_api_started && control->pdapi_started) {
1124 				control->pdapi_started = 0;
1125 				strm->pd_api_started = 0;
1126 			}
1127 			if (control->on_read_q == 0) {
1128 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1129 				    control,
1130 				    &stcb->sctp_socket->so_rcv, control->end_added,
1131 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1132 			}
1133 			control = nctl;
1134 		}
1135 	}
1136 	if (strm->pd_api_started) {
1137 		/*
1138 		 * Can't add more; we must have gotten an un-ordered one
1139 		 * above that is being partially delivered.
1140 		 */
1141 		return (0);
1142 	}
1143 deliver_more:
1144 	next_to_del = strm->last_sequence_delivered + 1;
1145 	if (control) {
1146 		SCTPDBG(SCTP_DEBUG_XXX,
1147 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1148 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1149 		    next_to_del);
1150 		nctl = TAILQ_NEXT(control, next_instrm);
1151 		if ((control->sinfo_ssn == next_to_del) &&
1152 		    (control->first_frag_seen)) {
1153 			int done;
1154 
1155 			/* Ok we can deliver it onto the stream. */
1156 			if (control->end_added) {
1157 				/* We are done with it afterwards */
1158 				if (control->on_strm_q) {
1159 #ifdef INVARIANTS
1160 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1161 						panic("Huh control: %p on_q: %d -- not ordered?",
1162 						    control, control->on_strm_q);
1163 					}
1164 #endif
1165 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1166 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1167 					control->on_strm_q = 0;
1168 				}
1169 				ret++;
1170 			}
1171 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1172 				/*
1173 				 * A singleton now slipping through - mark
1174 				 * it non-revokable too
1175 				 */
1176 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1177 			} else if (control->end_added == 0) {
1178 				/*
1179 				 * Check if we can defer adding until its
1180 				 * all there
1181 				 */
1182 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1183 					/*
1184 					 * Don't need it or cannot add more
1185 					 * (one being delivered that way)
1186 					 */
1187 					goto out;
1188 				}
1189 			}
1190 			done = (control->end_added) && (control->last_frag_seen);
1191 			if (control->on_read_q == 0) {
1192 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1193 				    control,
1194 				    &stcb->sctp_socket->so_rcv, control->end_added,
1195 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1196 			}
1197 			strm->last_sequence_delivered = next_to_del;
1198 			if (done) {
1199 				control = nctl;
1200 				goto deliver_more;
1201 			} else {
1202 				/* We are now doing PD API */
1203 				strm->pd_api_started = 1;
1204 				control->pdapi_started = 1;
1205 			}
1206 		}
1207 	}
1208 out:
1209 	return (ret);
1210 }
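
/*
 * Illustrative, non-compiled sketch of the partial-delivery threshold
 * computed above: an incomplete message is pushed to the reader early
 * (PD-API) once it holds pd_point bytes, the smaller of a right-shifted
 * fraction of the receive buffer limit and the endpoint's configured
 * partial_delivery_point.  The helper name is hypothetical.
 */
#if 0
static uint32_t
example_pd_point(uint32_t so_rcv_limit, uint32_t ep_pd_point, int shift)
{
	uint32_t from_sb = so_rcv_limit >> shift;

	return ((from_sb < ep_pd_point) ? from_sb : ep_pd_point);
}
#endif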
1211 
1212 
1213 void
1214 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1215     struct sctp_stream_in *strm,
1216     struct sctp_tcb *stcb, struct sctp_association *asoc,
1217     struct sctp_tmit_chunk *chk, int hold_rlock)
1218 {
1219 	/*
1220 	 * Given a control and a chunk, merge the data from the chk onto the
1221 	 * control and free up the chunk resources.
1222 	 */
1223 	int i_locked = 0;
1224 
1225 	if (control->on_read_q && (hold_rlock == 0)) {
1226 		/*
1227 		 * It's being pd-api'd so we must do some locks.
1228 		 */
1229 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1230 		i_locked = 1;
1231 	}
1232 	if (control->data == NULL) {
1233 		control->data = chk->data;
1234 		sctp_setup_tail_pointer(control);
1235 	} else {
1236 		sctp_add_to_tail_pointer(control, chk->data);
1237 	}
1238 	control->fsn_included = chk->rec.data.fsn_num;
1239 	asoc->size_on_reasm_queue -= chk->send_size;
1240 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1241 	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1242 	chk->data = NULL;
1243 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1244 		control->first_frag_seen = 1;
1245 	}
1246 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1247 		/* It's complete */
1248 		if ((control->on_strm_q) && (control->on_read_q)) {
1249 			if (control->pdapi_started) {
1250 				control->pdapi_started = 0;
1251 				strm->pd_api_started = 0;
1252 			}
1253 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1254 				/* Unordered */
1255 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1256 				control->on_strm_q = 0;
1257 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1258 				/* Ordered */
1259 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1260 				control->on_strm_q = 0;
1261 #ifdef INVARIANTS
1262 			} else if (control->on_strm_q) {
1263 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1264 				    control->on_strm_q);
1265 #endif
1266 			}
1267 		}
1268 		control->end_added = 1;
1269 		control->last_frag_seen = 1;
1270 	}
1271 	if (i_locked) {
1272 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1273 	}
1274 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1275 }
1276 
1277 /*
1278  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1279  * queue, see if anything can be delivered. If so, pull it off (or as much
1280  * as we can). If we run out of space then we must dump what we can and set the
1281  * appropriate flag to say we queued what we could.
1282  */
1283 static void
1284 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1285     struct sctp_stream_in *strm,
1286     struct sctp_queued_to_read *control,
1287     struct sctp_tmit_chunk *chk,
1288     int created_control,
1289     int *abort_flag, uint32_t tsn)
1290 {
1291 	uint32_t next_fsn;
1292 	struct sctp_tmit_chunk *at, *nat;
1293 	int do_wakeup, unordered;
1294 
1295 	/*
1296 	 * For old un-ordered data chunks.
1297 	 */
1298 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1299 		unordered = 1;
1300 	} else {
1301 		unordered = 0;
1302 	}
1303 	/* Must be added to the stream-in queue */
1304 	if (created_control) {
1305 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1306 			/* Duplicate SSN? */
1307 			sctp_clean_up_control(stcb, control);
1308 			sctp_abort_in_reasm(stcb, control, chk,
1309 			    abort_flag,
1310 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1311 			return;
1312 		}
1313 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1314 			/*
1315 			 * Ok, we created this control and now let's validate
1316 			 * that it's legal, i.e. there is a B bit set; if not
1317 			 * and we are up to the cum-ack then it's invalid.
1318 			 */
1319 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1320 				sctp_abort_in_reasm(stcb, control, chk,
1321 				    abort_flag,
1322 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1323 				return;
1324 			}
1325 		}
1326 	}
1327 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1328 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1329 		return;
1330 	}
1331 	/*
1332 	 * Ok, we must queue the chunk into the reassembly portion: o if it's
1333 	 * the first it goes to the control mbuf. o if it's not first but the
1334 	 * next in sequence it goes to the control, and each succeeding one
1335 	 * in order also goes. o if it's not in order we place it on the list
1336 	 * in its place.
1337 	 */
1338 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1339 		/* It's the very first one. */
1340 		SCTPDBG(SCTP_DEBUG_XXX,
1341 		    "chunk is a first fsn: %u becomes fsn_included\n",
1342 		    chk->rec.data.fsn_num);
1343 		if (control->first_frag_seen) {
1344 			/*
1345 			 * Error on sender's part; they either sent us two
1346 			 * data chunks with FIRST, or they sent two
1347 			 * un-ordered chunks that were fragmented at the
1348 			 * same time in the same stream.
1349 			 */
1350 			sctp_abort_in_reasm(stcb, control, chk,
1351 			    abort_flag,
1352 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1353 			return;
1354 		}
1355 		control->first_frag_seen = 1;
1356 		control->fsn_included = chk->rec.data.fsn_num;
1357 		control->data = chk->data;
1358 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1359 		chk->data = NULL;
1360 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 		sctp_setup_tail_pointer(control);
1362 	} else {
1363 		/* Place the chunk in our list */
1364 		int inserted = 0;
1365 
1366 		if (control->last_frag_seen == 0) {
1367 			/* Still willing to raise highest FSN seen */
1368 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1369 				SCTPDBG(SCTP_DEBUG_XXX,
1370 				    "We have a new top_fsn: %u\n",
1371 				    chk->rec.data.fsn_num);
1372 				control->top_fsn = chk->rec.data.fsn_num;
1373 			}
1374 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1375 				SCTPDBG(SCTP_DEBUG_XXX,
1376 				    "The last fsn is now in place fsn: %u\n",
1377 				    chk->rec.data.fsn_num);
1378 				control->last_frag_seen = 1;
1379 			}
1380 			if (asoc->idata_supported || control->first_frag_seen) {
1381 				/*
1382 				 * For IDATA we always check since we know
1383 				 * that the first fragment is 0. For old
1384 				 * DATA we have to receive the first before
1385 				 * we know the first FSN (which is the TSN).
1386 				 */
1387 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1388 					/*
1389 					 * We have already delivered up to
1390 					 * this so it's a dup
1391 					 */
1392 					sctp_abort_in_reasm(stcb, control, chk,
1393 					    abort_flag,
1394 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1395 					return;
1396 				}
1397 			}
1398 		} else {
1399 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1400 				/* Second last? huh? */
1401 				SCTPDBG(SCTP_DEBUG_XXX,
1402 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1403 				    chk->rec.data.fsn_num, control->top_fsn);
1404 				sctp_abort_in_reasm(stcb, control,
1405 				    chk, abort_flag,
1406 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1407 				return;
1408 			}
1409 			if (asoc->idata_supported || control->first_frag_seen) {
1410 				/*
1411 				 * For IDATA we always check since we know
1412 				 * that the first fragment is 0. For old
1413 				 * DATA we have to receive the first before
1414 				 * we know the first FSN (which is the TSN).
1415 				 */
1416 
1417 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1418 					/*
1419 					 * We have already delivered up to
1420 					 * this so it's a dup
1421 					 */
1422 					SCTPDBG(SCTP_DEBUG_XXX,
1423 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1424 					    chk->rec.data.fsn_num, control->fsn_included);
1425 					sctp_abort_in_reasm(stcb, control, chk,
1426 					    abort_flag,
1427 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1428 					return;
1429 				}
1430 			}
1431 			/*
1432 			 * validate not beyond top FSN if we have seen last
1433 			 * one
1434 			 */
1435 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1436 				SCTPDBG(SCTP_DEBUG_XXX,
1437 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1438 				    chk->rec.data.fsn_num,
1439 				    control->top_fsn);
1440 				sctp_abort_in_reasm(stcb, control, chk,
1441 				    abort_flag,
1442 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1443 				return;
1444 			}
1445 		}
1446 		/*
1447 		 * If we reach here, we need to place the new chunk in the
1448 		 * reassembly for this control.
1449 		 */
1450 		SCTPDBG(SCTP_DEBUG_XXX,
1451 		    "chunk is a not first fsn: %u needs to be inserted\n",
1452 		    chk->rec.data.fsn_num);
1453 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1454 			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1455 				/*
1456 				 * This one in queue is bigger than the new
1457 				 * one, insert the new one before at.
1458 				 */
1459 				SCTPDBG(SCTP_DEBUG_XXX,
1460 				    "Insert it before fsn: %u\n",
1461 				    at->rec.data.fsn_num);
1462 				asoc->size_on_reasm_queue += chk->send_size;
1463 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1464 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1465 				inserted = 1;
1466 				break;
1467 			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1468 				/*
1469 				 * Gak, he sent me a duplicate str seq
1470 				 * number
1471 				 */
1472 				/*
1473 				 * foo bar, I guess I will just free this
1474 				 * new guy, should we abort too? FIX ME
1475 				 * MAYBE? Or it COULD be that the SSN's have
1476 				 * wrapped. Maybe I should compare to TSN
1477 				 * somehow... sigh for now just blow away
1478 				 * the chunk!
1479 				 */
1480 				SCTPDBG(SCTP_DEBUG_XXX,
1481 				    "Duplicate to fsn: %u -- abort\n",
1482 				    at->rec.data.fsn_num);
1483 				sctp_abort_in_reasm(stcb, control,
1484 				    chk, abort_flag,
1485 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1486 				return;
1487 			}
1488 		}
1489 		if (inserted == 0) {
1490 			/* Goes on the end */
1491 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1492 			    chk->rec.data.fsn_num);
1493 			asoc->size_on_reasm_queue += chk->send_size;
1494 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1495 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1496 		}
1497 	}
1498 	/*
1499 	 * Ok, let's see if we can suck any up into the control structure that
1500 	 * are in seq if it makes sense.
1501 	 */
1502 	do_wakeup = 0;
1503 	/*
1504 	 * If the first fragment has not been seen there is no sense in
1505 	 * looking.
1506 	 */
1507 	if (control->first_frag_seen) {
1508 		next_fsn = control->fsn_included + 1;
1509 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1510 			if (at->rec.data.fsn_num == next_fsn) {
1511 				/* We can add this one now to the control */
1512 				SCTPDBG(SCTP_DEBUG_XXX,
1513 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1514 				    control, at,
1515 				    at->rec.data.fsn_num,
1516 				    next_fsn, control->fsn_included);
1517 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1518 				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1519 				if (control->on_read_q) {
1520 					do_wakeup = 1;
1521 				}
1522 				next_fsn++;
1523 				if (control->end_added && control->pdapi_started) {
1524 					if (strm->pd_api_started) {
1525 						strm->pd_api_started = 0;
1526 						control->pdapi_started = 0;
1527 					}
1528 					if (control->on_read_q == 0) {
1529 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1530 						    control,
1531 						    &stcb->sctp_socket->so_rcv, control->end_added,
1532 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1533 						do_wakeup = 1;
1534 					}
1535 					break;
1536 				}
1537 			} else {
1538 				break;
1539 			}
1540 		}
1541 	}
1542 	if (do_wakeup) {
1543 		/* Need to wakeup the reader */
1544 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1545 	}
1546 }
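
/*
 * Illustrative, non-compiled sketch of the merge loop above: once the first
 * fragment is present, every queued fragment whose FSN is exactly
 * fsn_included + 1 is appended to the control and the loop repeats until
 * the next gap.  Types and example_append() are hypothetical.
 */
#if 0
struct example_frag {
	TAILQ_ENTRY(example_frag) link;
	uint32_t fsn;
};
TAILQ_HEAD(example_fragq, example_frag);
struct example_ctl {
	struct example_fragq reasm;
	uint32_t fsn_included;
};

static void example_append(struct example_ctl *, struct example_frag *);

static void
example_merge_in_sequence(struct example_ctl *ctl)
{
	struct example_frag *f;
	uint32_t fsn;

	while (((f = TAILQ_FIRST(&ctl->reasm)) != NULL) &&
	    (f->fsn == ctl->fsn_included + 1)) {
		fsn = f->fsn;
		TAILQ_REMOVE(&ctl->reasm, f, link);
		example_append(ctl, f);	/* merge data; frees the fragment */
		ctl->fsn_included = fsn;
	}
}
#endif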
1547 
1548 static struct sctp_queued_to_read *
1549 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1550 {
1551 	struct sctp_queued_to_read *control;
1552 
1553 	if (ordered) {
1554 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1555 			if (control->msg_id == msg_id) {
1556 				break;
1557 			}
1558 		}
1559 	} else {
1560 		if (old) {
1561 			control = TAILQ_FIRST(&strm->uno_inqueue);
1562 			return (control);
1563 		}
1564 		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1565 			if (control->msg_id == msg_id) {
1566 				break;
1567 			}
1568 		}
1569 	}
1570 	return (control);
1571 }
1572 
1573 static int
1574 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1575     struct mbuf **m, int offset, int chk_length,
1576     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1577     int *break_flag, int last_chunk, uint8_t chtype)
1578 {
1579 	/* Process a data chunk */
1580 	/* struct sctp_tmit_chunk *chk; */
1581 	struct sctp_data_chunk *ch;
1582 	struct sctp_idata_chunk *nch, chunk_buf;
1583 	struct sctp_tmit_chunk *chk;
1584 	uint32_t tsn, fsn, gap, msg_id;
1585 	struct mbuf *dmbuf;
1586 	int the_len;
1587 	int need_reasm_check = 0;
1588 	uint16_t strmno;
1589 	struct mbuf *op_err;
1590 	char msg[SCTP_DIAG_INFO_LEN];
1591 	struct sctp_queued_to_read *control = NULL;
1592 	uint32_t protocol_id;
1593 	uint8_t chunk_flags;
1594 	struct sctp_stream_reset_list *liste;
1595 	struct sctp_stream_in *strm;
1596 	int ordered;
1597 	size_t clen;
1598 	int created_control = 0;
1599 	uint8_t old_data;
1600 
1601 	chk = NULL;
1602 	if (chtype == SCTP_IDATA) {
1603 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1604 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1605 		ch = (struct sctp_data_chunk *)nch;
1606 		clen = sizeof(struct sctp_idata_chunk);
1607 		tsn = ntohl(ch->dp.tsn);
1608 		msg_id = ntohl(nch->dp.msg_id);
1609 		protocol_id = nch->dp.ppid_fsn.protocol_id;
1610 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1611 			fsn = 0;
1612 		else
1613 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1614 		old_data = 0;
1615 	} else {
1616 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1617 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1618 		tsn = ntohl(ch->dp.tsn);
1619 		protocol_id = ch->dp.protocol_id;
1620 		clen = sizeof(struct sctp_data_chunk);
1621 		fsn = tsn;
1622 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1623 		nch = NULL;
1624 		old_data = 1;
1625 	}
1626 	chunk_flags = ch->ch.chunk_flags;
1627 	if ((size_t)chk_length == clen) {
1628 		/*
1629 		 * Need to send an abort since we had an empty data chunk.
1630 		 */
1631 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1632 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1633 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1634 		*abort_flag = 1;
1635 		return (0);
1636 	}
1637 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1638 		asoc->send_sack = 1;
1639 	}
1640 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1641 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1642 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1643 	}
1644 	if (stcb == NULL) {
1645 		return (0);
1646 	}
1647 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1648 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1649 		/* It is a duplicate */
1650 		SCTP_STAT_INCR(sctps_recvdupdata);
1651 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1652 			/* Record a dup for the next outbound sack */
1653 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1654 			asoc->numduptsns++;
1655 		}
1656 		asoc->send_sack = 1;
1657 		return (0);
1658 	}
1659 	/* Calculate the number of TSNs between the base and this TSN */
1660 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1661 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1662 		/* Can't hold the bit; gap exceeds the maximum mapping array size, toss it */
1663 		return (0);
1664 	}
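	/*
	 * A worked example of the gap math (a hedged sketch; the macro is
	 * defined elsewhere): with mapping_array_base_tsn == 1000 and
	 * tsn == 1037, gap == 37, i.e. bit 5 of byte 4 of the mapping
	 * array.  The arithmetic is modulo 2^32, so base 0xfffffff0 and
	 * tsn 0x0000000f still yield a small gap (31) across the wrap.
	 */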
1665 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1666 		SCTP_TCB_LOCK_ASSERT(stcb);
1667 		if (sctp_expand_mapping_array(asoc, gap)) {
1668 			/* Can't expand, drop it */
1669 			return (0);
1670 		}
1671 	}
1672 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1673 		*high_tsn = tsn;
1674 	}
1675 	/* See if we have received this one already */
1676 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1677 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1678 		SCTP_STAT_INCR(sctps_recvdupdata);
1679 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1680 			/* Record a dup for the next outbound sack */
1681 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1682 			asoc->numduptsns++;
1683 		}
1684 		asoc->send_sack = 1;
1685 		return (0);
1686 	}
1687 	/*
1688 	 * Check the GONE flag; duplicates would have caused a SACK to
1689 	 * be sent above.
1690 	 */
1691 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1692 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1693 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1694 		/*
1695 		 * wait a minute, this guy is gone, there is no longer a
1696 		 * receiver. Send peer an ABORT!
1697 		 */
1698 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1699 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1700 		*abort_flag = 1;
1701 		return (0);
1702 	}
1703 	/*
1704 	 * Now before going further we see if there is room. If NOT then we
1705 	 * MAY let one through only IF this TSN is the one we are waiting
1706 	 * for on a partial delivery API.
1707 	 */
1708 
1709 	/* Is the stream valid? */
1710 	strmno = ntohs(ch->dp.stream_id);
1711 
1712 	if (strmno >= asoc->streamincnt) {
1713 		struct sctp_error_invalid_stream *cause;
1714 
1715 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1716 		    0, M_NOWAIT, 1, MT_DATA);
1717 		if (op_err != NULL) {
1718 			/* add some space up front so prepend will work well */
1719 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1720 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1721 			/*
1722 			 * Error causes are just parameters, and this one has
1723 			 * two back-to-back phdrs: one with the error type and
1724 			 * size, the other with the stream id and a reserved field.
1725 			 */
1726 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1727 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1728 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1729 			cause->stream_id = ch->dp.stream_id;
1730 			cause->reserved = htons(0);
1731 			sctp_queue_op_err(stcb, op_err);
1732 		}
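		/*
		 * Resulting wire layout of the cause built above (a sketch,
		 * per RFC 4960, Section 3.3.10.1):
		 *
		 *	cause.code   = 0x0001 (Invalid Stream Identifier)
		 *	cause.length = 8
		 *	stream_id    = offending SID, as received
		 *	reserved     = 0
		 */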
1733 		SCTP_STAT_INCR(sctps_badsid);
1734 		SCTP_TCB_LOCK_ASSERT(stcb);
1735 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1736 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1737 			asoc->highest_tsn_inside_nr_map = tsn;
1738 		}
1739 		if (tsn == (asoc->cumulative_tsn + 1)) {
1740 			/* Update cum-ack */
1741 			asoc->cumulative_tsn = tsn;
1742 		}
1743 		return (0);
1744 	}
1745 	strm = &asoc->strmin[strmno];
1746 	/*
1747 	 * If it's a fragmented message, let's see if we can find the control
1748 	 * on the reassembly queues.
1749 	 */
1750 	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1751 		/*
1752 		 * The first *must* be fsn 0, and other (middle/end) pieces
1753 		 * can *not* be fsn 0.
1754 		 */
1755 		goto err_out;
1756 	}
1757 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1758 		/* See if we can find the re-assembly entity */
1759 		control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
1760 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1761 		    chunk_flags, control);
1762 		if (control) {
1763 			/* We found something, does it belong? */
1764 			if (ordered && (msg_id != control->sinfo_ssn)) {
1765 		err_out:
1766 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1767 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1768 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1769 				*abort_flag = 1;
1770 				return (0);
1771 			}
1772 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1773 				/*
1774 				 * We can't have a switched order with an
1775 				 * unordered chunk
1776 				 */
1777 				goto err_out;
1778 			}
1779 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1780 				/*
1781 				 * We can't have a switched unordered with a
1782 				 * We can't have a switched unordered with an
1783 				 * ordered chunk
1784 				goto err_out;
1785 			}
1786 		}
1787 	} else {
1788 		/*
1789 		 * It's a complete segment. Let's validate we don't have a
1790 		 * re-assembly going on with the same Stream/Seq (for
1791 		 * ordered) or in the same Stream for unordered.
1792 		 */
1793 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1794 		    chunk_flags);
1795 		if (sctp_find_reasm_entry(strm, msg_id, ordered, old_data)) {
1796 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1797 			    chunk_flags,
1798 			    msg_id);
1799 
1800 			goto err_out;
1801 		}
1802 	}
1803 	/* now do the tests */
1804 	if (((asoc->cnt_on_all_streams +
1805 	    asoc->cnt_on_reasm_queue +
1806 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1807 	    (((int)asoc->my_rwnd) <= 0)) {
1808 		/*
1809 		 * When we have NO room in the rwnd we check to make sure
1810 		 * the reader is doing its job...
1811 		 */
1812 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1813 			/* some to read, wake-up */
1814 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1815 			struct socket *so;
1816 
1817 			so = SCTP_INP_SO(stcb->sctp_ep);
1818 			atomic_add_int(&stcb->asoc.refcnt, 1);
1819 			SCTP_TCB_UNLOCK(stcb);
1820 			SCTP_SOCKET_LOCK(so, 1);
1821 			SCTP_TCB_LOCK(stcb);
1822 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1823 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1824 				/* assoc was freed while we were unlocked */
1825 				SCTP_SOCKET_UNLOCK(so, 1);
1826 				return (0);
1827 			}
1828 #endif
1829 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1830 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1831 			SCTP_SOCKET_UNLOCK(so, 1);
1832 #endif
1833 		}
1834 		/* now is it in the mapping array of what we have accepted? */
1835 		if (nch == NULL) {
1836 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1837 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1838 				/* Nope not in the valid range dump it */
1839 		dump_packet:
1840 				sctp_set_rwnd(stcb, asoc);
1841 				if ((asoc->cnt_on_all_streams +
1842 				    asoc->cnt_on_reasm_queue +
1843 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1844 					SCTP_STAT_INCR(sctps_datadropchklmt);
1845 				} else {
1846 					SCTP_STAT_INCR(sctps_datadroprwnd);
1847 				}
1848 				*break_flag = 1;
1849 				return (0);
1850 			}
1851 		} else {
1852 			if (control == NULL) {
1853 				goto dump_packet;
1854 			}
1855 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1856 				goto dump_packet;
1857 			}
1858 		}
1859 	}
1860 #ifdef SCTP_ASOCLOG_OF_TSNS
1861 	SCTP_TCB_LOCK_ASSERT(stcb);
1862 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1863 		asoc->tsn_in_at = 0;
1864 		asoc->tsn_in_wrapped = 1;
1865 	}
1866 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1867 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1868 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1869 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1870 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1871 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1872 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1873 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1874 	asoc->tsn_in_at++;
1875 #endif
1876 	/*
1877 	 * Before we continue let's validate that we are not being fooled by
1878 	 * an evil attacker. We can only have Nk chunks based on our TSN
1879 	 * spread allowed by the mapping array N * 8 bits, so there is no
1880 	 * way our stream sequence numbers could have wrapped. We of course
1881 	 * only validate the FIRST fragment so the bit must be set.
1882 	 */
1883 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1884 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1885 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1886 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1887 		/* The incoming sseq is behind where we last delivered? */
1888 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1889 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1890 
1891 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1892 		    asoc->strmin[strmno].last_sequence_delivered,
1893 		    tsn, strmno, msg_id);
1894 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1895 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1896 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1897 		*abort_flag = 1;
1898 		return (0);
1899 	}
1900 	/************************************
1901 	 * From here down we may find ch-> invalid
1902 	 * so it's a good idea NOT to use it.
1903 	 *************************************/
1904 	if (nch) {
1905 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1906 	} else {
1907 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1908 	}
1909 	if (last_chunk == 0) {
1910 		if (nch) {
1911 			dmbuf = SCTP_M_COPYM(*m,
1912 			    (offset + sizeof(struct sctp_idata_chunk)),
1913 			    the_len, M_NOWAIT);
1914 		} else {
1915 			dmbuf = SCTP_M_COPYM(*m,
1916 			    (offset + sizeof(struct sctp_data_chunk)),
1917 			    the_len, M_NOWAIT);
1918 		}
1919 #ifdef SCTP_MBUF_LOGGING
1920 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1921 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1922 		}
1923 #endif
1924 	} else {
1925 		/* We can steal the last chunk */
1926 		int l_len;
1927 
1928 		dmbuf = *m;
1929 		/* lop off the top part */
1930 		if (nch) {
1931 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1932 		} else {
1933 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1934 		}
1935 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1936 			l_len = SCTP_BUF_LEN(dmbuf);
1937 		} else {
1938 			/*
1939 			 * need to count up the size; hopefully we do not
1940 			 * hit this too often :-0
1941 			 */
1942 			struct mbuf *lat;
1943 
1944 			l_len = 0;
1945 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1946 				l_len += SCTP_BUF_LEN(lat);
1947 			}
1948 		}
1949 		if (l_len > the_len) {
1950 			/* Trim the padding bytes off the end too */
1951 			m_adj(dmbuf, -(l_len - the_len));
1952 		}
1953 	}
1954 	if (dmbuf == NULL) {
1955 		SCTP_STAT_INCR(sctps_nomem);
1956 		return (0);
1957 	}
1958 	/*
1959 	 * Now no matter what we need a control, get one if we don't have
1960 	 * one (we may have gotten it above when we found the message was
1961 	 * fragmented).
1962 	 */
1963 	if (control == NULL) {
1964 		sctp_alloc_a_readq(stcb, control);
1965 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1966 		    protocol_id,
1967 		    strmno, msg_id,
1968 		    chunk_flags,
1969 		    NULL, fsn, msg_id);
1970 		if (control == NULL) {
1971 			SCTP_STAT_INCR(sctps_nomem);
1972 			return (0);
1973 		}
1974 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1975 			control->data = dmbuf;
1976 			control->tail_mbuf = NULL;
1977 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1978 			control->top_fsn = control->fsn_included = fsn;
1979 		}
1980 		created_control = 1;
1981 	}
1982 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1983 	    chunk_flags, ordered, msg_id, control);
1984 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1985 	    TAILQ_EMPTY(&asoc->resetHead) &&
1986 	    ((ordered == 0) ||
1987 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1988 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1989 		/* Candidate for express delivery */
1990 		/*
1991 		 * It's not fragmented, no PD-API is up, nothing is in the
1992 		 * delivery queue, it's un-ordered OR ordered and the next to
1993 		 * deliver AND nothing else is stuck on the stream queue,
1994 		 * and there is room for it in the socket buffer. Let's just
1995 		 * stuff it up the buffer....
1996 		 */
1997 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1998 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1999 			asoc->highest_tsn_inside_nr_map = tsn;
2000 		}
2001 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
2002 		    control, msg_id);
2003 
2004 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2005 		    control, &stcb->sctp_socket->so_rcv,
2006 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2007 
2008 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2009 			/* for ordered, bump what we delivered */
2010 			strm->last_sequence_delivered++;
2011 		}
2012 		SCTP_STAT_INCR(sctps_recvexpress);
2013 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2014 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
2015 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2016 		}
2017 		control = NULL;
2018 		goto finish_express_del;
2019 	}
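	/*
	 * Note on the express-delivery test above: the (uint16_t) cast
	 * makes the "next in order" check wrap correctly at the 16-bit
	 * SSN boundary of old DATA traffic, e.g. with
	 * last_sequence_delivered == 65535 the expected next msg_id is
	 * (uint16_t)(65535 + 1) == 0.
	 */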
2020 	/* Now will we need a chunk too? */
2021 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2022 		sctp_alloc_a_chunk(stcb, chk);
2023 		if (chk == NULL) {
2024 			/* No memory so we drop the chunk */
2025 			SCTP_STAT_INCR(sctps_nomem);
2026 			if (last_chunk == 0) {
2027 				/* we copied it, free the copy */
2028 				sctp_m_freem(dmbuf);
2029 			}
2030 			return (0);
2031 		}
2032 		chk->rec.data.TSN_seq = tsn;
2033 		chk->no_fr_allowed = 0;
2034 		chk->rec.data.fsn_num = fsn;
2035 		chk->rec.data.stream_seq = msg_id;
2036 		chk->rec.data.stream_number = strmno;
2037 		chk->rec.data.payloadtype = protocol_id;
2038 		chk->rec.data.context = stcb->asoc.context;
2039 		chk->rec.data.doing_fast_retransmit = 0;
2040 		chk->rec.data.rcv_flags = chunk_flags;
2041 		chk->asoc = asoc;
2042 		chk->send_size = the_len;
2043 		chk->whoTo = net;
2044 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (msg_id: %u)\n",
2045 		    chk,
2046 		    control, msg_id);
2047 		atomic_add_int(&net->ref_count, 1);
2048 		chk->data = dmbuf;
2049 	}
2050 	/* Set the appropriate TSN mark */
2051 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2052 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2053 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2054 			asoc->highest_tsn_inside_nr_map = tsn;
2055 		}
2056 	} else {
2057 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2058 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2059 			asoc->highest_tsn_inside_map = tsn;
2060 		}
2061 	}
2062 	/* Now is it complete (i.e. not fragmented)? */
2063 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2064 		/*
2065 		 * Special check for when streams are resetting. We could be
2066 		 * smarter about this and check the actual stream to see if
2067 		 * it is not being reset... that way we would not create
2068 		 * head-of-line blocking (HOLB) amongst streams being reset
2069 		 * and those not being reset.
2070 		 *
2071 		 */
2072 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2073 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2074 			/*
2075 			 * yep, it's past where we need to reset... go ahead
2076 			 * and queue it.
2077 			 */
2078 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2079 				/* first one on */
2080 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2081 			} else {
2082 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2083 				unsigned char inserted = 0;
2084 
2085 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2086 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2087 
2088 						continue;
2089 					} else {
2090 						/* found it */
2091 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2092 						inserted = 1;
2093 						break;
2094 					}
2095 				}
2096 				if (inserted == 0) {
2097 					/*
2098 					 * Nothing already queued had a
2099 					 * larger TSN, so this control must
2100 					 * be put at the end.
2101 					 */
2102 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2103 				}
2104 			}
2105 			goto finish_express_del;
2106 		}
2107 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2108 			/* queue directly into socket buffer */
2109 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2110 			    control, msg_id);
2111 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2112 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2113 			    control,
2114 			    &stcb->sctp_socket->so_rcv, 1,
2115 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2116 
2117 		} else {
2118 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2119 			    msg_id);
2120 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2121 			if (*abort_flag) {
2122 				if (last_chunk) {
2123 					*m = NULL;
2124 				}
2125 				return (0);
2126 			}
2127 		}
2128 		goto finish_express_del;
2129 	}
2130 	/* If we reach here it's a reassembly */
2131 	need_reasm_check = 1;
2132 	SCTPDBG(SCTP_DEBUG_XXX,
2133 	    "Queue data to stream for reasm control: %p msg_id: %u\n",
2134 	    control, msg_id);
2135 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2136 	if (*abort_flag) {
2137 		/*
2138 		 * the assoc is now gone and chk was put onto the reasm
2139 		 * queue, which has all been freed.
2140 		 */
2141 		if (last_chunk) {
2142 			*m = NULL;
2143 		}
2144 		return (0);
2145 	}
2146 finish_express_del:
2147 	/* Here we tidy up things */
2148 	if (tsn == (asoc->cumulative_tsn + 1)) {
2149 		/* Update cum-ack */
2150 		asoc->cumulative_tsn = tsn;
2151 	}
2152 	if (last_chunk) {
2153 		*m = NULL;
2154 	}
2155 	if (ordered) {
2156 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2157 	} else {
2158 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2159 	}
2160 	SCTP_STAT_INCR(sctps_recvdata);
2161 	/* Set it present please */
2162 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2163 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2164 	}
2165 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2166 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2167 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2168 	}
2169 	/* check the special flag for stream resets */
2170 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2171 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2172 		/*
2173 		 * We have finished working through the backlogged TSNs; now
2174 		 * it is time to reset streams. 1: call reset function. 2: free
2175 		 * pending_reply space. 3: distribute any chunks in
2176 		 * pending_reply_queue.
2177 		 */
2178 		struct sctp_queued_to_read *ctl, *nctl;
2179 
2180 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2181 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2182 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2183 		SCTP_FREE(liste, SCTP_M_STRESET);
2184 		/* sa_ignore FREED_MEMORY */
2185 		liste = TAILQ_FIRST(&asoc->resetHead);
2186 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2187 			/* All can be removed */
2188 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2189 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2190 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2191 				if (*abort_flag) {
2192 					return (0);
2193 				}
2194 			}
2195 		} else {
2196 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2197 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2198 					break;
2199 				}
2200 				/*
2201 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2202 				 * process it which is the NOT of
2203 				 * ctl->sinfo_tsn > liste->tsn
2204 				 */
2205 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2206 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2207 				if (*abort_flag) {
2208 					return (0);
2209 				}
2210 			}
2211 		}
2212 		/*
2213 		 * Now service re-assembly to pick up anything that has been
2214 	 * held on the reassembly queue.
2215 		 */
2216 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2217 		need_reasm_check = 0;
2218 	}
2219 	if (need_reasm_check) {
2220 		/* Another one waits? */
2221 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2222 	}
2223 	return (1);
2224 }
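/*
 * Return-value summary for sctp_process_a_data_chunk() (as implemented
 * above): 1 means the chunk was consumed and accounted in a mapping
 * array; 0 covers every early-out (duplicate TSN, out-of-range gap,
 * invalid stream, resource shortage, or an abort), with *abort_flag and
 * *break_flag telling the caller how to proceed.
 */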
2225 
2226 static const int8_t sctp_map_lookup_tab[256] = {
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 4,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 5,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 4,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 6,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 4,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 5,
2239 	0, 1, 0, 2, 0, 1, 0, 3,
2240 	0, 1, 0, 2, 0, 1, 0, 4,
2241 	0, 1, 0, 2, 0, 1, 0, 3,
2242 	0, 1, 0, 2, 0, 1, 0, 7,
2243 	0, 1, 0, 2, 0, 1, 0, 3,
2244 	0, 1, 0, 2, 0, 1, 0, 4,
2245 	0, 1, 0, 2, 0, 1, 0, 3,
2246 	0, 1, 0, 2, 0, 1, 0, 5,
2247 	0, 1, 0, 2, 0, 1, 0, 3,
2248 	0, 1, 0, 2, 0, 1, 0, 4,
2249 	0, 1, 0, 2, 0, 1, 0, 3,
2250 	0, 1, 0, 2, 0, 1, 0, 6,
2251 	0, 1, 0, 2, 0, 1, 0, 3,
2252 	0, 1, 0, 2, 0, 1, 0, 4,
2253 	0, 1, 0, 2, 0, 1, 0, 3,
2254 	0, 1, 0, 2, 0, 1, 0, 5,
2255 	0, 1, 0, 2, 0, 1, 0, 3,
2256 	0, 1, 0, 2, 0, 1, 0, 4,
2257 	0, 1, 0, 2, 0, 1, 0, 3,
2258 	0, 1, 0, 2, 0, 1, 0, 8
2259 };
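/*
 * sctp_map_lookup_tab[val] is the number of consecutive one-bits at the
 * low end of val, i.e. the bit index of the first zero bit.  A hedged
 * sketch of the equivalent (slower) computation:
 *
 *	int cnt = 0;
 *
 *	while (val & 0x1) {
 *		cnt++;
 *		val >>= 1;
 *	}
 *	... cnt == sctp_map_lookup_tab[val] for every uint8_t val ...
 */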
2260 
2261 
2262 void
2263 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2264 {
2265 	/*
2266 	 * Now we also need to check the mapping array in a couple of ways.
2267 	 * 1) Did we move the cum-ack point?
2268 	 *
2269 	 * When you first glance at this you might think that all entries that
2270 	 * make up the position of the cum-ack would be in the nr-mapping
2271 	 * array only, i.e. things up to the cum-ack are always
2272 	 * deliverable. That's true with one exception: when it's a fragmented
2273 	 * message we may not deliver the data until some threshold (or all
2274 	 * of it) is in place. So we must OR the nr_mapping_array and
2275 	 * mapping_array to get a true picture of the cum-ack.
2276 	 */
2277 	struct sctp_association *asoc;
2278 	int at;
2279 	uint8_t val;
2280 	int slide_from, slide_end, lgap, distance;
2281 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2282 
2283 	asoc = &stcb->asoc;
2284 
2285 	old_cumack = asoc->cumulative_tsn;
2286 	old_base = asoc->mapping_array_base_tsn;
2287 	old_highest = asoc->highest_tsn_inside_map;
2288 	/*
2289 	 * We could probably improve this a small bit by calculating the
2290 	 * offset of the current cum-ack as the starting point.
2291 	 */
2292 	at = 0;
2293 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2294 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2295 		if (val == 0xff) {
2296 			at += 8;
2297 		} else {
2298 			/* there is a 0 bit */
2299 			at += sctp_map_lookup_tab[val];
2300 			break;
2301 		}
2302 	}
2303 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2304 
2305 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2306 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2307 #ifdef INVARIANTS
2308 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2309 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2310 #else
2311 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2312 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2313 		sctp_print_mapping_array(asoc);
2314 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2315 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2316 		}
2317 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2318 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2319 #endif
2320 	}
2321 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2322 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2323 	} else {
2324 		highest_tsn = asoc->highest_tsn_inside_map;
2325 	}
2326 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2327 		/* The complete array was completed by a single FR */
2328 		/* highest becomes the cum-ack */
2329 		int clr;
2330 
2331 #ifdef INVARIANTS
2332 		unsigned int i;
2333 
2334 #endif
2335 
2336 		/* clear the array */
2337 		clr = ((at + 7) >> 3);
2338 		if (clr > asoc->mapping_array_size) {
2339 			clr = asoc->mapping_array_size;
2340 		}
2341 		memset(asoc->mapping_array, 0, clr);
2342 		memset(asoc->nr_mapping_array, 0, clr);
2343 #ifdef INVARIANTS
2344 		for (i = 0; i < asoc->mapping_array_size; i++) {
2345 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2346 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2347 				sctp_print_mapping_array(asoc);
2348 			}
2349 		}
2350 #endif
2351 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2352 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2353 	} else if (at >= 8) {
2354 		/* we can slide the mapping array down */
2355 		/* slide_from holds where we hit the first NON 0xff byte */
2356 
2357 		/*
2358 		 * now calculate the ceiling of the move using our highest
2359 		 * TSN value
2360 		 */
2361 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2362 		slide_end = (lgap >> 3);
2363 		if (slide_end < slide_from) {
2364 			sctp_print_mapping_array(asoc);
2365 #ifdef INVARIANTS
2366 			panic("impossible slide");
2367 #else
2368 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2369 			    lgap, slide_end, slide_from, at);
2370 			return;
2371 #endif
2372 		}
2373 		if (slide_end > asoc->mapping_array_size) {
2374 #ifdef INVARIANTS
2375 			panic("would overrun buffer");
2376 #else
2377 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2378 			    asoc->mapping_array_size, slide_end);
2379 			slide_end = asoc->mapping_array_size;
2380 #endif
2381 		}
2382 		distance = (slide_end - slide_from) + 1;
2383 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2384 			sctp_log_map(old_base, old_cumack, old_highest,
2385 			    SCTP_MAP_PREPARE_SLIDE);
2386 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2387 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2388 		}
2389 		if (distance + slide_from > asoc->mapping_array_size ||
2390 		    distance < 0) {
2391 			/*
2392 			 * Here we do NOT slide forward the array so that
2393 			 * hopefully when more data comes in to fill it up
2394 			 * we will be able to slide it forward. Really I
2395 			 * don't think this should happen :-0
2396 			 */
2397 
2398 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2399 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2400 				    (uint32_t) asoc->mapping_array_size,
2401 				    SCTP_MAP_SLIDE_NONE);
2402 			}
2403 		} else {
2404 			int ii;
2405 
2406 			for (ii = 0; ii < distance; ii++) {
2407 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2408 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2409 
2410 			}
2411 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2412 				asoc->mapping_array[ii] = 0;
2413 				asoc->nr_mapping_array[ii] = 0;
2414 			}
2415 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2416 				asoc->highest_tsn_inside_map += (slide_from << 3);
2417 			}
2418 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2419 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2420 			}
2421 			asoc->mapping_array_base_tsn += (slide_from << 3);
2422 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2423 				sctp_log_map(asoc->mapping_array_base_tsn,
2424 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2425 				    SCTP_MAP_SLIDE_RESULT);
2426 			}
2427 		}
2428 	}
2429 }
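/*
 * A worked sliding example (a sketch): suppose mapping_array_base_tsn is
 * 1000 and the first three OR'ed map bytes are 0xff, so 'at' reaches 24
 * and the cum-ack becomes 1023.  If the highest TSN maps into byte 6,
 * slide_from is 3, slide_end is 6 and distance is 4: bytes 3..6 are
 * copied down to 0..3, the tail is zeroed, and mapping_array_base_tsn
 * advances by 3 << 3 == 24 to 1024.
 */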
2430 
2431 void
2432 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2433 {
2434 	struct sctp_association *asoc;
2435 	uint32_t highest_tsn;
2436 
2437 	asoc = &stcb->asoc;
2438 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2439 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2440 	} else {
2441 		highest_tsn = asoc->highest_tsn_inside_map;
2442 	}
2443 
2444 	/*
2445 	 * Now we need to see if we need to queue a sack or just start the
2446 	 * timer (if allowed).
2447 	 */
2448 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2449 		/*
2450 		 * OK, special case: in the SHUTDOWN-SENT state we make
2451 		 * sure the SACK timer is off and instead send a SHUTDOWN
2452 		 * and a SACK.
2453 		 */
2454 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2455 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2456 			    stcb->sctp_ep, stcb, NULL,
2457 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2458 		}
2459 		sctp_send_shutdown(stcb,
2460 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2461 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2462 	} else {
2463 		int is_a_gap;
2464 
2465 		/* is there a gap now ? */
2466 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2467 
2468 		/*
2469 		 * CMT DAC algorithm: increase number of packets received
2470 		 * since last ack
2471 		 */
2472 		stcb->asoc.cmt_dac_pkts_rcvd++;
2473 
2474 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2475 							 * SACK */
2476 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2477 							 * longer is one */
2478 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2479 		    (is_a_gap) ||	/* is still a gap */
2480 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2481 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2482 		    ) {
2483 
2484 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2485 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2486 			    (stcb->asoc.send_sack == 0) &&
2487 			    (stcb->asoc.numduptsns == 0) &&
2488 			    (stcb->asoc.delayed_ack) &&
2489 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2490 
2491 				/*
2492 				 * CMT DAC algorithm: With CMT, delay acks
2493 				 * even in the face of
2494 				 *
2495 				 * reordering. Therefore, acks that do not
2496 				 * have to be sent because of the above
2497 				 * reasons will be delayed. That is, acks
2498 				 * that would have been sent due to gap
2499 				 * reports will be delayed with DAC. Start
2500 				 * the delayed ack timer.
2501 				 */
2502 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2503 				    stcb->sctp_ep, stcb, NULL);
2504 			} else {
2505 				/*
2506 				 * Ok we must build a SACK since the timer
2507 				 * is pending, we got our first packet OR
2508 				 * there are gaps or duplicates.
2509 				 */
2510 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2511 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2512 			}
2513 		} else {
2514 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2515 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2516 				    stcb->sctp_ep, stcb, NULL);
2517 			}
2518 		}
2519 	}
2520 }
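/*
 * Decision sketch for the function above: a SACK goes out immediately
 * when any of send_sack, duplicates, a (closed or still open) gap,
 * disabled delayed acks, or the sack_freq packet limit applies;
 * otherwise the delayed-ack timer is (re)started.  The CMT-DAC branch
 * only converts acks that would have been sent due to gap reports back
 * into delayed acks.
 */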
2521 
2522 int
2523 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2524     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2525     struct sctp_nets *net, uint32_t * high_tsn)
2526 {
2527 	struct sctp_chunkhdr *ch, chunk_buf;
2528 	struct sctp_association *asoc;
2529 	int num_chunks = 0;	/* number of control chunks processed */
2530 	int stop_proc = 0;
2531 	int chk_length, break_flag, last_chunk;
2532 	int abort_flag = 0, was_a_gap;
2533 	struct mbuf *m;
2534 	uint32_t highest_tsn;
2535 
2536 	/* set the rwnd */
2537 	sctp_set_rwnd(stcb, &stcb->asoc);
2538 
2539 	m = *mm;
2540 	SCTP_TCB_LOCK_ASSERT(stcb);
2541 	asoc = &stcb->asoc;
2542 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2543 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2544 	} else {
2545 		highest_tsn = asoc->highest_tsn_inside_map;
2546 	}
2547 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2548 	/*
2549 	 * setup where we got the last DATA packet from for any SACK that
2550 	 * may need to go out. Don't bump the net. This is done ONLY when a
2551 	 * chunk is assigned.
2552 	 */
2553 	asoc->last_data_chunk_from = net;
2554 
2555 	/*-
2556 	 * Now before we proceed we must figure out if this is a wasted
2557 	 * cluster... i.e. it is a small packet sent in and yet the driver
2558 	 * underneath allocated a full cluster for it. If so we must copy it
2559 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2560 	 * with cluster starvation. Note for __Panda__ we don't do this
2561 	 * since it has clusters all the way down to 64 bytes.
2562 	 */
2563 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2564 		/* we only handle mbufs that are singletons.. not chains */
2565 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2566 		if (m) {
2567 			/* ok lets see if we can copy the data up */
2568 			caddr_t *from, *to;
2569 
2570 			/* get the pointers and copy */
2571 			to = mtod(m, caddr_t *);
2572 			from = mtod((*mm), caddr_t *);
2573 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2574 			/* copy the length and free up the old */
2575 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2576 			sctp_m_freem(*mm);
2577 			/* success, back copy */
2578 			*mm = m;
2579 		} else {
2580 			/* We are in trouble in the mbuf world .. yikes */
2581 			m = *mm;
2582 		}
2583 	}
2584 	/* get pointer to the first chunk header */
2585 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2586 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2587 	if (ch == NULL) {
2588 		return (1);
2589 	}
2590 	/*
2591 	 * process all DATA chunks...
2592 	 */
2593 	*high_tsn = asoc->cumulative_tsn;
2594 	break_flag = 0;
2595 	asoc->data_pkts_seen++;
2596 	while (stop_proc == 0) {
2597 		/* validate chunk length */
2598 		chk_length = ntohs(ch->chunk_length);
2599 		if (length - *offset < chk_length) {
2600 			/* all done, mutilated chunk */
2601 			stop_proc = 1;
2602 			continue;
2603 		}
2604 		if ((asoc->idata_supported == 1) &&
2605 		    (ch->chunk_type == SCTP_DATA)) {
2606 			struct mbuf *op_err;
2607 			char msg[SCTP_DIAG_INFO_LEN];
2608 
2609 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2610 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2611 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2612 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2613 			return (2);
2614 		}
2615 		if ((asoc->idata_supported == 0) &&
2616 		    (ch->chunk_type == SCTP_IDATA)) {
2617 			struct mbuf *op_err;
2618 			char msg[SCTP_DIAG_INFO_LEN];
2619 
2620 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2621 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2622 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2623 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2624 			return (2);
2625 		}
2626 		if ((ch->chunk_type == SCTP_DATA) ||
2627 		    (ch->chunk_type == SCTP_IDATA)) {
2628 			int clen;
2629 
2630 			if (ch->chunk_type == SCTP_DATA) {
2631 				clen = sizeof(struct sctp_data_chunk);
2632 			} else {
2633 				clen = sizeof(struct sctp_idata_chunk);
2634 			}
2635 			if (chk_length < clen) {
2636 				/*
2637 				 * Need to send an abort since we had an
2638 				 * invalid data chunk.
2639 				 */
2640 				struct mbuf *op_err;
2641 				char msg[SCTP_DIAG_INFO_LEN];
2642 
2643 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2644 				    chk_length);
2645 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2646 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2647 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2648 				return (2);
2649 			}
2650 #ifdef SCTP_AUDITING_ENABLED
2651 			sctp_audit_log(0xB1, 0);
2652 #endif
2653 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2654 				last_chunk = 1;
2655 			} else {
2656 				last_chunk = 0;
2657 			}
2658 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2659 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2660 			    last_chunk, ch->chunk_type)) {
2661 				num_chunks++;
2662 			}
2663 			if (abort_flag)
2664 				return (2);
2665 
2666 			if (break_flag) {
2667 				/*
2668 				 * Set because of out of rwnd space and no
2669 				 * drop rep space left.
2670 				 */
2671 				stop_proc = 1;
2672 				continue;
2673 			}
2674 		} else {
2675 			/* not a data chunk in the data region */
2676 			switch (ch->chunk_type) {
2677 			case SCTP_INITIATION:
2678 			case SCTP_INITIATION_ACK:
2679 			case SCTP_SELECTIVE_ACK:
2680 			case SCTP_NR_SELECTIVE_ACK:
2681 			case SCTP_HEARTBEAT_REQUEST:
2682 			case SCTP_HEARTBEAT_ACK:
2683 			case SCTP_ABORT_ASSOCIATION:
2684 			case SCTP_SHUTDOWN:
2685 			case SCTP_SHUTDOWN_ACK:
2686 			case SCTP_OPERATION_ERROR:
2687 			case SCTP_COOKIE_ECHO:
2688 			case SCTP_COOKIE_ACK:
2689 			case SCTP_ECN_ECHO:
2690 			case SCTP_ECN_CWR:
2691 			case SCTP_SHUTDOWN_COMPLETE:
2692 			case SCTP_AUTHENTICATION:
2693 			case SCTP_ASCONF_ACK:
2694 			case SCTP_PACKET_DROPPED:
2695 			case SCTP_STREAM_RESET:
2696 			case SCTP_FORWARD_CUM_TSN:
2697 			case SCTP_ASCONF:
2698 				{
2699 					/*
2700 					 * Now, what do we do with KNOWN
2701 					 * chunks that are NOT in the right
2702 					 * place?
2703 					 *
2704 					 * For now, I do nothing but ignore
2705 					 * them. We may later want to add
2706 					 * sysctl stuff to switch out and do
2707 					 * either an ABORT() or possibly
2708 					 * process them.
2709 					 */
2710 					struct mbuf *op_err;
2711 					char msg[SCTP_DIAG_INFO_LEN];
2712 
2713 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2714 					    ch->chunk_type);
2715 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2716 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2717 					return (2);
2718 				}
2719 			default:
2720 				/* unknown chunk type, use bit rules */
2721 				if (ch->chunk_type & 0x40) {
2722 					/* Add a error report to the queue */
2723 					struct mbuf *op_err;
2724 					struct sctp_gen_error_cause *cause;
2725 
2726 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2727 					    0, M_NOWAIT, 1, MT_DATA);
2728 					if (op_err != NULL) {
2729 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2730 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2731 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2732 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2733 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2734 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2735 							sctp_queue_op_err(stcb, op_err);
2736 						} else {
2737 							sctp_m_freem(op_err);
2738 						}
2739 					}
2740 				}
2741 				if ((ch->chunk_type & 0x80) == 0) {
2742 					/* discard the rest of this packet */
2743 					stop_proc = 1;
2744 				}	/* else skip this bad chunk and
2745 					 * continue... */
2746 				break;
2747 			}	/* switch of chunk type */
2748 		}
2749 		*offset += SCTP_SIZE32(chk_length);
2750 		if ((*offset >= length) || stop_proc) {
2751 			/* no more data left in the mbuf chain */
2752 			stop_proc = 1;
2753 			continue;
2754 		}
2755 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2756 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2757 		if (ch == NULL) {
2758 			*offset = length;
2759 			stop_proc = 1;
2760 			continue;
2761 		}
2762 	}
2763 	if (break_flag) {
2764 		/*
2765 		 * we need to report rwnd overrun drops.
2766 		 */
2767 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2768 	}
2769 	if (num_chunks) {
2770 		/*
2771 		 * Did we get data? If so, update the time for auto-close and
2772 		 * give peer credit for being alive.
2773 		 */
2774 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2775 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2776 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2777 			    stcb->asoc.overall_error_count,
2778 			    0,
2779 			    SCTP_FROM_SCTP_INDATA,
2780 			    __LINE__);
2781 		}
2782 		stcb->asoc.overall_error_count = 0;
2783 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2784 	}
2785 	/* now service all of the reassm queue if needed */
2786 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2787 		/* Assure that we ack right away */
2788 		stcb->asoc.send_sack = 1;
2789 	}
2790 	/* Start a sack timer or QUEUE a SACK for sending */
2791 	sctp_sack_check(stcb, was_a_gap);
2792 	return (0);
2793 }
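/*
 * Return-code sketch for sctp_process_data() (as implemented above):
 * 0 - packet walked to completion and the SACK machinery was updated;
 * 1 - the first chunk header could not be pulled up, caller drops the
 *     packet;
 * 2 - the association was aborted while processing (e.g. a protocol
 *     violation or a DATA/I-DATA mismatch); nothing may be referenced
 *     afterwards.
 */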
2794 
2795 static int
2796 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2797     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2798     int *num_frs,
2799     uint32_t * biggest_newly_acked_tsn,
2800     uint32_t * this_sack_lowest_newack,
2801     int *rto_ok)
2802 {
2803 	struct sctp_tmit_chunk *tp1;
2804 	unsigned int theTSN;
2805 	int j, wake_him = 0, circled = 0;
2806 
2807 	/* Recover the tp1 we last saw */
2808 	tp1 = *p_tp1;
2809 	if (tp1 == NULL) {
2810 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2811 	}
2812 	for (j = frag_strt; j <= frag_end; j++) {
2813 		theTSN = j + last_tsn;
2814 		while (tp1) {
2815 			if (tp1->rec.data.doing_fast_retransmit)
2816 				(*num_frs) += 1;
2817 
2818 			/*-
2819 			 * CMT: CUCv2 algorithm. For each TSN being
2820 			 * processed from the sent queue, track the
2821 			 * next expected pseudo-cumack, or
2822 			 * rtx_pseudo_cumack, if required. Separate
2823 			 * cumack trackers for first transmissions,
2824 			 * and retransmissions.
2825 			 */
2826 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2827 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2828 			    (tp1->snd_count == 1)) {
2829 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2830 				tp1->whoTo->find_pseudo_cumack = 0;
2831 			}
2832 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2834 			    (tp1->snd_count > 1)) {
2835 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2836 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2837 			}
2838 			if (tp1->rec.data.TSN_seq == theTSN) {
2839 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2840 					/*-
2841 					 * must be held until
2842 					 * cum-ack passes
2843 					 */
2844 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2845 						/*-
2846 						 * If it is less than RESEND, it is
2847 						 * now no-longer in flight.
2848 						 * Higher values may already be set
2849 						 * via previous Gap Ack Blocks...
2850 						 * i.e. ACKED or RESEND.
2851 						 */
2852 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2853 						    *biggest_newly_acked_tsn)) {
2854 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2855 						}
2856 						/*-
2857 						 * CMT: SFR algo (and HTNA) - set
2858 						 * saw_newack to 1 for dest being
2859 						 * newly acked. update
2860 						 * this_sack_highest_newack if
2861 						 * appropriate.
2862 						 */
2863 						if (tp1->rec.data.chunk_was_revoked == 0)
2864 							tp1->whoTo->saw_newack = 1;
2865 
2866 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2867 						    tp1->whoTo->this_sack_highest_newack)) {
2868 							tp1->whoTo->this_sack_highest_newack =
2869 							    tp1->rec.data.TSN_seq;
2870 						}
2871 						/*-
2872 						 * CMT DAC algo: also update
2873 						 * this_sack_lowest_newack
2874 						 */
2875 						if (*this_sack_lowest_newack == 0) {
2876 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2877 								sctp_log_sack(*this_sack_lowest_newack,
2878 								    last_tsn,
2879 								    tp1->rec.data.TSN_seq,
2880 								    0,
2881 								    0,
2882 								    SCTP_LOG_TSN_ACKED);
2883 							}
2884 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2885 						}
2886 						/*-
2887 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2888 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2889 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2890 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2891 						 * Separate pseudo_cumack trackers for first transmissions and
2892 						 * retransmissions.
2893 						 */
2894 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2895 							if (tp1->rec.data.chunk_was_revoked == 0) {
2896 								tp1->whoTo->new_pseudo_cumack = 1;
2897 							}
2898 							tp1->whoTo->find_pseudo_cumack = 1;
2899 						}
2900 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2901 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2902 						}
2903 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2904 							if (tp1->rec.data.chunk_was_revoked == 0) {
2905 								tp1->whoTo->new_pseudo_cumack = 1;
2906 							}
2907 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2908 						}
2909 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2910 							sctp_log_sack(*biggest_newly_acked_tsn,
2911 							    last_tsn,
2912 							    tp1->rec.data.TSN_seq,
2913 							    frag_strt,
2914 							    frag_end,
2915 							    SCTP_LOG_TSN_ACKED);
2916 						}
2917 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2918 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2919 							    tp1->whoTo->flight_size,
2920 							    tp1->book_size,
2921 							    (uint32_t) (uintptr_t) tp1->whoTo,
2922 							    tp1->rec.data.TSN_seq);
2923 						}
2924 						sctp_flight_size_decrease(tp1);
2925 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2926 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2927 							    tp1);
2928 						}
2929 						sctp_total_flight_decrease(stcb, tp1);
2930 
2931 						tp1->whoTo->net_ack += tp1->send_size;
2932 						if (tp1->snd_count < 2) {
2933 							/*-
2934 							 * True non-retransmitted chunk
2935 							 */
2936 							tp1->whoTo->net_ack2 += tp1->send_size;
2937 
2938 							/*-
2939 							 * update RTO too?
2940 							 */
2941 							if (tp1->do_rtt) {
2942 								if (*rto_ok) {
2943 									tp1->whoTo->RTO =
2944 									    sctp_calculate_rto(stcb,
2945 									    &stcb->asoc,
2946 									    tp1->whoTo,
2947 									    &tp1->sent_rcv_time,
2948 									    sctp_align_safe_nocopy,
2949 									    SCTP_RTT_FROM_DATA);
2950 									*rto_ok = 0;
2951 								}
2952 								if (tp1->whoTo->rto_needed == 0) {
2953 									tp1->whoTo->rto_needed = 1;
2954 								}
2955 								tp1->do_rtt = 0;
2956 							}
2957 						}
2958 					}
2959 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2960 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2961 						    stcb->asoc.this_sack_highest_gap)) {
2962 							stcb->asoc.this_sack_highest_gap =
2963 							    tp1->rec.data.TSN_seq;
2964 						}
2965 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2966 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2967 #ifdef SCTP_AUDITING_ENABLED
2968 							sctp_audit_log(0xB2,
2969 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2970 #endif
2971 						}
2972 					}
2973 					/*-
2974 					 * All chunks NOT UNSENT fall through here and are marked
2975 					 * (leave PR-SCTP ones that are to skip alone though)
2976 					 */
2977 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2978 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2979 						tp1->sent = SCTP_DATAGRAM_MARKED;
2980 					}
2981 					if (tp1->rec.data.chunk_was_revoked) {
2982 						/* deflate the cwnd */
2983 						tp1->whoTo->cwnd -= tp1->book_size;
2984 						tp1->rec.data.chunk_was_revoked = 0;
2985 					}
2986 					/* NR Sack code here */
2987 					if (nr_sacking &&
2988 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2989 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2990 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2991 #ifdef INVARIANTS
2992 						} else {
2993 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2994 #endif
2995 						}
2996 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2997 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2998 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2999 							stcb->asoc.trigger_reset = 1;
3000 						}
3001 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3002 						if (tp1->data) {
3003 							/*
3004 							 * sa_ignore
3005 							 * NO_NULL_CHK
3006 							 */
3007 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3008 							sctp_m_freem(tp1->data);
3009 							tp1->data = NULL;
3010 						}
3011 						wake_him++;
3012 					}
3013 				}
3014 				break;
3015 			}	/* if (tp1->TSN_seq == theTSN) */
3016 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3017 				break;
3018 			}
3019 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3020 			if ((tp1 == NULL) && (circled == 0)) {
3021 				circled++;
3022 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3023 			}
3024 		}		/* end while (tp1) */
3025 		if (tp1 == NULL) {
3026 			circled = 0;
3027 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3028 		}
3029 		/* In case the fragments were not in order we must reset */
3030 	}			/* end for (j = fragStart */
3031 	*p_tp1 = tp1;
3032 	return (wake_him);	/* Return value only used for nr-sack */
3033 }
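/*
 * Gap-ack offsets are relative to the cumulative TSN ack carried in the
 * SACK (RFC 4960, Section 3.3.4).  A worked example for the loop above:
 * with last_tsn == 1000 and a block of frag_strt == 2, frag_end == 4,
 * theTSN takes the values 1002, 1003 and 1004.
 */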
3034 
3035 
3036 static int
3037 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3038     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3039     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3040     int num_seg, int num_nr_seg, int *rto_ok)
3041 {
3042 	struct sctp_gap_ack_block *frag, block;
3043 	struct sctp_tmit_chunk *tp1;
3044 	int i;
3045 	int num_frs = 0;
3046 	int chunk_freed;
3047 	int non_revocable;
3048 	uint16_t frag_strt, frag_end, prev_frag_end;
3049 
3050 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3051 	prev_frag_end = 0;
3052 	chunk_freed = 0;
3053 
3054 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3055 		if (i == num_seg) {
3056 			prev_frag_end = 0;
3057 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3058 		}
3059 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3060 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3061 		*offset += sizeof(block);
3062 		if (frag == NULL) {
3063 			return (chunk_freed);
3064 		}
3065 		frag_strt = ntohs(frag->start);
3066 		frag_end = ntohs(frag->end);
3067 
3068 		if (frag_strt > frag_end) {
3069 			/* This gap report is malformed, skip it. */
3070 			continue;
3071 		}
3072 		if (frag_strt <= prev_frag_end) {
3073 			/* This gap report is not in order, so restart. */
3074 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3075 		}
3076 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3077 			*biggest_tsn_acked = last_tsn + frag_end;
3078 		}
3079 		if (i < num_seg) {
3080 			non_revocable = 0;
3081 		} else {
3082 			non_revocable = 1;
3083 		}
3084 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3085 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3086 		    this_sack_lowest_newack, rto_ok)) {
3087 			chunk_freed = 1;
3088 		}
3089 		prev_frag_end = frag_end;
3090 	}
3091 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3092 		if (num_frs)
3093 			sctp_log_fr(*biggest_tsn_acked,
3094 			    *biggest_newly_acked_tsn,
3095 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3096 	}
3097 	return (chunk_freed);
3098 }
3099 
3100 static void
3101 sctp_check_for_revoked(struct sctp_tcb *stcb,
3102     struct sctp_association *asoc, uint32_t cumack,
3103     uint32_t biggest_tsn_acked)
3104 {
3105 	struct sctp_tmit_chunk *tp1;
3106 
3107 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3108 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3109 			/*
3110 			 * ok this guy is either ACKED or MARKED. If it is
3111 			 * ACKED it has been previously acked but not this
3112 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3113 			 * again.
3114 			 */
3115 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3116 				break;
3117 			}
3118 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3119 				/* it has been revoked */
3120 				tp1->sent = SCTP_DATAGRAM_SENT;
3121 				tp1->rec.data.chunk_was_revoked = 1;
3122 				/*
3123 				 * We must add this stuff back in to assure
3124 				 * timers and such get started.
3125 				 */
3126 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3127 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3128 					    tp1->whoTo->flight_size,
3129 					    tp1->book_size,
3130 					    (uint32_t) (uintptr_t) tp1->whoTo,
3131 					    tp1->rec.data.TSN_seq);
3132 				}
3133 				sctp_flight_size_increase(tp1);
3134 				sctp_total_flight_increase(stcb, tp1);
3135 				/*
3136 				 * We inflate the cwnd to compensate for our
3137 				 * artificial inflation of the flight_size.
3138 				 */
3139 				tp1->whoTo->cwnd += tp1->book_size;
3140 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3141 					sctp_log_sack(asoc->last_acked_seq,
3142 					    cumack,
3143 					    tp1->rec.data.TSN_seq,
3144 					    0,
3145 					    0,
3146 					    SCTP_LOG_TSN_REVOKED);
3147 				}
3148 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3149 				/* it has been re-acked in this SACK */
3150 				tp1->sent = SCTP_DATAGRAM_ACKED;
3151 			}
3152 		}
3153 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3154 			break;
3155 	}
3156 }
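/*
 * Revocation example for the walk above (a sketch): if TSN 1005 was
 * gap-acked by an earlier SACK (DATAGRAM_ACKED) but the current SACK's
 * gap blocks no longer cover it while biggest_tsn_acked is 1010, the
 * peer has reneged; the chunk goes back to DATAGRAM_SENT and the flight
 * size and cwnd are re-inflated so it is treated as in flight again.
 */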
3157 
3158 
3159 static void
3160 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3161     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3162 {
3163 	struct sctp_tmit_chunk *tp1;
3164 	int strike_flag = 0;
3165 	struct timeval now;
3166 	int tot_retrans = 0;
3167 	uint32_t sending_seq;
3168 	struct sctp_nets *net;
3169 	int num_dests_sacked = 0;
3170 
3171 	/*
3172 	 * Select the sending_seq: either the TSN of the next chunk ready to
3173 	 * be sent but not yet transmitted, or the next seq we will assign.
3174 	 */
3175 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3176 	if (tp1 == NULL) {
3177 		sending_seq = asoc->sending_seq;
3178 	} else {
3179 		sending_seq = tp1->rec.data.TSN_seq;
3180 	}
3181 
3182 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3183 	if ((asoc->sctp_cmt_on_off > 0) &&
3184 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3185 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3186 			if (net->saw_newack)
3187 				num_dests_sacked++;
3188 		}
3189 	}
3190 	if (stcb->asoc.prsctp_supported) {
3191 		(void)SCTP_GETTIME_TIMEVAL(&now);
3192 	}
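	/* Now walk everything outstanding and decide what to strike. */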
3193 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3194 		strike_flag = 0;
3195 		if (tp1->no_fr_allowed) {
3196 			/* this one had a timeout or something */
3197 			continue;
3198 		}
3199 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3200 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3201 				sctp_log_fr(biggest_tsn_newly_acked,
3202 				    tp1->rec.data.TSN_seq,
3203 				    tp1->sent,
3204 				    SCTP_FR_LOG_CHECK_STRIKE);
3205 		}
3206 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3207 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3208 			/* done */
3209 			break;
3210 		}
3211 		if (stcb->asoc.prsctp_supported) {
3212 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3213 				/* Is it expired? */
3214 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3215 					/* Yes so drop it */
3216 					if (tp1->data != NULL) {
3217 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3218 						    SCTP_SO_NOT_LOCKED);
3219 					}
3220 					continue;
3221 				}
3222 			}
3223 		}
3224 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3225 			/* we are beyond the tsn in the sack  */
3226 			break;
3227 		}
3228 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3229 			/* either a RESEND, ACKED, or MARKED */
3230 			/* skip */
3231 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3232 				/* Continue striking FWD-TSN chunks */
3233 				tp1->rec.data.fwd_tsn_cnt++;
3234 			}
3235 			continue;
3236 		}
3237 		/*
3238 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3239 		 */
3240 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3241 			/*
3242 			 * No new acks were received for data sent to this
3243 			 * dest. Therefore, according to the SFR algo for
3244 			 * CMT, no data sent to this dest can be marked for
3245 			 * FR using this SACK.
3246 			 */
3247 			continue;
3248 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3249 		    tp1->whoTo->this_sack_highest_newack)) {
3250 			/*
3251 			 * CMT: New acks were received for data sent to
3252 			 * this dest. But no new acks were seen for data
3253 			 * sent after tp1. Therefore, according to the SFR
3254 			 * algo for CMT, tp1 cannot be marked for FR using
3255 			 * this SACK. This step covers part of the DAC algo
3256 			 * and the HTNA algo as well.
3257 			 */
3258 			continue;
3259 		}
3260 		/*
3261 		 * Here we check to see if we have already done a FR and, if
3262 		 * so, whether the biggest TSN we saw in the SACK is smaller
3263 		 * than the recovery point. If it is we don't strike the
3264 		 * TSN... otherwise we CAN strike the TSN.
3265 		 */
3266 		/*
3267 		 * @@@ JRI: Check for CMT if (accum_moved &&
3268 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3269 		 * 0)) {
3270 		 */
3271 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3272 			/*
3273 			 * Strike the TSN if in fast-recovery and cum-ack
3274 			 * moved.
3275 			 */
3276 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3277 				sctp_log_fr(biggest_tsn_newly_acked,
3278 				    tp1->rec.data.TSN_seq,
3279 				    tp1->sent,
3280 				    SCTP_FR_LOG_STRIKE_CHUNK);
3281 			}
3282 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3283 				tp1->sent++;
3284 			}
3285 			if ((asoc->sctp_cmt_on_off > 0) &&
3286 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3287 				/*
3288 				 * CMT DAC algorithm: if the SACK flag is set
3289 				 * to 0, the lowest_newack test will not pass
3290 				 * because it would have been set to the
3291 				 * cumack earlier. If the chunk is not already
3292 				 * to be rtx'd, the SACK is not mixed, and tp1
3293 				 * is not between two sacked TSNs, then mark
3294 				 * it by one more. NOTE that we mark one
3295 				 * additional time since the SACK DAC flag
3296 				 * indicates that two packets have been
3297 				 * received after this missing TSN.
3298 				 */
3299 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3300 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3301 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3302 						sctp_log_fr(16 + num_dests_sacked,
3303 						    tp1->rec.data.TSN_seq,
3304 						    tp1->sent,
3305 						    SCTP_FR_LOG_STRIKE_CHUNK);
3306 					}
3307 					tp1->sent++;
3308 				}
3309 			}
3310 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3311 		    (asoc->sctp_cmt_on_off == 0)) {
3312 			/*
3313 			 * For those that have done a FR we must take
3314 			 * special consideration if we strike, i.e. the
3315 			 * biggest_newly_acked must be higher than the
3316 			 * sending_seq at the time we did the FR.
3317 			 */
3318 			if (
3319 #ifdef SCTP_FR_TO_ALTERNATE
3320 			/*
3321 			 * If FR's go to new networks, then we must only do
3322 			 * this for singly homed asoc's. However if the FR's
3323 			 * go to the same network (Armando's work) then it's
3324 			 * ok to FR multiple times.
3325 			 */
3326 			    (asoc->numnets < 2)
3327 #else
3328 			    (1)
3329 #endif
3330 			    ) {
3331 
3332 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3333 				    tp1->rec.data.fast_retran_tsn)) {
3334 					/*
3335 					 * Strike the TSN, since this ack is
3336 					 * beyond where things were when we
3337 					 * did a FR.
3338 					 */
3339 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3340 						sctp_log_fr(biggest_tsn_newly_acked,
3341 						    tp1->rec.data.TSN_seq,
3342 						    tp1->sent,
3343 						    SCTP_FR_LOG_STRIKE_CHUNK);
3344 					}
3345 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3346 						tp1->sent++;
3347 					}
3348 					strike_flag = 1;
3349 					if ((asoc->sctp_cmt_on_off > 0) &&
3350 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3351 						/*
3352 						 * CMT DAC algorithm: if
3353 						 * the SACK flag is set to
3354 						 * 0, the lowest_newack
3355 						 * test will not pass
3356 						 * because it would have
3357 						 * been set to the cumack
3358 						 * earlier. If the chunk is
3359 						 * not already to be rtx'd,
3360 						 * the SACK is not mixed,
3361 						 * and tp1 is not between
3362 						 * two sacked TSNs, then
3363 						 * mark it by one more.
3364 						 * NOTE that we mark one
3365 						 * additional time since
3366 						 * the SACK DAC flag shows
3367 						 * that two packets arrived
3368 						 * after this missing TSN.
3369 						 */
3370 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3371 						    (num_dests_sacked == 1) &&
3372 						    SCTP_TSN_GT(this_sack_lowest_newack,
3373 						    tp1->rec.data.TSN_seq)) {
3374 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3375 								sctp_log_fr(32 + num_dests_sacked,
3376 								    tp1->rec.data.TSN_seq,
3377 								    tp1->sent,
3378 								    SCTP_FR_LOG_STRIKE_CHUNK);
3379 							}
3380 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3381 								tp1->sent++;
3382 							}
3383 						}
3384 					}
3385 				}
3386 			}
3387 			/*
3388 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3389 			 * algo covers HTNA.
3390 			 */
3391 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3392 		    biggest_tsn_newly_acked)) {
3393 			/*
3394 			 * We don't strike these: this is the HTNA
3395 			 * algorithm, i.e. we don't strike if our TSN is
3396 			 * larger than the Highest TSN Newly Acked.
3397 			 */
3398 			;
3399 		} else {
3400 			/* Strike the TSN */
3401 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3402 				sctp_log_fr(biggest_tsn_newly_acked,
3403 				    tp1->rec.data.TSN_seq,
3404 				    tp1->sent,
3405 				    SCTP_FR_LOG_STRIKE_CHUNK);
3406 			}
3407 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3408 				tp1->sent++;
3409 			}
3410 			if ((asoc->sctp_cmt_on_off > 0) &&
3411 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3412 				/*
3413 				 * CMT DAC algorithm: if the SACK flag is set
3414 				 * to 0, the lowest_newack test will not pass
3415 				 * because it would have been set to the
3416 				 * cumack earlier. If the chunk is not already
3417 				 * to be rtx'd, the SACK is not mixed, and tp1
3418 				 * is not between two sacked TSNs, then mark
3419 				 * it by one more. NOTE that we mark one
3420 				 * additional time since the SACK DAC flag
3421 				 * indicates that two packets have been
3422 				 * received after this missing TSN.
3423 				 */
3424 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3425 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3426 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3427 						sctp_log_fr(48 + num_dests_sacked,
3428 						    tp1->rec.data.TSN_seq,
3429 						    tp1->sent,
3430 						    SCTP_FR_LOG_STRIKE_CHUNK);
3431 					}
3432 					tp1->sent++;
3433 				}
3434 			}
3435 		}
3436 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3437 			struct sctp_nets *alt;
3438 
3439 			/* fix counts and things */
3440 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3441 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3442 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3443 				    tp1->book_size,
3444 				    (uint32_t) (uintptr_t) tp1->whoTo,
3445 				    tp1->rec.data.TSN_seq);
3446 			}
3447 			if (tp1->whoTo) {
3448 				tp1->whoTo->net_ack++;
3449 				sctp_flight_size_decrease(tp1);
3450 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3451 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3452 					    tp1);
3453 				}
3454 			}
3455 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3456 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3457 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3458 			}
3459 			/* add back to the rwnd */
3460 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3461 
3462 			/* remove from the total flight */
3463 			sctp_total_flight_decrease(stcb, tp1);
3464 
3465 			if ((stcb->asoc.prsctp_supported) &&
3466 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3467 				/*
3468 				 * Has it been retransmitted tv_sec times? The
3469 				 * retransmission limit is stored in tv_sec.
3470 				 */
3471 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3472 					/* Yes, so drop it */
3473 					if (tp1->data != NULL) {
3474 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3475 						    SCTP_SO_NOT_LOCKED);
3476 					}
3477 					/* Make sure to flag we had a FR */
3478 					tp1->whoTo->net_ack++;
3479 					continue;
3480 				}
3481 			}
3482 			/*
3483 			 * SCTP_PRINTF("OK, we are now ready to FR this
3484 			 * guy\n");
3485 			 */
3486 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3487 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3488 				    0, SCTP_FR_MARKED);
3489 			}
3490 			if (strike_flag) {
3491 				/* This is a subsequent FR */
3492 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3493 			}
3494 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3495 			if (asoc->sctp_cmt_on_off > 0) {
3496 				/*
3497 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3498 				 * If CMT is being used, then pick dest with
3499 				 * largest ssthresh for any retransmission.
3500 				 */
3501 				tp1->no_fr_allowed = 1;
3502 				alt = tp1->whoTo;
3503 				/* sa_ignore NO_NULL_CHK */
3504 				if (asoc->sctp_cmt_pf > 0) {
3505 					/*
3506 					 * JRS 5/18/07 - If CMT PF is on,
3507 					 * use the PF version of
3508 					 * find_alt_net()
3509 					 */
3510 					alt = sctp_find_alternate_net(stcb, alt, 2);
3511 				} else {
3512 					/*
3513 					 * JRS 5/18/07 - If only CMT is on,
3514 					 * use the CMT version of
3515 					 * find_alt_net()
3516 					 */
3517 					/* sa_ignore NO_NULL_CHK */
3518 					alt = sctp_find_alternate_net(stcb, alt, 1);
3519 				}
3520 				if (alt == NULL) {
3521 					alt = tp1->whoTo;
3522 				}
3523 				/*
3524 				 * CUCv2: If a different dest is picked for
3525 				 * the retransmission, then new
3526 				 * (rtx-)pseudo_cumack needs to be tracked
3527 				 * for orig dest. Let CUCv2 track new (rtx-)
3528 				 * pseudo-cumack always.
3529 				 */
3530 				if (tp1->whoTo) {
3531 					tp1->whoTo->find_pseudo_cumack = 1;
3532 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3533 				}
3534 			} else {/* CMT is OFF */
3535 
3536 #ifdef SCTP_FR_TO_ALTERNATE
3537 				/* Can we find an alternate? */
3538 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3539 #else
3540 				/*
3541 				 * default behavior is to NOT retransmit
3542 				 * FR's to an alternate. Armando Caro's
3543 				 * paper details why.
3544 				 */
3545 				alt = tp1->whoTo;
3546 #endif
3547 			}
3548 
3549 			tp1->rec.data.doing_fast_retransmit = 1;
3550 			tot_retrans++;
3551 			/* mark the sending seq for possible subsequent FR's */
3552 			/*
3553 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3554 			 * (uint32_t)tpi->rec.data.TSN_seq);
3555 			 */
3556 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3557 				/*
3558 				 * If the send queue is empty then sending_seq
3559 				 * is the next sequence number that will be
3560 				 * assigned, so we use it to mark how far we
3561 				 * had sent when this FR was done.
3562 				 */
3563 				tp1->rec.data.fast_retran_tsn = sending_seq;
3564 			} else {
3565 				/*
3566 				 * If there are chunks on the send queue
3567 				 * (unsent data that has made it from the
3568 				 * stream queues but not out the door), we
3569 				 * take the first one (which will have the
3570 				 * lowest TSN) to mark how far we had sent
3571 				 * when this FR was done.
3572 				 */
3573 				struct sctp_tmit_chunk *ttt;
3574 
3575 				ttt = TAILQ_FIRST(&asoc->send_queue);
3576 				tp1->rec.data.fast_retran_tsn =
3577 				    ttt->rec.data.TSN_seq;
3578 			}
3579 
3580 			if (tp1->do_rtt) {
3581 				/*
3582 				 * this guy had an RTO calculation pending on
3583 				 * it, cancel it
3584 				 */
3585 				if ((tp1->whoTo != NULL) &&
3586 				    (tp1->whoTo->rto_needed == 0)) {
3587 					tp1->whoTo->rto_needed = 1;
3588 				}
3589 				tp1->do_rtt = 0;
3590 			}
3591 			if (alt != tp1->whoTo) {
3592 				/* yes, there is an alternate. */
3593 				sctp_free_remote_addr(tp1->whoTo);
3594 				/* sa_ignore FREED_MEMORY */
3595 				tp1->whoTo = alt;
3596 				atomic_add_int(&alt->ref_count, 1);
3597 			}
3598 		}
3599 	}
3600 }
3601 
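/*
 * PR-SCTP (RFC 3758): try to move the Advanced.Peer.Ack.Point past
 * abandoned chunks at the head of the sent_queue.  Returns the chunk
 * holding the (possibly unchanged) ack point so the caller can build a
 * FORWARD-TSN, or NULL if no advance is possible.
 */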
3602 struct sctp_tmit_chunk *
3603 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3604     struct sctp_association *asoc)
3605 {
3606 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3607 	struct timeval now;
3608 	int now_filled = 0;
3609 
3610 	if (asoc->prsctp_supported == 0) {
3611 		return (NULL);
3612 	}
3613 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3614 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3615 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3616 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3617 			/* no chance to advance, out of here */
3618 			break;
3619 		}
3620 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3621 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3622 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3623 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3624 				    asoc->advanced_peer_ack_point,
3625 				    tp1->rec.data.TSN_seq, 0, 0);
3626 			}
3627 		}
3628 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3629 			/*
3630 			 * We can't fwd-tsn past any that are reliable, i.e.
3631 			 * ones retransmitted until the asoc fails.
3632 			 */
3633 			break;
3634 		}
3635 		if (!now_filled) {
3636 			(void)SCTP_GETTIME_TIMEVAL(&now);
3637 			now_filled = 1;
3638 		}
3639 		/*
3640 		 * Now we have a chunk which is marked for another
3641 		 * retransmission to a PR-stream but may have run out of its
3642 		 * chances already OR has been marked to be skipped. Can we
3643 		 * skip it if it's a resend?
3644 		 */
3645 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3646 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3647 			/*
3648 			 * Is this one marked for resend and is its time
3649 			 * now up?
3650 			 */
3651 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3652 				/* Yes so drop it */
3653 				if (tp1->data) {
3654 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3655 					    1, SCTP_SO_NOT_LOCKED);
3656 				}
3657 			} else {
3658 				/*
3659 				 * No, we are done when we hit one marked for
3660 				 * resend whose time has not expired.
3661 				 */
3662 				break;
3663 			}
3664 		}
3665 		/*
3666 		 * Ok, if this chunk is marked to be dropped we can clean it
3667 		 * up, advance our peer ack point, and then check the next
3668 		 * chunk.
3669 		 */
3670 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3671 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3672 			/* the advanced_peer_ack_point moves forward */
3673 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3674 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3675 				a_adv = tp1;
3676 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3677 				/* No update but we do save the chk */
3678 				a_adv = tp1;
3679 			}
3680 		} else {
3681 			/*
3682 			 * If it is still in RESEND we can advance no
3683 			 * further
3684 			 */
3685 			break;
3686 		}
3687 	}
3688 	return (a_adv);
3689 }
3690 
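/*
 * Sanity-check the flight size bookkeeping against the sent_queue.
 * Returns 0 when consistent; without INVARIANTS it returns 1 on a
 * mismatch so the caller can rebuild the counts (with INVARIANTS we
 * panic instead).
 */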
3691 static int
3692 sctp_fs_audit(struct sctp_association *asoc)
3693 {
3694 	struct sctp_tmit_chunk *chk;
3695 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3696 	int ret;
3697 
3698 #ifndef INVARIANTS
3699 	int entry_flight, entry_cnt;
3700 
3701 #endif
3702 
3703 	ret = 0;
3704 #ifndef INVARIANTS
3705 	entry_flight = asoc->total_flight;
3706 	entry_cnt = asoc->total_flight_count;
3707 #endif
3708 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3709 		return (0);
3710 
3711 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3712 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3713 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3714 			    chk->rec.data.TSN_seq,
3715 			    chk->send_size,
3716 			    chk->snd_count);
3717 			inflight++;
3718 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3719 			resend++;
3720 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3721 			inbetween++;
3722 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3723 			above++;
3724 		} else {
3725 			acked++;
3726 		}
3727 	}
3728 
3729 	if ((inflight > 0) || (inbetween > 0)) {
3730 #ifdef INVARIANTS
3731 		panic("Flight size-express incorrect? \n");
3732 #else
3733 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3734 		    entry_flight, entry_cnt);
3735 
3736 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3737 		    inflight, inbetween, resend, above, acked);
3738 		ret = 1;
3739 #endif
3740 	}
3741 	return (ret);
3742 }
3743 
3744 
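/*
 * The peer's window re-opened while a window probe was outstanding.
 * Take the probe chunk out of the in-flight accounting and mark it for
 * resend so it goes out again as ordinary data.
 */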
3745 static void
3746 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3747     struct sctp_association *asoc,
3748     struct sctp_tmit_chunk *tp1)
3749 {
3750 	tp1->window_probe = 0;
3751 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3752 		/* TSNs were skipped; we do NOT move it back. */
3753 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3754 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3755 		    tp1->book_size,
3756 		    (uint32_t) (uintptr_t) tp1->whoTo,
3757 		    tp1->rec.data.TSN_seq);
3758 		return;
3759 	}
3760 	/* First setup this by shrinking flight */
3761 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3762 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3763 		    tp1);
3764 	}
3765 	sctp_flight_size_decrease(tp1);
3766 	sctp_total_flight_decrease(stcb, tp1);
3767 	/* Now mark for resend */
3768 	tp1->sent = SCTP_DATAGRAM_RESEND;
3769 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3770 
3771 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3772 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3773 		    tp1->whoTo->flight_size,
3774 		    tp1->book_size,
3775 		    (uint32_t) (uintptr_t) tp1->whoTo,
3776 		    tp1->rec.data.TSN_seq);
3777 	}
3778 }
3779 
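/*
 * Express (fast path) SACK processing: used when the SACK carries only
 * a cumulative ack, i.e. no gap-ack blocks and no duplicate TSNs, so
 * none of the gap/strike machinery is needed.
 */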
3780 void
3781 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3782     uint32_t rwnd, int *abort_now, int ecne_seen)
3783 {
3784 	struct sctp_nets *net;
3785 	struct sctp_association *asoc;
3786 	struct sctp_tmit_chunk *tp1, *tp2;
3787 	uint32_t old_rwnd;
3788 	int win_probe_recovery = 0;
3789 	int win_probe_recovered = 0;
3790 	int j, done_once = 0;
3791 	int rto_ok = 1;
3792 	uint32_t send_s;
3793 
3794 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3795 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3796 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3797 	}
3798 	SCTP_TCB_LOCK_ASSERT(stcb);
3799 #ifdef SCTP_ASOCLOG_OF_TSNS
3800 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3801 	stcb->asoc.cumack_log_at++;
3802 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3803 		stcb->asoc.cumack_log_at = 0;
3804 	}
3805 #endif
3806 	asoc = &stcb->asoc;
3807 	old_rwnd = asoc->peers_rwnd;
3808 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3809 		/* old ack */
3810 		return;
3811 	} else if (asoc->last_acked_seq == cumack) {
3812 		/* Window update SACK: cum-ack unchanged, only rwnd may move */
3813 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3814 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3815 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3816 			/* SWS sender side engages */
3817 			asoc->peers_rwnd = 0;
3818 		}
3819 		if (asoc->peers_rwnd > old_rwnd) {
3820 			goto again;
3821 		}
3822 		return;
3823 	}
3824 	/* First setup for CC stuff */
3825 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3826 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3827 			/* Drag along the window_tsn for cwr's */
3828 			net->cwr_window_tsn = cumack;
3829 		}
3830 		net->prev_cwnd = net->cwnd;
3831 		net->net_ack = 0;
3832 		net->net_ack2 = 0;
3833 
3834 		/*
3835 		 * CMT: Reset CUC and Fast recovery algo variables before
3836 		 * SACK processing
3837 		 */
3838 		net->new_pseudo_cumack = 0;
3839 		net->will_exit_fast_recovery = 0;
3840 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3841 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3842 		}
3843 	}
3844 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3845 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3846 		    sctpchunk_listhead);
3847 		send_s = tp1->rec.data.TSN_seq + 1;
3848 	} else {
3849 		send_s = asoc->sending_seq;
3850 	}
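	/*
	 * send_s is one past the highest TSN we ever sent; a cumulative
	 * ack at or above it acks data that was never transmitted and is
	 * a protocol violation, so abort the association.
	 */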
3851 	if (SCTP_TSN_GE(cumack, send_s)) {
3852 		struct mbuf *op_err;
3853 		char msg[SCTP_DIAG_INFO_LEN];
3854 
3855 		*abort_now = 1;
3856 		/* XXX */
3857 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3858 		    cumack, send_s);
3859 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3860 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3861 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3862 		return;
3863 	}
3864 	asoc->this_sack_highest_gap = cumack;
3865 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3866 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3867 		    stcb->asoc.overall_error_count,
3868 		    0,
3869 		    SCTP_FROM_SCTP_INDATA,
3870 		    __LINE__);
3871 	}
3872 	stcb->asoc.overall_error_count = 0;
3873 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3874 		/* process the new consecutive TSN first */
3875 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3876 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3877 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3878 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3879 				}
3880 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3881 					/*
3882 					 * If it is less than ACKED, it is
3883 					 * now no longer in flight. Higher
3884 					 * values may occur during marking
3885 					 */
3886 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3887 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3888 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3889 							    tp1->whoTo->flight_size,
3890 							    tp1->book_size,
3891 							    (uint32_t) (uintptr_t) tp1->whoTo,
3892 							    tp1->rec.data.TSN_seq);
3893 						}
3894 						sctp_flight_size_decrease(tp1);
3895 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3896 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3897 							    tp1);
3898 						}
3899 						/* sa_ignore NO_NULL_CHK */
3900 						sctp_total_flight_decrease(stcb, tp1);
3901 					}
3902 					tp1->whoTo->net_ack += tp1->send_size;
3903 					if (tp1->snd_count < 2) {
3904 						/*
3905 					 * True non-retransmitted
3906 						 * chunk
3907 						 */
3908 						tp1->whoTo->net_ack2 +=
3909 						    tp1->send_size;
3910 
3911 						/* update RTO too? */
3912 						if (tp1->do_rtt) {
3913 							if (rto_ok) {
3914 								tp1->whoTo->RTO =
3915 								/*
3916 								/* sa_ignore NO_NULL_CHK */
3921 								    asoc, tp1->whoTo,
3922 								    &tp1->sent_rcv_time,
3923 								    sctp_align_safe_nocopy,
3924 								    SCTP_RTT_FROM_DATA);
3925 								rto_ok = 0;
3926 							}
3927 							if (tp1->whoTo->rto_needed == 0) {
3928 								tp1->whoTo->rto_needed = 1;
3929 							}
3930 							tp1->do_rtt = 0;
3931 						}
3932 					}
3933 					/*
3934 					 * CMT: CUCv2 algorithm. From the
3935 					 * cumack'd TSNs, for each TSN being
3936 					 * acked for the first time, set the
3937 					 * following variables for the
3938 					 * corresponding destination.
3939 					 * new_pseudo_cumack will trigger a
3940 					 * cwnd update.
3941 					 * find_(rtx_)pseudo_cumack will
3942 					 * trigger search for the next
3943 					 * expected (rtx-)pseudo-cumack.
3944 					 */
3945 					tp1->whoTo->new_pseudo_cumack = 1;
3946 					tp1->whoTo->find_pseudo_cumack = 1;
3947 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3948 
3949 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3950 						/* sa_ignore NO_NULL_CHK */
3951 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3952 					}
3953 				}
3954 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3955 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3956 				}
3957 				if (tp1->rec.data.chunk_was_revoked) {
3958 					/* deflate the cwnd */
3959 					tp1->whoTo->cwnd -= tp1->book_size;
3960 					tp1->rec.data.chunk_was_revoked = 0;
3961 				}
3962 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3963 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3964 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3965 #ifdef INVARIANTS
3966 					} else {
3967 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3968 #endif
3969 					}
3970 				}
3971 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3972 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3973 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3974 					asoc->trigger_reset = 1;
3975 				}
3976 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3977 				if (tp1->data) {
3978 					/* sa_ignore NO_NULL_CHK */
3979 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3980 					sctp_m_freem(tp1->data);
3981 					tp1->data = NULL;
3982 				}
3983 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3984 					sctp_log_sack(asoc->last_acked_seq,
3985 					    cumack,
3986 					    tp1->rec.data.TSN_seq,
3987 					    0,
3988 					    0,
3989 					    SCTP_LOG_FREE_SENT);
3990 				}
3991 				asoc->sent_queue_cnt--;
3992 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3993 			} else {
3994 				break;
3995 			}
3996 		}
3997 
3998 	}
3999 	/* sa_ignore NO_NULL_CHK */
4000 	if (stcb->sctp_socket) {
4001 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4002 		struct socket *so;
4003 
4004 #endif
4005 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4006 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4007 			/* sa_ignore NO_NULL_CHK */
4008 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4009 		}
4010 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4011 		so = SCTP_INP_SO(stcb->sctp_ep);
4012 		atomic_add_int(&stcb->asoc.refcnt, 1);
4013 		SCTP_TCB_UNLOCK(stcb);
4014 		SCTP_SOCKET_LOCK(so, 1);
4015 		SCTP_TCB_LOCK(stcb);
4016 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4017 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4018 			/* assoc was freed while we were unlocked */
4019 			SCTP_SOCKET_UNLOCK(so, 1);
4020 			return;
4021 		}
4022 #endif
4023 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4024 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4025 		SCTP_SOCKET_UNLOCK(so, 1);
4026 #endif
4027 	} else {
4028 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4029 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4030 		}
4031 	}
4032 
4033 	/* JRS - Use the congestion control given in the CC module */
4034 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4035 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4036 			if (net->net_ack2 > 0) {
4037 				/*
4038 				 * Karn's rule applies to clearing the error
4039 				 * count; this is optional.
4040 				 */
4041 				net->error_count = 0;
4042 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4043 					/* addr came good */
4044 					net->dest_state |= SCTP_ADDR_REACHABLE;
4045 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4046 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4047 				}
4048 				if (net == stcb->asoc.primary_destination) {
4049 					if (stcb->asoc.alternate) {
4050 						/*
4051 						 * release the alternate,
4052 						 * primary is good
4053 						 */
4054 						sctp_free_remote_addr(stcb->asoc.alternate);
4055 						stcb->asoc.alternate = NULL;
4056 					}
4057 				}
4058 				if (net->dest_state & SCTP_ADDR_PF) {
4059 					net->dest_state &= ~SCTP_ADDR_PF;
4060 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4061 					    stcb->sctp_ep, stcb, net,
4062 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4063 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4064 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4065 					/* Done with this net */
4066 					net->net_ack = 0;
4067 				}
4068 				/* restore any doubled timers */
4069 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4070 				if (net->RTO < stcb->asoc.minrto) {
4071 					net->RTO = stcb->asoc.minrto;
4072 				}
4073 				if (net->RTO > stcb->asoc.maxrto) {
4074 					net->RTO = stcb->asoc.maxrto;
4075 				}
4076 			}
4077 		}
4078 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4079 	}
4080 	asoc->last_acked_seq = cumack;
4081 
4082 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4083 		/* nothing left in-flight */
4084 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4085 			net->flight_size = 0;
4086 			net->partial_bytes_acked = 0;
4087 		}
4088 		asoc->total_flight = 0;
4089 		asoc->total_flight_count = 0;
4090 	}
4091 	/* RWND update: advertised rwnd minus bytes (and overhead) in flight */
4092 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4093 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4094 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4095 		/* SWS sender side engages */
4096 		asoc->peers_rwnd = 0;
4097 	}
4098 	if (asoc->peers_rwnd > old_rwnd) {
4099 		win_probe_recovery = 1;
4100 	}
4101 	/* Now assure a timer is running wherever data remains queued */
4102 again:
4103 	j = 0;
4104 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4105 		int to_ticks;
4106 
4107 		if (win_probe_recovery && (net->window_probe)) {
4108 			win_probe_recovered = 1;
4109 			/*
4110 			 * Find the first chunk that was used with the window
4111 			 * probe and clear its sent state.
4112 			 */
4113 			/* sa_ignore FREED_MEMORY */
4114 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4115 				if (tp1->window_probe) {
4116 					/* move back to data send queue */
4117 					sctp_window_probe_recovery(stcb, asoc, tp1);
4118 					break;
4119 				}
4120 			}
4121 		}
4122 		if (net->RTO == 0) {
4123 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4124 		} else {
4125 			to_ticks = MSEC_TO_TICKS(net->RTO);
4126 		}
4127 		if (net->flight_size) {
4128 			j++;
4129 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 			    sctp_timeout_handler, &net->rxt_timer);
4131 			if (net->window_probe) {
4132 				net->window_probe = 0;
4133 			}
4134 		} else {
4135 			if (net->window_probe) {
4136 				/*
4137 				 * In window probes we must assure a timer
4138 				 * is still running there
4139 				 */
4140 				net->window_probe = 0;
4141 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4142 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4143 					    sctp_timeout_handler, &net->rxt_timer);
4144 				}
4145 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4146 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4147 				    stcb, net,
4148 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4149 			}
4150 		}
4151 	}
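	/*
	 * No timer was started but data is still outstanding and nothing
	 * is marked for retransmission: audit (and if needed rebuild) the
	 * flight-size accounting, then take one more pass from "again".
	 */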
4152 	if ((j == 0) &&
4153 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4154 	    (asoc->sent_queue_retran_cnt == 0) &&
4155 	    (win_probe_recovered == 0) &&
4156 	    (done_once == 0)) {
4157 		/*
4158 		 * huh, this should not happen unless all packets are
4159 		 * PR-SCTP and marked to skip of course.
4160 		 */
4161 		if (sctp_fs_audit(asoc)) {
4162 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4163 				net->flight_size = 0;
4164 			}
4165 			asoc->total_flight = 0;
4166 			asoc->total_flight_count = 0;
4167 			asoc->sent_queue_retran_cnt = 0;
4168 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4169 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4170 					sctp_flight_size_increase(tp1);
4171 					sctp_total_flight_increase(stcb, tp1);
4172 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4173 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4174 				}
4175 			}
4176 		}
4177 		done_once = 1;
4178 		goto again;
4179 	}
4180 	/**********************************/
4181 	/* Now what about shutdown issues */
4182 	/**********************************/
4183 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4184 		/* nothing left on sendqueue.. consider done */
4185 		/* clean up */
4186 		if ((asoc->stream_queue_cnt == 1) &&
4187 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4188 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4189 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4190 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4191 		}
4192 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4193 		    (asoc->stream_queue_cnt == 0)) {
4194 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4195 				/* Need to abort here */
4196 				struct mbuf *op_err;
4197 
4198 		abort_out_now:
4199 				*abort_now = 1;
4200 				/* XXX */
4201 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4202 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4203 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4204 				return;
4205 			} else {
4206 				struct sctp_nets *netp;
4207 
4208 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4209 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4210 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4211 				}
4212 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4213 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4214 				sctp_stop_timers_for_shutdown(stcb);
4215 				if (asoc->alternate) {
4216 					netp = asoc->alternate;
4217 				} else {
4218 					netp = asoc->primary_destination;
4219 				}
4220 				sctp_send_shutdown(stcb, netp);
4221 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4222 				    stcb->sctp_ep, stcb, netp);
4223 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4224 				    stcb->sctp_ep, stcb, netp);
4225 			}
4226 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4227 		    (asoc->stream_queue_cnt == 0)) {
4228 			struct sctp_nets *netp;
4229 
4230 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4231 				goto abort_out_now;
4232 			}
4233 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4234 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4235 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4236 			sctp_stop_timers_for_shutdown(stcb);
4237 			if (asoc->alternate) {
4238 				netp = asoc->alternate;
4239 			} else {
4240 				netp = asoc->primary_destination;
4241 			}
4242 			sctp_send_shutdown_ack(stcb, netp);
4243 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4244 			    stcb->sctp_ep, stcb, netp);
4245 		}
4246 	}
4247 	/*********************************************/
4248 	/* Here we perform PR-SCTP procedures        */
4249 	/* (section 4.2)                             */
4250 	/*********************************************/
4251 	/* C1. update advancedPeerAckPoint */
4252 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4253 		asoc->advanced_peer_ack_point = cumack;
4254 	}
4255 	/* PR-Sctp issues need to be addressed too */
4256 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4257 		struct sctp_tmit_chunk *lchk;
4258 		uint32_t old_adv_peer_ack_point;
4259 
4260 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4261 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4262 		/* C3. See if we need to send a Fwd-TSN */
4263 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4264 			/*
4265 			 * ISSUE with ECN, see FWD-TSN processing.
4266 			 */
4267 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4268 				send_forward_tsn(stcb, asoc);
4269 			} else if (lchk) {
4270 				/* try to FR fwd-tsn's that get lost too */
4271 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4272 					send_forward_tsn(stcb, asoc);
4273 				}
4274 			}
4275 		}
4276 		if (lchk) {
4277 			/* Assure a timer is up */
4278 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4279 			    stcb->sctp_ep, stcb, lchk->whoTo);
4280 		}
4281 	}
4282 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4283 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4284 		    rwnd,
4285 		    stcb->asoc.peers_rwnd,
4286 		    stcb->asoc.total_flight,
4287 		    stcb->asoc.total_output_queue_size);
4288 	}
4289 }
4290 
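/*
 * Slow path SACK processing: handles SACKs carrying gap-ack blocks,
 * NR gap-ack blocks and/or duplicate TSN reports in addition to the
 * cumulative ack.
 */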
4291 void
4292 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4293     struct sctp_tcb *stcb,
4294     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4295     int *abort_now, uint8_t flags,
4296     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4297 {
4298 	struct sctp_association *asoc;
4299 	struct sctp_tmit_chunk *tp1, *tp2;
4300 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4301 	uint16_t wake_him = 0;
4302 	uint32_t send_s = 0;
4303 	long j;
4304 	int accum_moved = 0;
4305 	int will_exit_fast_recovery = 0;
4306 	uint32_t a_rwnd, old_rwnd;
4307 	int win_probe_recovery = 0;
4308 	int win_probe_recovered = 0;
4309 	struct sctp_nets *net = NULL;
4310 	int done_once;
4311 	int rto_ok = 1;
4312 	uint8_t reneged_all = 0;
4313 	uint8_t cmt_dac_flag;
4314 
4315 	/*
4316 	 * we take any chance we can to service our queues since we cannot
4317 	 * get awoken when the socket is read from :<
4318 	 */
4319 	/*
4320 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4321 	 * old sack, if so discard. 2) If there is nothing left in the send
4322 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4323 	 * too, update any rwnd change and verify no timers are running,
4324 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4325 	 * moved; process these first and note that it moved. 4) Process any
4326 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4327 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4328 	 * sync up flightsizes and things, stop all timers and also check
4329 	 * for shutdown_pending state. If so then go ahead and send off the
4330 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4331 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4332 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4333 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4334 	 * if in shutdown_recv state.
4335 	 */
4336 	SCTP_TCB_LOCK_ASSERT(stcb);
4337 	/* CMT DAC algo */
4338 	this_sack_lowest_newack = 0;
4339 	SCTP_STAT_INCR(sctps_slowpath_sack);
4340 	last_tsn = cum_ack;
4341 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4342 #ifdef SCTP_ASOCLOG_OF_TSNS
4343 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4344 	stcb->asoc.cumack_log_at++;
4345 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4346 		stcb->asoc.cumack_log_at = 0;
4347 	}
4348 #endif
4349 	a_rwnd = rwnd;
4350 
4351 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4352 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4353 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4354 	}
4355 	old_rwnd = stcb->asoc.peers_rwnd;
4356 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4357 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4358 		    stcb->asoc.overall_error_count,
4359 		    0,
4360 		    SCTP_FROM_SCTP_INDATA,
4361 		    __LINE__);
4362 	}
4363 	stcb->asoc.overall_error_count = 0;
4364 	asoc = &stcb->asoc;
4365 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4366 		sctp_log_sack(asoc->last_acked_seq,
4367 		    cum_ack,
4368 		    0,
4369 		    num_seg,
4370 		    num_dup,
4371 		    SCTP_LOG_NEW_SACK);
4372 	}
4373 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4374 		uint16_t i;
4375 		uint32_t *dupdata, dblock;
4376 
4377 		for (i = 0; i < num_dup; i++) {
4378 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4379 			    sizeof(uint32_t), (uint8_t *) & dblock);
4380 			if (dupdata == NULL) {
4381 				break;
4382 			}
4383 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4384 		}
4385 	}
4386 	/* reality check */
4387 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4388 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4389 		    sctpchunk_listhead);
4390 		send_s = tp1->rec.data.TSN_seq + 1;
4391 	} else {
4392 		tp1 = NULL;
4393 		send_s = asoc->sending_seq;
4394 	}
4395 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4396 		struct mbuf *op_err;
4397 		char msg[SCTP_DIAG_INFO_LEN];
4398 
4399 		/*
4400 		 * no way, we have not even sent this TSN out yet. Peer is
4401 		 * hopelessly messed up with us.
4402 		 */
4403 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4404 		    cum_ack, send_s);
4405 		if (tp1) {
4406 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4407 			    tp1->rec.data.TSN_seq, (void *)tp1);
4408 		}
4409 hopeless_peer:
4410 		*abort_now = 1;
4411 		/* XXX */
4412 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4413 		    cum_ack, send_s);
4414 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4415 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4416 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4417 		return;
4418 	}
4419 	/**********************/
4420 	/* 1) check the range */
4421 	/**********************/
4422 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4423 		/* acking something behind */
4424 		return;
4425 	}
4426 	/* update the Rwnd of the peer */
4427 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4428 	    TAILQ_EMPTY(&asoc->send_queue) &&
4429 	    (asoc->stream_queue_cnt == 0)) {
4430 		/* nothing left on send/sent and strmq */
4431 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4432 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4433 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4434 		}
4435 		asoc->peers_rwnd = a_rwnd;
4436 		if (asoc->sent_queue_retran_cnt) {
4437 			asoc->sent_queue_retran_cnt = 0;
4438 		}
4439 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4440 			/* SWS sender side engages */
4441 			asoc->peers_rwnd = 0;
4442 		}
4443 		/* stop any timers */
4444 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4445 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4446 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4447 			net->partial_bytes_acked = 0;
4448 			net->flight_size = 0;
4449 		}
4450 		asoc->total_flight = 0;
4451 		asoc->total_flight_count = 0;
4452 		return;
4453 	}
4454 	/*
4455 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4456 	 * things: the total byte count acked is tracked in net_ack, AND
4457 	 * net_ack2 is used to track the total bytes acked that are
4458 	 * unambiguous and were never retransmitted. We track these on a
4459 	 * per destination address basis.
4460 	 */
4461 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4462 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4463 			/* Drag along the window_tsn for cwr's */
4464 			net->cwr_window_tsn = cum_ack;
4465 		}
4466 		net->prev_cwnd = net->cwnd;
4467 		net->net_ack = 0;
4468 		net->net_ack2 = 0;
4469 
4470 		/*
4471 		 * CMT: Reset CUC and Fast recovery algo variables before
4472 		 * SACK processing
4473 		 */
4474 		net->new_pseudo_cumack = 0;
4475 		net->will_exit_fast_recovery = 0;
4476 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4477 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4478 		}
4479 	}
4480 	/* process the new consecutive TSN first */
4481 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4482 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4483 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4484 				accum_moved = 1;
4485 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4486 					/*
4487 					 * If it is less than ACKED, it is
4488 					 * now no longer in flight. Higher
4489 					 * values may occur during marking
4490 					 */
4491 					if ((tp1->whoTo->dest_state &
4492 					    SCTP_ADDR_UNCONFIRMED) &&
4493 					    (tp1->snd_count < 2)) {
4494 						/*
4495 						 * If there was no retran
4496 						 * and the address is
4497 						 * un-confirmed and we sent
4498 						 * there and are now
4499 						 * sacked... it's confirmed;
4500 						 * mark it so.
4501 						 */
4502 						tp1->whoTo->dest_state &=
4503 						    ~SCTP_ADDR_UNCONFIRMED;
4504 					}
4505 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4506 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4507 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4508 							    tp1->whoTo->flight_size,
4509 							    tp1->book_size,
4510 							    (uint32_t) (uintptr_t) tp1->whoTo,
4511 							    tp1->rec.data.TSN_seq);
4512 						}
4513 						sctp_flight_size_decrease(tp1);
4514 						sctp_total_flight_decrease(stcb, tp1);
4515 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4516 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4517 							    tp1);
4518 						}
4519 					}
4520 					tp1->whoTo->net_ack += tp1->send_size;
4521 
4522 					/* CMT SFR and DAC algos */
4523 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4524 					tp1->whoTo->saw_newack = 1;
4525 
4526 					if (tp1->snd_count < 2) {
4527 						/*
4528 						 * True non-retransmitted
4529 						 * chunk
4530 						 */
4531 						tp1->whoTo->net_ack2 +=
4532 						    tp1->send_size;
4533 
4534 						/* update RTO too? */
4535 						if (tp1->do_rtt) {
4536 							if (rto_ok) {
4537 								tp1->whoTo->RTO =
4538 								    sctp_calculate_rto(stcb,
4539 								    asoc, tp1->whoTo,
4540 								    &tp1->sent_rcv_time,
4541 								    sctp_align_safe_nocopy,
4542 								    SCTP_RTT_FROM_DATA);
4543 								rto_ok = 0;
4544 							}
4545 							if (tp1->whoTo->rto_needed == 0) {
4546 								tp1->whoTo->rto_needed = 1;
4547 							}
4548 							tp1->do_rtt = 0;
4549 						}
4550 					}
4551 					/*
4552 					 * CMT: CUCv2 algorithm. From the
4553 					 * cumack'd TSNs, for each TSN being
4554 					 * acked for the first time, set the
4555 					 * following variables for the
4556 					 * corresponding destination.
4557 					 * new_pseudo_cumack will trigger a
4558 					 * cwnd update.
4559 					 * find_(rtx_)pseudo_cumack will
4560 					 * trigger search for the next
4561 					 * expected (rtx-)pseudo-cumack.
4562 					 */
4563 					tp1->whoTo->new_pseudo_cumack = 1;
4564 					tp1->whoTo->find_pseudo_cumack = 1;
4565 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4566 
4567 
4568 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4569 						sctp_log_sack(asoc->last_acked_seq,
4570 						    cum_ack,
4571 						    tp1->rec.data.TSN_seq,
4572 						    0,
4573 						    0,
4574 						    SCTP_LOG_TSN_ACKED);
4575 					}
4576 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4577 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4578 					}
4579 				}
4580 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4581 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4582 #ifdef SCTP_AUDITING_ENABLED
4583 					sctp_audit_log(0xB3,
4584 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4585 #endif
4586 				}
4587 				if (tp1->rec.data.chunk_was_revoked) {
4588 					/* deflate the cwnd */
4589 					tp1->whoTo->cwnd -= tp1->book_size;
4590 					tp1->rec.data.chunk_was_revoked = 0;
4591 				}
4592 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4593 					tp1->sent = SCTP_DATAGRAM_ACKED;
4594 				}
4595 			}
4596 		} else {
4597 			break;
4598 		}
4599 	}
4600 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4601 	/* always set this up to cum-ack */
4602 	asoc->this_sack_highest_gap = last_tsn;
4603 
4604 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4605 
4606 		/*
4607 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4608 		 * to be greater than the cumack. Also reset saw_newack to 0
4609 		 * for all dests.
4610 		 */
4611 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 			net->saw_newack = 0;
4613 			net->this_sack_highest_newack = last_tsn;
4614 		}
4615 
4616 		/*
4617 		 * this_sack_highest_gap will increase while handling NEW
4618 		 * segments; this_sack_highest_newack will increase while
4619 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4620 		 * used for the CMT DAC algo. saw_newack will also change.
4621 		 */
4622 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4623 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4624 		    num_seg, num_nr_seg, &rto_ok)) {
4625 			wake_him++;
4626 		}
4627 		/*
4628 		 * validate the biggest_tsn_acked in the gap acks if strict
4629 		 * adherence is wanted.
4630 		 */
4631 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4632 			/*
4633 			 * peer is either confused or we are under attack.
4634 			 * We must abort.
4635 			 */
4636 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4637 			    biggest_tsn_acked, send_s);
4638 			goto hopeless_peer;
4639 		}
4640 	}
4641 	/*******************************************/
4642 	/* cancel ALL T3-send timer if accum moved */
4643 	/*******************************************/
4644 	if (asoc->sctp_cmt_on_off > 0) {
4645 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4646 			if (net->new_pseudo_cumack)
4647 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4648 				    stcb, net,
4649 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4650 
4651 		}
4652 	} else {
4653 		if (accum_moved) {
4654 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4655 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4656 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4657 			}
4658 		}
4659 	}
4660 	/********************************************/
4661 	/* drop the acked chunks from the sentqueue */
4662 	/********************************************/
4663 	asoc->last_acked_seq = cum_ack;
4664 
4665 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4666 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4667 			break;
4668 		}
4669 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4670 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4671 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4672 #ifdef INVARIANTS
4673 			} else {
4674 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4675 #endif
4676 			}
4677 		}
4678 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4679 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4680 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4681 			asoc->trigger_reset = 1;
4682 		}
4683 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4684 		if (PR_SCTP_ENABLED(tp1->flags)) {
4685 			if (asoc->pr_sctp_cnt != 0)
4686 				asoc->pr_sctp_cnt--;
4687 		}
4688 		asoc->sent_queue_cnt--;
4689 		if (tp1->data) {
4690 			/* sa_ignore NO_NULL_CHK */
4691 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4692 			sctp_m_freem(tp1->data);
4693 			tp1->data = NULL;
4694 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4695 				asoc->sent_queue_cnt_removeable--;
4696 			}
4697 		}
4698 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4699 			sctp_log_sack(asoc->last_acked_seq,
4700 			    cum_ack,
4701 			    tp1->rec.data.TSN_seq,
4702 			    0,
4703 			    0,
4704 			    SCTP_LOG_FREE_SENT);
4705 		}
4706 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4707 		wake_him++;
4708 	}
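	/* If the sent_queue fully drained, nothing can still be in flight. */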
4709 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4710 #ifdef INVARIANTS
4711 		panic("Warning flight size is positive and should be 0");
4712 #else
4713 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4714 		    asoc->total_flight);
4715 #endif
4716 		asoc->total_flight = 0;
4717 	}
4718 	/* sa_ignore NO_NULL_CHK */
4719 	if ((wake_him) && (stcb->sctp_socket)) {
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 		struct socket *so;
4722 
4723 #endif
4724 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4725 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4726 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4727 		}
4728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4729 		so = SCTP_INP_SO(stcb->sctp_ep);
4730 		atomic_add_int(&stcb->asoc.refcnt, 1);
4731 		SCTP_TCB_UNLOCK(stcb);
4732 		SCTP_SOCKET_LOCK(so, 1);
4733 		SCTP_TCB_LOCK(stcb);
4734 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4735 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4736 			/* assoc was freed while we were unlocked */
4737 			SCTP_SOCKET_UNLOCK(so, 1);
4738 			return;
4739 		}
4740 #endif
4741 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4742 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4743 		SCTP_SOCKET_UNLOCK(so, 1);
4744 #endif
4745 	} else {
4746 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4747 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4748 		}
4749 	}
4750 
4751 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4752 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4753 			/* Setup so we will exit RFC2582 fast recovery */
4754 			will_exit_fast_recovery = 1;
4755 		}
4756 	}
4757 	/*
4758 	 * Check for revoked fragments:
4759 	 *
4760 	 * If the previous sack had no frags then we can't have any revoked.
4761 	 * If the previous sack had frags: when we now have frags (num_seg >
4762 	 * 0) call sctp_check_for_revoked() to tell if the peer revoked some
4763 	 * of them; else the peer revoked all ACKED fragments, since we had
4764 	 * some before and now we have NONE.
4765 	 */
4766 
4767 	if (num_seg) {
4768 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4769 		asoc->saw_sack_with_frags = 1;
4770 	} else if (asoc->saw_sack_with_frags) {
4771 		int cnt_revoked = 0;
4772 
4773 		/* Peer revoked all datagrams previously marked or acked */
4774 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4775 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4776 				tp1->sent = SCTP_DATAGRAM_SENT;
4777 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4778 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4779 					    tp1->whoTo->flight_size,
4780 					    tp1->book_size,
4781 					    (uint32_t) (uintptr_t) tp1->whoTo,
4782 					    tp1->rec.data.TSN_seq);
4783 				}
4784 				sctp_flight_size_increase(tp1);
4785 				sctp_total_flight_increase(stcb, tp1);
4786 				tp1->rec.data.chunk_was_revoked = 1;
4787 				/*
4788 				 * To ensure that this increase in
4789 				 * flightsize, which is artificial, does not
4790 				 * throttle the sender, we also increase the
4791 				 * cwnd artificially.
4792 				 */
4793 				tp1->whoTo->cwnd += tp1->book_size;
4794 				cnt_revoked++;
4795 			}
4796 		}
4797 		if (cnt_revoked) {
4798 			reneged_all = 1;
4799 		}
4800 		asoc->saw_sack_with_frags = 0;
4801 	}
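	/*
	 * reneged_all is handed to the CC module's
	 * sctp_cwnd_update_after_sack() hook below so it can react to the
	 * peer having revoked every previously acked fragment.
	 */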
4802 	if (num_nr_seg > 0)
4803 		asoc->saw_sack_with_nr_frags = 1;
4804 	else
4805 		asoc->saw_sack_with_nr_frags = 0;
4806 
4807 	/* JRS - Use the congestion control given in the CC module */
4808 	if (ecne_seen == 0) {
4809 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4810 			if (net->net_ack2 > 0) {
4811 				/*
4812 				 * Karn's rule applies to clearing error
4813 				 * count, this is optional.
4814 				 */
4815 				net->error_count = 0;
4816 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4817 					/* addr came good */
4818 					net->dest_state |= SCTP_ADDR_REACHABLE;
4819 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4820 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4821 				}
4822 				if (net == stcb->asoc.primary_destination) {
4823 					if (stcb->asoc.alternate) {
4824 						/*
4825 						 * release the alternate,
4826 						 * primary is good
4827 						 */
4828 						sctp_free_remote_addr(stcb->asoc.alternate);
4829 						stcb->asoc.alternate = NULL;
4830 					}
4831 				}
4832 				if (net->dest_state & SCTP_ADDR_PF) {
4833 					net->dest_state &= ~SCTP_ADDR_PF;
4834 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4835 					    stcb->sctp_ep, stcb, net,
4836 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4837 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4838 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4839 					/* Done with this net */
4840 					net->net_ack = 0;
4841 				}
4842 				/* restore any doubled timers */
4843 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4844 				if (net->RTO < stcb->asoc.minrto) {
4845 					net->RTO = stcb->asoc.minrto;
4846 				}
4847 				if (net->RTO > stcb->asoc.maxrto) {
4848 					net->RTO = stcb->asoc.maxrto;
4849 				}
4850 			}
4851 		}
4852 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4853 	}
4854 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4855 		/* nothing left in-flight */
4856 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4857 			/* stop all timers */
4858 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4859 			    stcb, net,
4860 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4861 			net->flight_size = 0;
4862 			net->partial_bytes_acked = 0;
4863 		}
4864 		asoc->total_flight = 0;
4865 		asoc->total_flight_count = 0;
4866 	}
4867 	/**********************************/
4868 	/* Now what about shutdown issues */
4869 	/**********************************/
4870 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4871 		/* Nothing left on the send queues; consider it done. */
4872 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4873 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4874 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4875 		}
4876 		asoc->peers_rwnd = a_rwnd;
4877 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4878 			/* SWS sender side engages */
4879 			asoc->peers_rwnd = 0;
4880 		}
4881 		/* clean up */
4882 		if ((asoc->stream_queue_cnt == 1) &&
4883 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4884 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4885 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4886 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4887 		}
4888 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4889 		    (asoc->stream_queue_cnt == 0)) {
4890 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4891 				/* Need to abort here */
4892 				struct mbuf *op_err;
4893 
4894 		abort_out_now:
4895 				*abort_now = 1;
4896 				/* XXX */
4897 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4898 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4899 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4900 				return;
4901 			} else {
4902 				struct sctp_nets *netp;
4903 
4904 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4905 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4906 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4907 				}
4908 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4909 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4910 				sctp_stop_timers_for_shutdown(stcb);
4911 				if (asoc->alternate) {
4912 					netp = asoc->alternate;
4913 				} else {
4914 					netp = asoc->primary_destination;
4915 				}
4916 				sctp_send_shutdown(stcb, netp);
4917 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4918 				    stcb->sctp_ep, stcb, netp);
4919 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4920 				    stcb->sctp_ep, stcb, netp);
4921 			}
4922 			return;
4923 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4924 		    (asoc->stream_queue_cnt == 0)) {
4925 			struct sctp_nets *netp;
4926 
4927 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4928 				goto abort_out_now;
4929 			}
4930 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4931 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4932 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4933 			sctp_stop_timers_for_shutdown(stcb);
4934 			if (asoc->alternate) {
4935 				netp = asoc->alternate;
4936 			} else {
4937 				netp = asoc->primary_destination;
4938 			}
4939 			sctp_send_shutdown_ack(stcb, netp);
4940 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4941 			    stcb->sctp_ep, stcb, netp);
4942 			return;
4943 		}
4944 	}
4945 	/*
4946 	 * Now here we are going to recycle net_ack for a different use...
4947 	 * HEADS UP.
4948 	 */
4949 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4950 		net->net_ack = 0;
4951 	}
4952 
4953 	/*
4954 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4955 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4956 	 * automatically ensure that.
4957 	 */
4958 	if ((asoc->sctp_cmt_on_off > 0) &&
4959 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4960 	    (cmt_dac_flag == 0)) {
4961 		this_sack_lowest_newack = cum_ack;
4962 	}
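	/*
	 * this_sack_lowest_newack is consumed by the strike logic in
	 * sctp_strike_gap_ack_chunks(); seeding it with the cum-ack
	 * disables any extra DAC-based strike for this SACK.
	 */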
4963 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4964 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4965 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4966 	}
4967 	/* JRS - Use the congestion control given in the CC module */
4968 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4969 
4970 	/* Now are we exiting loss recovery ? */
4971 	if (will_exit_fast_recovery) {
4972 		/* Ok, we must exit fast recovery */
4973 		asoc->fast_retran_loss_recovery = 0;
4974 	}
4975 	if ((asoc->sat_t3_loss_recovery) &&
4976 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4977 		/* end satellite t3 loss recovery */
4978 		asoc->sat_t3_loss_recovery = 0;
4979 	}
4980 	/*
4981 	 * CMT Fast recovery
4982 	 */
4983 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4984 		if (net->will_exit_fast_recovery) {
4985 			/* Ok, we must exit fast recovery */
4986 			net->fast_retran_loss_recovery = 0;
4987 		}
4988 	}
4989 
4990 	/* Adjust and set the new rwnd value */
4991 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4992 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4993 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4994 	}
4995 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4996 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4997 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4998 		/* SWS sender side engages */
4999 		asoc->peers_rwnd = 0;
5000 	}
5001 	if (asoc->peers_rwnd > old_rwnd) {
5002 		win_probe_recovery = 1;
5003 	}
5004 	/*
5005 	 * Now we must setup so we have a timer up for anyone with
5006 	 * outstanding data.
5007 	 */
5008 	done_once = 0;
5009 again:
5010 	j = 0;
5011 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5012 		if (win_probe_recovery && (net->window_probe)) {
5013 			win_probe_recovered = 1;
5014 			/*-
5015 			 * Find the first chunk that was used for a
5016 			 * window probe and clear the event. Put it
5017 			 * back into the send queue as if it had
5018 			 * not been sent.
5019 			 */
5020 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5021 				if (tp1->window_probe) {
5022 					sctp_window_probe_recovery(stcb, asoc, tp1);
5023 					break;
5024 				}
5025 			}
5026 		}
5027 		if (net->flight_size) {
5028 			j++;
5029 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5030 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5031 				    stcb->sctp_ep, stcb, net);
5032 			}
5033 			if (net->window_probe) {
5034 				net->window_probe = 0;
5035 			}
5036 		} else {
5037 			if (net->window_probe) {
5038 				/*
5039 				 * For window probes we must ensure that a
5040 				 * timer is still running there.
5041 				 */
5042 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5043 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5044 					    stcb->sctp_ep, stcb, net);
5045 
5047 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5048 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5049 				    stcb, net,
5050 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5051 			}
5052 		}
5053 	}
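	/*
	 * j counts the destinations that still have data in flight; each
	 * of those must have a T3-rxt timer running. A net with an empty
	 * flight keeps a timer only while a window probe is outstanding.
	 */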
5054 	if ((j == 0) &&
5055 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5056 	    (asoc->sent_queue_retran_cnt == 0) &&
5057 	    (win_probe_recovered == 0) &&
5058 	    (done_once == 0)) {
5059 		/*
5060 		 * Huh, this should not happen unless all outstanding
5061 		 * packets are PR-SCTP and marked to be skipped.
5062 		 */
5063 		if (sctp_fs_audit(asoc)) {
5064 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5065 				net->flight_size = 0;
5066 			}
5067 			asoc->total_flight = 0;
5068 			asoc->total_flight_count = 0;
5069 			asoc->sent_queue_retran_cnt = 0;
5070 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5071 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5072 					sctp_flight_size_increase(tp1);
5073 					sctp_total_flight_increase(stcb, tp1);
5074 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5075 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5076 				}
5077 			}
5078 		}
5079 		done_once = 1;
5080 		goto again;
5081 	}
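	/*
	 * If no timer was started although data is still queued,
	 * sctp_fs_audit() checks the flight size accounting; on a mismatch
	 * the counters are rebuilt from the sent_queue markings and the
	 * timer pass above is retried once (done_once bounds the loop).
	 */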
5082 	/*********************************************/
5083 	/* Here we perform PR-SCTP procedures        */
5084 	/* (section 4.2)                             */
5085 	/*********************************************/
5086 	/* C1. update advancedPeerAckPoint */
5087 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5088 		asoc->advanced_peer_ack_point = cum_ack;
5089 	}
5090 	/* C2. try to further move advancedPeerAckPoint ahead */
5091 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5092 		struct sctp_tmit_chunk *lchk;
5093 		uint32_t old_adv_peer_ack_point;
5094 
5095 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5096 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5097 		/* C3. See if we need to send a Fwd-TSN */
5098 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5099 			/*
5100 			 * ISSUE with ECN, see FWD-TSN processing.
5101 			 */
5102 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5103 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5104 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5105 				    old_adv_peer_ack_point);
5106 			}
5107 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5108 				send_forward_tsn(stcb, asoc);
5109 			} else if (lchk) {
5110 				/* try to FR fwd-tsn's that get lost too */
5111 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5112 					send_forward_tsn(stcb, asoc);
5113 				}
5114 			}
5115 		}
5116 		if (lchk) {
5117 			/* Assure a timer is up */
5118 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5119 			    stcb->sctp_ep, stcb, lchk->whoTo);
5120 		}
5121 	}
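	/*
	 * A FORWARD-TSN is sent only if the advanced peer ack point
	 * actually moved; otherwise, if the chunk returned by
	 * sctp_try_advance_peer_ack_point() has already triggered three
	 * FWD-TSNs, one more is sent to recover FWD-TSNs that were
	 * presumably lost.
	 */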
5122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5123 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5124 		    a_rwnd,
5125 		    stcb->asoc.peers_rwnd,
5126 		    stcb->asoc.total_flight,
5127 		    stcb->asoc.total_output_queue_size);
5128 	}
5129 }
5130 
5131 void
5132 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5133 {
5134 	/* Copy cum-ack */
5135 	uint32_t cum_ack, a_rwnd;
5136 
5137 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5138 	/* Arrange so a_rwnd does NOT change */
5139 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
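	/*
	 * A SHUTDOWN chunk carries no rwnd, so a_rwnd is synthesized as
	 * peers_rwnd + total_flight: once the express handler deducts the
	 * flight again, peers_rwnd comes out unchanged.
	 */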
5140 
5141 	/* Now call the express sack handling */
5142 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5143 }
5144 
5145 static void
5146 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5147     struct sctp_stream_in *strmin)
5148 {
5149 	struct sctp_queued_to_read *ctl, *nctl;
5150 	struct sctp_association *asoc;
5151 	uint32_t tt;
5152 	int need_reasm_check = 0, old;
5153 
5154 	asoc = &stcb->asoc;
5155 	tt = strmin->last_sequence_delivered;
5156 	if (asoc->idata_supported) {
5157 		old = 0;
5158 	} else {
5159 		old = 1;
5160 	}
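	/*
	 * "old" makes the SCTP_MSGID_* comparisons below use 16-bit SSN
	 * serial arithmetic (plain DATA) instead of the 32-bit MID
	 * arithmetic used with I-DATA.
	 */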
5161 	/*
5162 	 * First deliver anything prior to and including the stream sequence
5163 	 * number that came in.
5164 	 */
5165 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5166 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5167 			/* this is deliverable now */
5168 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5169 				if (ctl->on_strm_q) {
5170 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5171 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5172 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5173 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5174 #ifdef INVARIANTS
5175 					} else {
5176 						panic("strmin: %p ctl: %p unknown %d",
5177 						    strmin, ctl, ctl->on_strm_q);
5178 #endif
5179 					}
5180 					ctl->on_strm_q = 0;
5181 				}
5182 				/* subtract pending on streams */
5183 				asoc->size_on_all_streams -= ctl->length;
5184 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5185 				/* deliver it to at least the delivery-q */
5186 				if (stcb->sctp_socket) {
5187 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5188 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5189 					    ctl,
5190 					    &stcb->sctp_socket->so_rcv,
5191 					    1, SCTP_READ_LOCK_HELD,
5192 					    SCTP_SO_NOT_LOCKED);
5193 				}
5194 			} else {
5195 				/* It's a fragmented message */
5196 				if (ctl->first_frag_seen) {
5197 					/*
5198 					 * Make it so this is next to
5199 					 * deliver; we restore it later
5200 					 */
5201 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5202 					need_reasm_check = 1;
5203 					break;
5204 				}
5205 			}
5206 		} else {
5207 			/* no more delivery now. */
5208 			break;
5209 		}
5210 	}
5211 	if (need_reasm_check) {
5212 		int ret;
5213 
5214 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5215 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5216 			/* Restore the next to deliver unless we are ahead */
5217 			strmin->last_sequence_delivered = tt;
5218 		}
5219 		if (ret == 0) {
5220 			/* The partial message at the front was left on the queue. */
5221 			return;
5222 		}
5223 		need_reasm_check = 0;
5224 	}
5225 	/*
5226 	 * Now we must deliver things in the queue the normal way, if any
5227 	 * are now ready.
5228 	 */
5229 	tt = strmin->last_sequence_delivered + 1;
5230 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5231 		if (tt == ctl->sinfo_ssn) {
5232 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5233 				/* this is deliverable now */
5234 				if (ctl->on_strm_q) {
5235 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5236 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5237 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5238 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5239 #ifdef INVARIANTS
5240 					} else {
5241 						panic("strmin: %p ctl: %p unknown %d",
5242 						    strmin, ctl, ctl->on_strm_q);
5243 #endif
5244 					}
5245 					ctl->on_strm_q = 0;
5246 				}
5247 				/* subtract pending on streams */
5248 				asoc->size_on_all_streams -= ctl->length;
5249 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5250 				/* deliver it to at least the delivery-q */
5251 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5252 				if (stcb->sctp_socket) {
5253 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5254 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5255 					    ctl,
5256 					    &stcb->sctp_socket->so_rcv, 1,
5257 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5259 				}
5260 				tt = strmin->last_sequence_delivered + 1;
5261 			} else {
5262 				/* Its a fragmented message */
5263 				if (ctl->first_frag_seen) {
5264 					/*
5265 					 * Make it so this is next to
5266 					 * deliver
5267 					 */
5268 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5269 					need_reasm_check = 1;
5270 					break;
5271 				}
5272 			}
5273 		} else {
5274 			break;
5275 		}
5276 	}
5277 	if (need_reasm_check) {
5278 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5279 	}
5280 }
5281 
5282 
5283 
5284 static void
5285 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5286     struct sctp_association *asoc,
5287     uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn)
5288 {
5289 	struct sctp_queued_to_read *control;
5290 	struct sctp_stream_in *strm;
5291 	struct sctp_tmit_chunk *chk, *nchk;
5292 	int cnt_removed = 0;
5293 
5294 	/*
5295 	 * For now, large messages held on the stream reassembly queue that
5296 	 * are complete are tossed as well. In theory we could do more work:
5297 	 * spin through, stop after dumping one message (i.e. on seeing the
5298 	 * start of a new message at the head) and call the delivery
5299 	 * function to see if it can be delivered. But for now we just dump
5300 	 * everything on the queue.
5301 	 */
5302 	strm = &asoc->strmin[stream];
5303 	control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
5304 	if (control == NULL) {
5305 		/* Not found */
5306 		return;
5307 	}
5308 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5309 		/* Purge hanging chunks */
5310 		if (old && (ordered == 0)) {
5311 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
5312 				break;
5313 			}
5314 		}
5315 		cnt_removed++;
5316 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5317 		asoc->size_on_reasm_queue -= chk->send_size;
5318 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5319 		if (chk->data) {
5320 			sctp_m_freem(chk->data);
5321 			chk->data = NULL;
5322 		}
5323 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5324 	}
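	/*
	 * For old (non I-DATA) unordered data the loop above stops at the
	 * first chunk past the given cum-tsn, so fragments beyond it
	 * survive the purge and are dealt with below.
	 */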
5325 	if (!TAILQ_EMPTY(&control->reasm)) {
5326 		/* This has to be old data, unordered */
5327 		if (control->data) {
5328 			sctp_m_freem(control->data);
5329 			control->data = NULL;
5330 		}
5331 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5332 		chk = TAILQ_FIRST(&control->reasm);
5333 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5334 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5335 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5336 			    chk, SCTP_READ_LOCK_HELD);
5337 		}
5338 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5339 		return;
5340 	}
5341 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5342 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5343 		control->on_strm_q = 0;
5344 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5345 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5346 		control->on_strm_q = 0;
5347 #ifdef INVARIANTS
5348 	} else if (control->on_strm_q) {
5349 		panic("strm: %p ctl: %p unknown %d",
5350 		    strm, control, control->on_strm_q);
5351 #endif
5352 	}
5353 	control->on_strm_q = 0;
5354 	if (control->on_read_q == 0) {
5355 		sctp_free_remote_addr(control->whoFrom);
5356 		if (control->data) {
5357 			sctp_m_freem(control->data);
5358 			control->data = NULL;
5359 		}
5360 		sctp_free_a_readq(stcb, control);
5361 	}
5362 }
5363 
5364 void
5365 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5366     struct sctp_forward_tsn_chunk *fwd,
5367     int *abort_flag, struct mbuf *m, int offset)
5368 {
5369 	/* The pr-sctp fwd tsn */
5370 	/*
5371 	 * Here we perform all the data receiver side steps for processing
5372 	 * FwdTSN, as required by the PR-SCTP specification:
5373 	 *
5374 	 * Assume we get FwdTSN(x):
5375 	 * 1) update local cumTSN to x
5376 	 * 2) try to further advance cumTSN to x + others we have
5377 	 * 3) examine and update the re-ordering queues on pr-in-streams
5378 	 * 4) clean up the re-assembly queue
5379 	 * 5) send a SACK to report where we are
5380 	 */
5381 	struct sctp_association *asoc;
5382 	uint32_t new_cum_tsn, gap;
5383 	unsigned int i, fwd_sz, m_size;
5384 	uint32_t str_seq;
5385 	struct sctp_stream_in *strm;
5386 	struct sctp_queued_to_read *ctl, *sv;
5387 
5388 	asoc = &stcb->asoc;
5389 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5390 		SCTPDBG(SCTP_DEBUG_INDATA1,
5391 		    "Bad size too small/big fwd-tsn\n");
5392 		return;
5393 	}
5394 	m_size = (stcb->asoc.mapping_array_size << 3);
5395 	/*************************************************************/
5396 	/* 1. Here we update local cumTSN and shift the bitmap array */
5397 	/*************************************************************/
5398 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5399 
5400 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5401 		/* Already got there ... */
5402 		return;
5403 	}
5404 	/*
5405 	 * Now we know the new TSN is more advanced; let's find the actual
5406 	 * gap.
5407 	 */
5408 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5409 	asoc->cumulative_tsn = new_cum_tsn;
5410 	if (gap >= m_size) {
5411 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5412 			struct mbuf *op_err;
5413 			char msg[SCTP_DIAG_INFO_LEN];
5414 
5415 			/*
5416 			 * Out of range, even for single byte chunks in the
5417 			 * rwnd we give out. This must be an attacker.
5418 			 */
5419 			*abort_flag = 1;
5420 			snprintf(msg, sizeof(msg),
5421 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5422 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5423 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5424 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5425 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5426 			return;
5427 		}
5428 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5429 
5430 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5431 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5432 		asoc->highest_tsn_inside_map = new_cum_tsn;
5433 
5434 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5435 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5436 
5437 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5438 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5439 		}
5440 	} else {
5441 		SCTP_TCB_LOCK_ASSERT(stcb);
5442 		for (i = 0; i <= gap; i++) {
5443 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5444 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5445 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5446 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5447 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5448 				}
5449 			}
5450 		}
5451 	}
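	/*
	 * Each TSN up to the new cum-tsn that was not yet marked received
	 * is now flagged in the nr-mapping array, keeping the maps
	 * consistent until sctp_slide_mapping_arrays() moves them forward
	 * at the end of this function.
	 */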
5452 	/*************************************************************/
5453 	/* 2. Clear up re-assembly queue                             */
5454 	/*************************************************************/
5455 
5456 	/* This is now done as part of clearing up the stream/seq */
5457 	if (asoc->idata_supported == 0) {
5458 		uint16_t sid;
5459 
5460 		/* Flush all the un-ordered data based on cum-tsn */
5461 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5462 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5463 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
5464 		}
5465 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5466 	}
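	/*
	 * Old-style unordered chunks carry no usable sequence number, so
	 * they can only be flushed by TSN: every incoming stream is walked
	 * with the new cum-tsn as the purge bound.
	 */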
5467 	/*******************************************************/
5468 	/* 3. Update the PR-stream re-ordering queues and fix  */
5469 	/*    delivery issues as needed.                        */
5470 	/*******************************************************/
5471 	fwd_sz -= sizeof(*fwd);
5472 	if (m && fwd_sz) {
5473 		/* New method. */
5474 		unsigned int num_str;
5475 		uint32_t sequence;
5476 		uint16_t stream;
5477 		uint16_t ordered, flags;
5478 		int old;
5479 		struct sctp_strseq *stseq, strseqbuf;
5480 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5481 
5482 		offset += sizeof(*fwd);
5483 
5484 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5485 		if (asoc->idata_supported) {
5486 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5487 			old = 0;
5488 		} else {
5489 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5490 			old = 1;
5491 		}
5492 		for (i = 0; i < num_str; i++) {
5493 			if (asoc->idata_supported) {
5494 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5495 				    sizeof(struct sctp_strseq_mid),
5496 				    (uint8_t *) & strseqbuf_m);
5497 				offset += sizeof(struct sctp_strseq_mid);
5498 				if (stseq_m == NULL) {
5499 					break;
5500 				}
5501 				stream = ntohs(stseq_m->stream);
5502 				sequence = ntohl(stseq_m->msg_id);
5503 				flags = ntohs(stseq_m->flags);
5504 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5505 					ordered = 0;
5506 				} else {
5507 					ordered = 1;
5508 				}
5509 			} else {
5510 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5511 				    sizeof(struct sctp_strseq),
5512 				    (uint8_t *) & strseqbuf);
5513 				offset += sizeof(struct sctp_strseq);
5514 				if (stseq == NULL) {
5515 					break;
5516 				}
5517 				stream = ntohs(stseq->stream);
5518 				sequence = (uint32_t) ntohs(stseq->sequence);
5519 				ordered = 1;
5520 			}
5521 			/* Convert */
5522 
5523 			/* now process */
5524 
5525 			/*
5526 			 * Ok we now look for the stream/seq on the read
5527 			 * queue where it's not all delivered. If we find it,
5528 			 * we transmute the read entry into a PDI_ABORTED.
5529 			 */
5530 			if (stream >= asoc->streamincnt) {
5531 				/* screwed up streams, stop!  */
5532 				break;
5533 			}
5534 			if ((asoc->str_of_pdapi == stream) &&
5535 			    (asoc->ssn_of_pdapi == sequence)) {
5536 				/*
5537 				 * If this is the one we were partially
5538 				 * delivering now then we no longer are.
5539 				 * Note this will change with the reassembly
5540 				 * re-write.
5541 				 */
5542 				asoc->fragmented_delivery_inprogress = 0;
5543 			}
5544 			strm = &asoc->strmin[stream];
5545 			if (asoc->idata_supported == 0) {
5546 				uint16_t strm_at;
5547 
5548 				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
5549 					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5550 				}
5551 			} else {
5552 				uint32_t strm_at;
5553 
5554 				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
5555 					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5556 				}
5557 			}
5558 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5559 				if ((ctl->sinfo_stream == stream) &&
5560 				    (ctl->sinfo_ssn == sequence)) {
5561 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5562 					ctl->pdapi_aborted = 1;
5563 					sv = stcb->asoc.control_pdapi;
5564 					ctl->end_added = 1;
5565 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5566 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5567 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5568 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5569 #ifdef INVARIANTS
5570 					} else if (ctl->on_strm_q) {
5571 						panic("strm: %p ctl: %p unknown %d",
5572 						    strm, ctl, ctl->on_strm_q);
5573 #endif
5574 					}
5575 					ctl->on_strm_q = 0;
5576 					stcb->asoc.control_pdapi = ctl;
5577 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5578 					    stcb,
5579 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5580 					    (void *)&str_seq,
5581 					    SCTP_SO_NOT_LOCKED);
5582 					stcb->asoc.control_pdapi = sv;
5583 					break;
5584 				} else if ((ctl->sinfo_stream == stream) &&
5585 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5586 					/* We are past our victim SSN */
5587 					break;
5588 				}
5589 			}
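			/*
			 * If the skipped message was on the read queue, it
			 * has now been flagged end_added/pdapi_aborted and
			 * the ULP was told via a PARTIAL_DELIVERY_ABORTED
			 * notification, with control_pdapi restored after
			 * the upcall.
			 */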
5590 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5591 				/* Update the sequence number */
5592 				strm->last_sequence_delivered = sequence;
5593 			}
5594 			/* now kick the stream the new way */
5595 			/* sa_ignore NO_NULL_CHK */
5596 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5597 		}
5598 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5599 	}
5600 	/*
5601 	 * Now slide things forward.
5602 	 */
5603 	sctp_slide_mapping_arrays(stcb);
5604 }
5605