xref: /freebsd/sys/netinet/sctp_indata.c (revision 59c3cb81c1769fdb6c840c971df129b52f4a848d)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send that is and will be sending it .. for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-2-m socket. Since
84 	 * the sb_cc is the count that everyone as put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * associations data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on socket queue and we yet hold
102 	 * for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* what is the overhead of all these rwnd's */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 	 * even it is 0. SWS engaged
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t stream_no,
134     uint32_t stream_seq, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = stream_no;
145 	read_queue_e->sinfo_ssn = stream_seq;
146 	read_queue_e->sinfo_flags = (flags << 8);
147 	read_queue_e->sinfo_ppid = ppid;
148 	read_queue_e->sinfo_context = context;
149 	read_queue_e->sinfo_tsn = tsn;
150 	read_queue_e->sinfo_cumtsn = tsn;
151 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t bits, unordered;
337 
338 	bits = (control->sinfo_flags >> 8);
339 	unordered = bits & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/*
345 				 * Only one stream can be here in old style
346 				 * -- abort
347 				 */
348 				return (-1);
349 			}
350 			TAILQ_INSERT_TAIL(q, control, next_instrm);
351 			control->on_strm_q = SCTP_ON_UNORDERED;
352 			return (0);
353 		}
354 	} else {
355 		q = &strm->inqueue;
356 	}
357 	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
359 	}
360 	if (TAILQ_EMPTY(q)) {
361 		/* Empty queue */
362 		TAILQ_INSERT_HEAD(q, control, next_instrm);
363 		if (unordered) {
364 			control->on_strm_q = SCTP_ON_UNORDERED;
365 		} else {
366 			control->on_strm_q = SCTP_ON_ORDERED;
367 		}
368 		return (0);
369 	} else {
370 		TAILQ_FOREACH(at, q, next_instrm) {
371 			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
372 				/*
373 				 * one in queue is bigger than the new one,
374 				 * insert before this one
375 				 */
376 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
377 				if (unordered) {
378 					control->on_strm_q = SCTP_ON_UNORDERED;
379 				} else {
380 					control->on_strm_q = SCTP_ON_ORDERED;
381 				}
382 				break;
383 			} else if (at->msg_id == control->msg_id) {
384 				/*
385 				 * Gak, He sent me a duplicate msg id
386 				 * number?? return -1 to abort.
387 				 */
388 				return (-1);
389 			} else {
390 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
391 					/*
392 					 * We are at the end, insert it
393 					 * after this one
394 					 */
395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 						sctp_log_strm_del(control, at,
397 						    SCTP_STR_LOG_FROM_INSERT_TL);
398 					}
399 					TAILQ_INSERT_AFTER(q,
400 					    at, control, next_instrm);
401 					if (unordered) {
402 						control->on_strm_q = SCTP_ON_UNORDERED;
403 					} else {
404 						control->on_strm_q = SCTP_ON_ORDERED;
405 					}
406 					break;
407 				}
408 			}
409 		}
410 	}
411 	return (0);
412 }
413 
414 static void
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416     struct sctp_stream_in *strm,
417     struct sctp_queued_to_read *control,
418     struct sctp_tmit_chunk *chk,
419     int *abort_flag, int opspot)
420 {
421 	char msg[SCTP_DIAG_INFO_LEN];
422 	struct mbuf *oper;
423 
424 	if (stcb->asoc.idata_supported) {
425 		snprintf(msg, sizeof(msg),
426 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 		    opspot,
428 		    control->fsn_included,
429 		    chk->rec.data.TSN_seq,
430 		    chk->rec.data.stream_number,
431 		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
432 	} else {
433 		snprintf(msg, sizeof(msg),
434 		    "Reass %x, CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x, SSN:%4.4x",
435 		    opspot,
436 		    control->fsn_included,
437 		    chk->rec.data.TSN_seq,
438 		    chk->rec.data.stream_number,
439 		    chk->rec.data.fsn_num,
440 		    (uint16_t) chk->rec.data.stream_seq);
441 	}
442 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
443 	sctp_m_freem(chk->data);
444 	chk->data = NULL;
445 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
446 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
447 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
448 	*abort_flag = 1;
449 }
450 
451 static void
452 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
453 {
454 	/*
455 	 * The control could not be placed and must be cleaned.
456 	 */
457 	struct sctp_tmit_chunk *chk, *nchk;
458 
459 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
460 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 		if (chk->data)
462 			sctp_m_freem(chk->data);
463 		chk->data = NULL;
464 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 	}
466 	sctp_free_a_readq(stcb, control);
467 }
468 
469 /*
470  * Queue the chunk either right into the socket buffer if it is the next one
471  * to go OR put it in the correct place in the delivery queue.  If we do
472  * append to the so_buf, keep doing so until we are out of order as
473  * long as the control's entered are non-fragmented.
474  */
475 static void
476 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
477     struct sctp_stream_in *strm,
478     struct sctp_association *asoc,
479     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
480 {
481 	/*
482 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
483 	 * all the data in one stream this could happen quite rapidly. One
484 	 * could use the TSN to keep track of things, but this scheme breaks
485 	 * down in the other type of stream useage that could occur. Send a
486 	 * single msg to stream 0, send 4Billion messages to stream 1, now
487 	 * send a message to stream 0. You have a situation where the TSN
488 	 * has wrapped but not in the stream. Is this worth worrying about
489 	 * or should we just change our queue sort at the bottom to be by
490 	 * TSN.
491 	 *
492 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
493 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
494 	 * assignment this could happen... and I don't see how this would be
495 	 * a violation. So for now I am undecided an will leave the sort by
496 	 * SSN alone. Maybe a hybred approach is the answer
497 	 *
498 	 */
499 	struct sctp_queued_to_read *at;
500 	int queue_needed;
501 	uint32_t nxt_todel;
502 	struct mbuf *op_err;
503 	char msg[SCTP_DIAG_INFO_LEN];
504 
505 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
506 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
507 	}
508 	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
509 		/* The incoming sseq is behind where we last delivered? */
510 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
511 		    control->sinfo_ssn, strm->last_sequence_delivered);
512 protocol_error:
513 		/*
514 		 * throw it in the stream so it gets cleaned up in
515 		 * association destruction
516 		 */
517 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
518 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
519 		    strm->last_sequence_delivered, control->sinfo_tsn,
520 		    control->sinfo_stream, control->sinfo_ssn);
521 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
522 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
523 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
524 		*abort_flag = 1;
525 		return;
526 
527 	}
528 	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
529 		goto protocol_error;
530 	}
531 	queue_needed = 1;
532 	asoc->size_on_all_streams += control->length;
533 	sctp_ucount_incr(asoc->cnt_on_all_streams);
534 	nxt_todel = strm->last_sequence_delivered + 1;
535 	if (nxt_todel == control->sinfo_ssn) {
536 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
537 		struct socket *so;
538 
539 		so = SCTP_INP_SO(stcb->sctp_ep);
540 		atomic_add_int(&stcb->asoc.refcnt, 1);
541 		SCTP_TCB_UNLOCK(stcb);
542 		SCTP_SOCKET_LOCK(so, 1);
543 		SCTP_TCB_LOCK(stcb);
544 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
545 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
546 			SCTP_SOCKET_UNLOCK(so, 1);
547 			return;
548 		}
549 #endif
550 		/* can be delivered right away? */
551 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
552 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
553 		}
554 		/* EY it wont be queued if it could be delivered directly */
555 		queue_needed = 0;
556 		asoc->size_on_all_streams -= control->length;
557 		sctp_ucount_decr(asoc->cnt_on_all_streams);
558 		strm->last_sequence_delivered++;
559 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
560 		sctp_add_to_readq(stcb->sctp_ep, stcb,
561 		    control,
562 		    &stcb->sctp_socket->so_rcv, 1,
563 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
564 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
565 			/* all delivered */
566 			nxt_todel = strm->last_sequence_delivered + 1;
567 			if ((nxt_todel == control->sinfo_ssn) &&
568 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
569 				asoc->size_on_all_streams -= control->length;
570 				sctp_ucount_decr(asoc->cnt_on_all_streams);
571 				if (control->on_strm_q == SCTP_ON_ORDERED) {
572 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
573 				} else {
574 					panic("Huh control:%p is on_strm_q:%d",
575 					    control, control->on_strm_q);
576 				}
577 				control->on_strm_q = 0;
578 				strm->last_sequence_delivered++;
579 				/*
580 				 * We ignore the return of deliver_data here
581 				 * since we always can hold the chunk on the
582 				 * d-queue. And we have a finite number that
583 				 * can be delivered from the strq.
584 				 */
585 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
586 					sctp_log_strm_del(control, NULL,
587 					    SCTP_STR_LOG_FROM_IMMED_DEL);
588 				}
589 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
590 				sctp_add_to_readq(stcb->sctp_ep, stcb,
591 				    control,
592 				    &stcb->sctp_socket->so_rcv, 1,
593 				    SCTP_READ_LOCK_NOT_HELD,
594 				    SCTP_SO_NOT_LOCKED);
595 				continue;
596 			} else if (nxt_todel == control->sinfo_ssn) {
597 				*need_reasm = 1;
598 			}
599 			break;
600 		}
601 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
602 		SCTP_SOCKET_UNLOCK(so, 1);
603 #endif
604 	}
605 	if (queue_needed) {
606 		/*
607 		 * Ok, we did not deliver this guy, find the correct place
608 		 * to put it on the queue.
609 		 */
610 		if (sctp_place_control_in_stream(strm, asoc, control)) {
611 			char msg[SCTP_DIAG_INFO_LEN];
612 			struct mbuf *oper;
613 
614 			snprintf(msg, sizeof(msg),
615 			    "Queue to str mid:%d duplicate",
616 			    control->msg_id);
617 			clean_up_control(stcb, control);
618 			oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
619 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
620 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
621 			*abort_flag = 1;
622 		}
623 	}
624 }
625 
626 
627 static void
628 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
629 {
630 	struct mbuf *m, *prev = NULL;
631 	struct sctp_tcb *stcb;
632 
633 	stcb = control->stcb;
634 	control->held_length = 0;
635 	control->length = 0;
636 	m = control->data;
637 	while (m) {
638 		if (SCTP_BUF_LEN(m) == 0) {
639 			/* Skip mbufs with NO length */
640 			if (prev == NULL) {
641 				/* First one */
642 				control->data = sctp_m_free(m);
643 				m = control->data;
644 			} else {
645 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
646 				m = SCTP_BUF_NEXT(prev);
647 			}
648 			if (m == NULL) {
649 				control->tail_mbuf = prev;
650 			}
651 			continue;
652 		}
653 		prev = m;
654 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
655 		if (control->on_read_q) {
656 			/*
657 			 * On read queue so we must increment the SB stuff,
658 			 * we assume caller has done any locks of SB.
659 			 */
660 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
661 		}
662 		m = SCTP_BUF_NEXT(m);
663 	}
664 	if (prev) {
665 		control->tail_mbuf = prev;
666 	}
667 }
668 
669 static void
670 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
671 {
672 	struct mbuf *prev = NULL;
673 	struct sctp_tcb *stcb;
674 
675 	stcb = control->stcb;
676 	if (stcb == NULL) {
677 		panic("Control broken");
678 	}
679 	if (control->tail_mbuf == NULL) {
680 		/* TSNH */
681 		control->data = m;
682 		sctp_setup_tail_pointer(control);
683 		return;
684 	}
685 	control->tail_mbuf->m_next = m;
686 	while (m) {
687 		if (SCTP_BUF_LEN(m) == 0) {
688 			/* Skip mbufs with NO length */
689 			if (prev == NULL) {
690 				/* First one */
691 				control->tail_mbuf->m_next = sctp_m_free(m);
692 				m = control->tail_mbuf->m_next;
693 			} else {
694 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
695 				m = SCTP_BUF_NEXT(prev);
696 			}
697 			if (m == NULL) {
698 				control->tail_mbuf = prev;
699 			}
700 			continue;
701 		}
702 		prev = m;
703 		if (control->on_read_q) {
704 			/*
705 			 * On read queue so we must increment the SB stuff,
706 			 * we assume caller has done any locks of SB.
707 			 */
708 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
709 		}
710 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
711 		m = SCTP_BUF_NEXT(m);
712 	}
713 	if (prev) {
714 		control->tail_mbuf = prev;
715 	}
716 }
717 
718 static void
719 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
720 {
721 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
722 	nc->sinfo_stream = control->sinfo_stream;
723 	nc->sinfo_ssn = control->sinfo_ssn;
724 	TAILQ_INIT(&nc->reasm);
725 	nc->top_fsn = control->top_fsn;
726 	nc->msg_id = control->msg_id;
727 	nc->sinfo_flags = control->sinfo_flags;
728 	nc->sinfo_ppid = control->sinfo_ppid;
729 	nc->sinfo_context = control->sinfo_context;
730 	nc->fsn_included = 0xffffffff;
731 	nc->sinfo_tsn = control->sinfo_tsn;
732 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
733 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
734 	nc->whoFrom = control->whoFrom;
735 	atomic_add_int(&nc->whoFrom->ref_count, 1);
736 	nc->stcb = control->stcb;
737 	nc->port_from = control->port_from;
738 }
739 
740 static int
741 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
742     struct sctp_queued_to_read *control, uint32_t pd_point)
743 {
744 	/*
745 	 * Special handling for the old un-ordered data chunk. All the
746 	 * chunks/TSN's go to msg_id 0. So we have to do the old style
747 	 * watching to see if we have it all. If you return one, no other
748 	 * control entries on the un-ordered queue will be looked at. In
749 	 * theory there should be no others entries in reality, unless the
750 	 * guy is sending both unordered NDATA and unordered DATA...
751 	 */
752 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
753 	uint32_t fsn;
754 	struct sctp_queued_to_read *nc = NULL;
755 	int cnt_added;
756 
757 	if (control->first_frag_seen == 0) {
758 		/* Nothing we can do, we have not seen the first piece yet */
759 		return (1);
760 	}
761 	/* Collapse any we can */
762 	cnt_added = 0;
763 restart:
764 	fsn = control->fsn_included + 1;
765 	/* Now what can we add? */
766 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
767 		if (chk->rec.data.fsn_num == fsn) {
768 			/* Ok lets add it */
769 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
770 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
771 			fsn++;
772 			cnt_added++;
773 			chk = NULL;
774 			if (control->end_added) {
775 				/* We are done */
776 				if (!TAILQ_EMPTY(&control->reasm)) {
777 					/*
778 					 * Ok we have to move anything left
779 					 * on the control queue to a new
780 					 * control.
781 					 */
782 					sctp_alloc_a_readq(stcb, nc);
783 					sctp_build_readq_entry_from_ctl(nc, control);
784 					tchk = TAILQ_FIRST(&control->reasm);
785 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
786 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
787 						nc->first_frag_seen = 1;
788 						nc->fsn_included = tchk->rec.data.fsn_num;
789 						nc->data = tchk->data;
790 						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
791 						tchk->data = NULL;
792 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
793 						sctp_setup_tail_pointer(nc);
794 						tchk = TAILQ_FIRST(&control->reasm);
795 					}
796 					/* Spin the rest onto the queue */
797 					while (tchk) {
798 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
799 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
800 						tchk = TAILQ_FIRST(&control->reasm);
801 					}
802 					/*
803 					 * Now lets add it to the queue
804 					 * after removing control
805 					 */
806 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
807 					nc->on_strm_q = SCTP_ON_UNORDERED;
808 					if (control->on_strm_q) {
809 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
810 						control->on_strm_q = 0;
811 					}
812 				}
813 				if (control->on_read_q == 0) {
814 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
815 					    &stcb->sctp_socket->so_rcv, control->end_added,
816 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
817 				}
818 				if (control->pdapi_started) {
819 					strm->pd_api_started = 0;
820 					control->pdapi_started = 0;
821 				}
822 				if (control->on_strm_q) {
823 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
824 					control->on_strm_q = 0;
825 				}
826 				sctp_wakeup_the_read_socket(stcb->sctp_ep);
827 				if ((nc) && (nc->first_frag_seen)) {
828 					/*
829 					 * Switch to the new guy and
830 					 * continue
831 					 */
832 					control = nc;
833 					nc = NULL;
834 					goto restart;
835 				}
836 				return (1);
837 			}
838 		} else {
839 			/* Can't add more */
840 			break;
841 		}
842 	}
843 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
844 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
845 		    &stcb->sctp_socket->so_rcv, control->end_added,
846 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
847 		strm->pd_api_started = 1;
848 		control->pdapi_started = 1;
849 		sctp_wakeup_the_read_socket(stcb->sctp_ep);
850 		return (0);
851 	} else {
852 		return (1);
853 	}
854 }
855 
856 static void
857 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
858     struct sctp_stream_in *strm,
859     struct sctp_queued_to_read *control,
860     struct sctp_tmit_chunk *chk,
861     int *abort_flag)
862 {
863 	struct sctp_tmit_chunk *at;
864 	int inserted = 0;
865 
866 	/*
867 	 * Here we need to place the chunk into the control structure sorted
868 	 * in the correct order.
869 	 */
870 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
871 		/* Its the very first one. */
872 		SCTPDBG(SCTP_DEBUG_XXX,
873 		    "chunk is a first fsn:%d becomes fsn_included\n",
874 		    chk->rec.data.fsn_num);
875 		if (control->first_frag_seen) {
876 			/*
877 			 * In old un-ordered we can reassembly on one
878 			 * control multiple messages. As long as the next
879 			 * FIRST is greater then the old first (TSN i.e. FSN
880 			 * wise)
881 			 */
882 			struct mbuf *tdata;
883 			uint32_t tmp;
884 
885 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
886 				/*
887 				 * Easy way the start of a new guy beyond
888 				 * the lowest
889 				 */
890 				goto place_chunk;
891 			}
892 			if ((chk->rec.data.fsn_num == control->fsn_included) ||
893 			    (control->pdapi_started)) {
894 				/*
895 				 * Ok this should not happen, if it does we
896 				 * started the pd-api on the higher TSN
897 				 * (since the equals part is a TSN failure
898 				 * it must be that).
899 				 *
900 				 * We are completly hosed in that case since I
901 				 * have no way to recover. This really will
902 				 * only happen if we can get more TSN's
903 				 * higher before the pd-api-point.
904 				 */
905 				sctp_abort_in_reasm(stcb, strm, control, chk,
906 				    abort_flag,
907 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
908 
909 				return;
910 			}
911 			/*
912 			 * Ok we have two firsts and the one we just got is
913 			 * smaller than the one we previously placed.. yuck!
914 			 * We must swap them out.
915 			 */
916 			/* swap the mbufs */
917 			tdata = control->data;
918 			control->data = chk->data;
919 			chk->data = tdata;
920 			/* Swap the lengths */
921 			tmp = control->length;
922 			control->length = chk->send_size;
923 			chk->send_size = tmp;
924 			/* Fix the FSN included */
925 			tmp = control->fsn_included;
926 			control->fsn_included = chk->rec.data.fsn_num;
927 			chk->rec.data.fsn_num = tmp;
928 			goto place_chunk;
929 		}
930 		control->first_frag_seen = 1;
931 		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
932 		control->data = chk->data;
933 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
934 		chk->data = NULL;
935 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
936 		sctp_setup_tail_pointer(control);
937 		return;
938 	}
939 place_chunk:
940 	if (TAILQ_EMPTY(&control->reasm)) {
941 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
942 		asoc->size_on_reasm_queue += chk->send_size;
943 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
944 		return;
945 	}
946 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
947 		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
948 			/*
949 			 * This one in queue is bigger than the new one,
950 			 * insert the new one before at.
951 			 */
952 			asoc->size_on_reasm_queue += chk->send_size;
953 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
954 			inserted = 1;
955 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
956 			break;
957 		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
958 			/*
959 			 * They sent a duplicate fsn number. This really
960 			 * should not happen since the FSN is a TSN and it
961 			 * should have been dropped earlier.
962 			 */
963 			if (chk->data) {
964 				sctp_m_freem(chk->data);
965 				chk->data = NULL;
966 			}
967 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
968 			sctp_abort_in_reasm(stcb, strm, control, chk,
969 			    abort_flag,
970 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
971 			return;
972 		}
973 	}
974 	if (inserted == 0) {
975 		/* Its at the end */
976 		asoc->size_on_reasm_queue += chk->send_size;
977 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
978 		control->top_fsn = chk->rec.data.fsn_num;
979 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
980 	}
981 }
982 
983 static int
984 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
985 {
986 	/*
987 	 * Given a stream, strm, see if any of the SSN's on it that are
988 	 * fragmented are ready to deliver. If so go ahead and place them on
989 	 * the read queue. In so placing if we have hit the end, then we
990 	 * need to remove them from the stream's queue.
991 	 */
992 	struct sctp_queued_to_read *control, *nctl = NULL;
993 	uint32_t next_to_del;
994 	uint32_t pd_point;
995 	int ret = 0;
996 
997 	if (stcb->sctp_socket) {
998 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
999 		    stcb->sctp_ep->partial_delivery_point);
1000 	} else {
1001 		pd_point = stcb->sctp_ep->partial_delivery_point;
1002 	}
1003 	control = TAILQ_FIRST(&strm->uno_inqueue);
1004 	if ((control) &&
1005 	    (asoc->idata_supported == 0)) {
1006 		/* Special handling needed for "old" data format */
1007 		nctl = TAILQ_NEXT(control, next_instrm);
1008 		if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1009 			goto done_un;
1010 		}
1011 	}
1012 	if (strm->pd_api_started) {
1013 		/* Can't add more */
1014 		return (0);
1015 	}
1016 	while (control) {
1017 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d -uo\n",
1018 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1019 		nctl = TAILQ_NEXT(control, next_instrm);
1020 		if (control->end_added) {
1021 			/* We just put the last bit on */
1022 			if (control->on_strm_q) {
1023 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1024 					panic("Huh control:%p on_q:%d -- not unordered?",
1025 					    control, control->on_strm_q);
1026 				}
1027 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1028 				control->on_strm_q = 0;
1029 			}
1030 			if (control->on_read_q == 0) {
1031 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1032 				    control,
1033 				    &stcb->sctp_socket->so_rcv, control->end_added,
1034 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1035 			}
1036 		} else {
1037 			/* Can we do a PD-API for this un-ordered guy? */
1038 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1039 				strm->pd_api_started = 1;
1040 				control->pdapi_started = 1;
1041 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1042 				    control,
1043 				    &stcb->sctp_socket->so_rcv, control->end_added,
1044 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1045 
1046 				break;
1047 			}
1048 		}
1049 		control = nctl;
1050 	}
1051 done_un:
1052 	control = TAILQ_FIRST(&strm->inqueue);
1053 	if (strm->pd_api_started) {
1054 		/* Can't add more */
1055 		return (0);
1056 	}
1057 	if (control == NULL) {
1058 		return (ret);
1059 	}
1060 	if (strm->last_sequence_delivered == control->sinfo_ssn) {
1061 		/*
1062 		 * Ok the guy at the top was being partially delivered
1063 		 * completed, so we remove it. Note the pd_api flag was
1064 		 * taken off when the chunk was merged on in
1065 		 * sctp_queue_data_for_reasm below.
1066 		 */
1067 		nctl = TAILQ_NEXT(control, next_instrm);
1068 		SCTPDBG(SCTP_DEBUG_XXX,
1069 		    "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d (lastdel:%d)- o\n",
1070 		    control, control->end_added, control->sinfo_ssn,
1071 		    control->top_fsn, control->fsn_included,
1072 		    strm->last_sequence_delivered);
1073 		if (control->end_added) {
1074 			if (control->on_strm_q) {
1075 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1076 					panic("Huh control:%p on_q:%d -- not ordered?",
1077 					    control, control->on_strm_q);
1078 				}
1079 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1080 				control->on_strm_q = 0;
1081 			}
1082 			if (control->on_read_q == 0) {
1083 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1084 				    control,
1085 				    &stcb->sctp_socket->so_rcv, control->end_added,
1086 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1087 			}
1088 			if (strm->pd_api_started && control->pdapi_started) {
1089 				control->pdapi_started = 0;
1090 				strm->pd_api_started = 0;
1091 			}
1092 			control = nctl;
1093 		}
1094 	}
1095 	if (strm->pd_api_started) {
1096 		/*
1097 		 * Can't add more must have gotten an un-ordered above being
1098 		 * partially delivered.
1099 		 */
1100 		return (0);
1101 	}
1102 deliver_more:
1103 	next_to_del = strm->last_sequence_delivered + 1;
1104 	if (control) {
1105 		SCTPDBG(SCTP_DEBUG_XXX,
1106 		    "Looking at control:%p e(%d) ssn:%d top_fsn:%d inc_fsn:%d (nxtdel:%d)- o\n",
1107 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1108 		    next_to_del);
1109 		nctl = TAILQ_NEXT(control, next_instrm);
1110 		if ((control->sinfo_ssn == next_to_del) &&
1111 		    (control->first_frag_seen)) {
1112 			/* Ok we can deliver it onto the stream. */
1113 			if (control->end_added) {
1114 				/* We are done with it afterwards */
1115 				if (control->on_strm_q) {
1116 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1117 						panic("Huh control:%p on_q:%d -- not ordered?",
1118 						    control, control->on_strm_q);
1119 					}
1120 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1121 					control->on_strm_q = 0;
1122 				}
1123 				ret++;
1124 			}
1125 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1126 				/*
1127 				 * A singleton now slipping through - mark
1128 				 * it non-revokable too
1129 				 */
1130 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1131 			} else if (control->end_added == 0) {
1132 				/*
1133 				 * Check if we can defer adding until its
1134 				 * all there
1135 				 */
1136 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1137 					/*
1138 					 * Don't need it or cannot add more
1139 					 * (one being delivered that way)
1140 					 */
1141 					goto out;
1142 				}
1143 			}
1144 			if (control->on_read_q == 0) {
1145 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1146 				    control,
1147 				    &stcb->sctp_socket->so_rcv, control->end_added,
1148 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1149 			}
1150 			strm->last_sequence_delivered = next_to_del;
1151 			if ((control->end_added) && (control->last_frag_seen)) {
1152 				control = nctl;
1153 				goto deliver_more;
1154 			} else {
1155 				/* We are now doing PD API */
1156 				strm->pd_api_started = 1;
1157 				control->pdapi_started = 1;
1158 			}
1159 		}
1160 	}
1161 out:
1162 	return (ret);
1163 }
1164 
1165 void
1166 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1167     struct sctp_stream_in *strm,
1168     struct sctp_tcb *stcb, struct sctp_association *asoc,
1169     struct sctp_tmit_chunk *chk)
1170 {
1171 	/*
1172 	 * Given a control and a chunk, merge the data from the chk onto the
1173 	 * control and free up the chunk resources.
1174 	 */
1175 	int i_locked = 0;
1176 
1177 	if (control->on_read_q) {
1178 		/*
1179 		 * Its being pd-api'd so we must do some locks.
1180 		 */
1181 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1182 		i_locked = 1;
1183 	}
1184 	if (control->data == NULL) {
1185 		control->data = chk->data;
1186 		sctp_setup_tail_pointer(control);
1187 	} else {
1188 		sctp_add_to_tail_pointer(control, chk->data);
1189 	}
1190 	control->fsn_included = chk->rec.data.fsn_num;
1191 	asoc->size_on_reasm_queue -= chk->send_size;
1192 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1193 	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1194 	chk->data = NULL;
1195 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1196 		control->first_frag_seen = 1;
1197 	}
1198 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1199 		/* Its complete */
1200 		if ((control->on_strm_q) && (control->on_read_q)) {
1201 			if (control->pdapi_started) {
1202 				control->pdapi_started = 0;
1203 				strm->pd_api_started = 0;
1204 			}
1205 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1206 				/* Unordered */
1207 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1208 				control->on_strm_q = 0;
1209 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1210 				/* Ordered */
1211 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1212 				control->on_strm_q = 0;
1213 			} else if (control->on_strm_q) {
1214 				panic("Unknown state on ctrl:%p on_strm_q:%d", control,
1215 				    control->on_strm_q);
1216 			}
1217 		}
1218 		control->end_added = 1;
1219 		control->last_frag_seen = 1;
1220 	}
1221 	if (i_locked) {
1222 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1223 	}
1224 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1225 }
1226 
1227 /*
1228  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1229  * queue, see if anthing can be delivered. If so pull it off (or as much as
1230  * we can. If we run out of space then we must dump what we can and set the
1231  * appropriate flag to say we queued what we could.
1232  */
1233 static void
1234 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1235     struct sctp_stream_in *strm,
1236     struct sctp_queued_to_read *control,
1237     struct sctp_tmit_chunk *chk,
1238     int created_control,
1239     int *abort_flag, uint32_t tsn)
1240 {
1241 	uint32_t next_fsn;
1242 	struct sctp_tmit_chunk *at, *nat;
1243 	int cnt_added, unordered;
1244 
1245 	/*
1246 	 * For old un-ordered data chunks.
1247 	 */
1248 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1249 		unordered = 1;
1250 	} else {
1251 		unordered = 0;
1252 	}
1253 	/* Must be added to the stream-in queue */
1254 	if (created_control) {
1255 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1256 			/* Duplicate SSN? */
1257 			clean_up_control(stcb, control);
1258 			sctp_abort_in_reasm(stcb, strm, control, chk,
1259 			    abort_flag,
1260 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1261 			return;
1262 		}
1263 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1264 			/*
1265 			 * Ok we created this control and now lets validate
1266 			 * that its legal i.e. there is a B bit set, if not
1267 			 * and we have up to the cum-ack then its invalid.
1268 			 */
1269 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1270 				sctp_abort_in_reasm(stcb, strm, control, chk,
1271 				    abort_flag,
1272 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1273 				return;
1274 			}
1275 		}
1276 	}
1277 	if ((asoc->idata_supported == 0) && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1278 		sctp_inject_old_data_unordered(stcb, asoc, strm, control, chk, abort_flag);
1279 		return;
1280 	}
1281 	/*
1282 	 * Ok we must queue the chunk into the reasembly portion: o if its
1283 	 * the first it goes to the control mbuf. o if its not first but the
1284 	 * next in sequence it goes to the control, and each succeeding one
1285 	 * in order also goes. o if its not in order we place it on the list
1286 	 * in its place.
1287 	 */
1288 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1289 		/* Its the very first one. */
1290 		SCTPDBG(SCTP_DEBUG_XXX,
1291 		    "chunk is a first fsn:%d becomes fsn_included\n",
1292 		    chk->rec.data.fsn_num);
1293 		if (control->first_frag_seen) {
1294 			/*
1295 			 * Error on senders part, they either sent us two
1296 			 * data chunks with FIRST, or they sent two
1297 			 * un-ordered chunks that were fragmented at the
1298 			 * same time in the same stream.
1299 			 */
1300 			sctp_abort_in_reasm(stcb, strm, control, chk,
1301 			    abort_flag,
1302 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1303 			return;
1304 		}
1305 		control->first_frag_seen = 1;
1306 		control->fsn_included = chk->rec.data.fsn_num;
1307 		control->data = chk->data;
1308 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1309 		chk->data = NULL;
1310 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1311 		sctp_setup_tail_pointer(control);
1312 	} else {
1313 		/* Place the chunk in our list */
1314 		int inserted = 0;
1315 
1316 		if (control->last_frag_seen == 0) {
1317 			/* Still willing to raise highest FSN seen */
1318 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1319 				SCTPDBG(SCTP_DEBUG_XXX,
1320 				    "We have a new top_fsn:%d\n",
1321 				    chk->rec.data.fsn_num);
1322 				control->top_fsn = chk->rec.data.fsn_num;
1323 			}
1324 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1325 				SCTPDBG(SCTP_DEBUG_XXX,
1326 				    "The last fsn is now in place fsn:%d\n",
1327 				    chk->rec.data.fsn_num);
1328 				control->last_frag_seen = 1;
1329 			}
1330 			if (asoc->idata_supported || control->first_frag_seen) {
1331 				/*
1332 				 * For IDATA we always check since we know
1333 				 * that the first fragment is 0. For old
1334 				 * DATA we have to receive the first before
1335 				 * we knwo the first FSN (which is the TSN).
1336 				 */
1337 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1338 					/*
1339 					 * We have already delivered up to
1340 					 * this so its a dup
1341 					 */
1342 					sctp_abort_in_reasm(stcb, strm, control, chk,
1343 					    abort_flag,
1344 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1345 					return;
1346 				}
1347 			}
1348 		} else {
1349 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1350 				/* Second last? huh? */
1351 				SCTPDBG(SCTP_DEBUG_XXX,
1352 				    "Duplicate last fsn:%d (top:%d) -- abort\n",
1353 				    chk->rec.data.fsn_num, control->top_fsn);
1354 				sctp_abort_in_reasm(stcb, strm, control,
1355 				    chk, abort_flag,
1356 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1357 				return;
1358 			}
1359 			if (asoc->idata_supported || control->first_frag_seen) {
1360 				/*
1361 				 * For IDATA we always check since we know
1362 				 * that the first fragment is 0. For old
1363 				 * DATA we have to receive the first before
1364 				 * we knwo the first FSN (which is the TSN).
1365 				 */
1366 
1367 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1368 					/*
1369 					 * We have already delivered up to
1370 					 * this so its a dup
1371 					 */
1372 					SCTPDBG(SCTP_DEBUG_XXX,
1373 					    "New fsn:%d is already seen in included_fsn:%d -- abort\n",
1374 					    chk->rec.data.fsn_num, control->fsn_included);
1375 					sctp_abort_in_reasm(stcb, strm, control, chk,
1376 					    abort_flag,
1377 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1378 					return;
1379 				}
1380 			}
1381 			/*
1382 			 * validate not beyond top FSN if we have seen last
1383 			 * one
1384 			 */
1385 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1386 				SCTPDBG(SCTP_DEBUG_XXX,
1387 				    "New fsn:%d is beyond or at top_fsn:%d -- abort\n",
1388 				    chk->rec.data.fsn_num,
1389 				    control->top_fsn);
1390 				sctp_abort_in_reasm(stcb, strm, control, chk,
1391 				    abort_flag,
1392 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1393 				return;
1394 			}
1395 		}
1396 		/*
1397 		 * If we reach here, we need to place the new chunk in the
1398 		 * reassembly for this control.
1399 		 */
1400 		SCTPDBG(SCTP_DEBUG_XXX,
1401 		    "chunk is a not first fsn:%d needs to be inserted\n",
1402 		    chk->rec.data.fsn_num);
1403 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1404 			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1405 				/*
1406 				 * This one in queue is bigger than the new
1407 				 * one, insert the new one before at.
1408 				 */
1409 				SCTPDBG(SCTP_DEBUG_XXX,
1410 				    "Insert it before fsn:%d\n",
1411 				    at->rec.data.fsn_num);
1412 				asoc->size_on_reasm_queue += chk->send_size;
1413 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1414 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1415 				inserted = 1;
1416 				break;
1417 			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1418 				/*
1419 				 * Gak, He sent me a duplicate str seq
1420 				 * number
1421 				 */
1422 				/*
1423 				 * foo bar, I guess I will just free this
1424 				 * new guy, should we abort too? FIX ME
1425 				 * MAYBE? Or it COULD be that the SSN's have
1426 				 * wrapped. Maybe I should compare to TSN
1427 				 * somehow... sigh for now just blow away
1428 				 * the chunk!
1429 				 */
1430 				SCTPDBG(SCTP_DEBUG_XXX,
1431 				    "Duplicate to fsn:%d -- abort\n",
1432 				    at->rec.data.fsn_num);
1433 				sctp_abort_in_reasm(stcb, strm, control,
1434 				    chk, abort_flag,
1435 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1436 				return;
1437 			}
1438 		}
1439 		if (inserted == 0) {
1440 			/* Goes on the end */
1441 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn:%d\n",
1442 			    chk->rec.data.fsn_num);
1443 			asoc->size_on_reasm_queue += chk->send_size;
1444 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1445 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1446 		}
1447 	}
1448 	/*
1449 	 * Ok lets see if we can suck any up into the control structure that
1450 	 * are in seq if it makes sense.
1451 	 */
1452 	cnt_added = 0;
1453 	/*
1454 	 * If the first fragment has not been seen there is no sense in
1455 	 * looking.
1456 	 */
1457 	if (control->first_frag_seen) {
1458 		next_fsn = control->fsn_included + 1;
1459 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1460 			if (at->rec.data.fsn_num == next_fsn) {
1461 				/* We can add this one now to the control */
1462 				SCTPDBG(SCTP_DEBUG_XXX,
1463 				    "Adding more to control:%p at:%p fsn:%d next_fsn:%d included:%d\n",
1464 				    control, at,
1465 				    at->rec.data.fsn_num,
1466 				    next_fsn, control->fsn_included);
1467 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1468 				sctp_add_chk_to_control(control, strm, stcb, asoc, at);
1469 				cnt_added++;
1470 				next_fsn++;
1471 				if (control->end_added && control->pdapi_started) {
1472 					if (strm->pd_api_started) {
1473 						strm->pd_api_started = 0;
1474 						control->pdapi_started = 0;
1475 					}
1476 					if (control->on_read_q == 0) {
1477 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1478 						    control,
1479 						    &stcb->sctp_socket->so_rcv, control->end_added,
1480 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1481 					}
1482 					break;
1483 				}
1484 			} else {
1485 				break;
1486 			}
1487 		}
1488 	}
1489 	if ((control->on_read_q) && (cnt_added > 0)) {
1490 		/* Need to wakeup the reader */
1491 		sctp_wakeup_the_read_socket(stcb->sctp_ep);
1492 	}
1493 }
1494 
1495 static struct sctp_queued_to_read *
1496 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1497 {
1498 	struct sctp_queued_to_read *reasm;
1499 
1500 	if (ordered) {
1501 		TAILQ_FOREACH(reasm, &strm->inqueue, next_instrm) {
1502 			if (reasm->msg_id == msg_id) {
1503 				break;
1504 			}
1505 		}
1506 	} else {
1507 		if (old) {
1508 			reasm = TAILQ_FIRST(&strm->uno_inqueue);
1509 			return (reasm);
1510 		}
1511 		TAILQ_FOREACH(reasm, &strm->uno_inqueue, next_instrm) {
1512 			if (reasm->msg_id == msg_id) {
1513 				break;
1514 			}
1515 		}
1516 	}
1517 	return (reasm);
1518 }
1519 
1520 
1521 static int
1522 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1523     struct mbuf **m, int offset, int chk_length,
1524     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1525     int *break_flag, int last_chunk, uint8_t chtype)
1526 {
1527 	/* Process a data chunk */
1528 	/* struct sctp_tmit_chunk *chk; */
1529 	struct sctp_data_chunk *ch;
1530 	struct sctp_idata_chunk *nch, chunk_buf;
1531 	struct sctp_tmit_chunk *chk;
1532 	uint32_t tsn, fsn, gap, msg_id;
1533 	struct mbuf *dmbuf;
1534 	int the_len;
1535 	int need_reasm_check = 0;
1536 	uint16_t strmno;
1537 	struct mbuf *op_err;
1538 	char msg[SCTP_DIAG_INFO_LEN];
1539 	struct sctp_queued_to_read *control = NULL;
1540 	uint32_t protocol_id;
1541 	uint8_t chunk_flags;
1542 	struct sctp_stream_reset_list *liste;
1543 	struct sctp_stream_in *strm;
1544 	int ordered;
1545 	size_t clen;
1546 	int created_control = 0;
1547 	uint8_t old_data;
1548 
1549 	chk = NULL;
1550 	if (chtype == SCTP_IDATA) {
1551 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1552 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1553 
1554 		ch = (struct sctp_data_chunk *)nch;
1555 		clen = sizeof(struct sctp_idata_chunk);
1556 		tsn = ntohl(ch->dp.tsn);
1557 		msg_id = ntohl(nch->dp.msg_id);
1558 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1559 			fsn = 0;
1560 		else
1561 			fsn = ntohl(nch->dp.fsn);
1562 		old_data = 0;
1563 	} else {
1564 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1565 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1566 
1567 		tsn = ntohl(ch->dp.tsn);
1568 		clen = sizeof(struct sctp_data_chunk);
1569 		fsn = tsn;
1570 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1571 		nch = NULL;
1572 		old_data = 1;
1573 	}
1574 	chunk_flags = ch->ch.chunk_flags;
1575 	if ((size_t)chk_length == clen) {
1576 		/*
1577 		 * Need to send an abort since we had a empty data chunk.
1578 		 */
1579 		struct mbuf *op_err;
1580 
1581 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1582 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1583 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1584 		*abort_flag = 1;
1585 		return (0);
1586 	}
1587 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1588 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1589 		asoc->send_sack = 1;
1590 	}
1591 	protocol_id = ch->dp.protocol_id;
1592 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1593 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1594 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1595 	}
1596 	if (stcb == NULL) {
1597 		return (0);
1598 	}
1599 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1600 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1601 		/* It is a duplicate */
1602 		SCTP_STAT_INCR(sctps_recvdupdata);
1603 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1604 			/* Record a dup for the next outbound sack */
1605 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1606 			asoc->numduptsns++;
1607 		}
1608 		asoc->send_sack = 1;
1609 		return (0);
1610 	}
1611 	/* Calculate the number of TSN's between the base and this TSN */
1612 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1613 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1614 		/* Can't hold the bit in the mapping at max array, toss it */
1615 		return (0);
1616 	}
1617 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1618 		SCTP_TCB_LOCK_ASSERT(stcb);
1619 		if (sctp_expand_mapping_array(asoc, gap)) {
1620 			/* Can't expand, drop it */
1621 			return (0);
1622 		}
1623 	}
1624 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1625 		*high_tsn = tsn;
1626 	}
1627 	/* See if we have received this one already */
1628 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1629 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1630 		SCTP_STAT_INCR(sctps_recvdupdata);
1631 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1632 			/* Record a dup for the next outbound sack */
1633 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1634 			asoc->numduptsns++;
1635 		}
1636 		asoc->send_sack = 1;
1637 		return (0);
1638 	}
1639 	/*
1640 	 * Check to see about the GONE flag, duplicates would cause a sack
1641 	 * to be sent up above
1642 	 */
1643 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1644 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1645 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1646 		/*
1647 		 * wait a minute, this guy is gone, there is no longer a
1648 		 * receiver. Send peer an ABORT!
1649 		 */
1650 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1651 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1652 		*abort_flag = 1;
1653 		return (0);
1654 	}
1655 	/*
1656 	 * Now before going further we see if there is room. If NOT then we
1657 	 * MAY let one through only IF this TSN is the one we are waiting
1658 	 * for on a partial delivery API.
1659 	 */
1660 
1661 	/* Is the stream valid? */
1662 	strmno = ntohs(ch->dp.stream_id);
1663 
1664 	if (strmno >= asoc->streamincnt) {
1665 		struct sctp_error_invalid_stream *cause;
1666 
1667 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1668 		    0, M_NOWAIT, 1, MT_DATA);
1669 		if (op_err != NULL) {
1670 			/* add some space up front so prepend will work well */
1671 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1672 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1673 			/*
1674 			 * Error causes are just param's and this one has
1675 			 * two back to back phdr, one with the error type
1676 			 * and size, the other with the streamid and a rsvd
1677 			 */
1678 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1679 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1680 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1681 			cause->stream_id = ch->dp.stream_id;
1682 			cause->reserved = htons(0);
1683 			sctp_queue_op_err(stcb, op_err);
1684 		}
1685 		SCTP_STAT_INCR(sctps_badsid);
1686 		SCTP_TCB_LOCK_ASSERT(stcb);
1687 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1688 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1689 			asoc->highest_tsn_inside_nr_map = tsn;
1690 		}
1691 		if (tsn == (asoc->cumulative_tsn + 1)) {
1692 			/* Update cum-ack */
1693 			asoc->cumulative_tsn = tsn;
1694 		}
1695 		return (0);
1696 	}
1697 	strm = &asoc->strmin[strmno];
1698 	/*
1699 	 * If its a fragmented message, lets see if we can find the control
1700 	 * on the reassembly queues.
1701 	 */
1702 	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1703 		/*
1704 		 * The first *must* be fsn 0, and other (middle/end) pieces
1705 		 * can *not* be fsn 0.
1706 		 */
1707 		goto err_out;
1708 	}
1709 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1710 		/* See if we can find the re-assembly entity */
1711 		control = find_reasm_entry(strm, msg_id, ordered, old_data);
1712 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1713 		    chunk_flags, control);
1714 		if (control) {
1715 			/* We found something, does it belong? */
1716 			if (ordered && (msg_id != control->sinfo_ssn)) {
1717 		err_out:
1718 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1719 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1720 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1721 				*abort_flag = 1;
1722 				return (0);
1723 			}
1724 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1725 				/*
1726 				 * We can't have a switched order with an
1727 				 * unordered chunk
1728 				 */
1729 				goto err_out;
1730 			}
1731 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1732 				/*
1733 				 * We can't have a switched unordered with a
1734 				 * ordered chunk
1735 				 */
1736 				goto err_out;
1737 			}
1738 		}
1739 	} else {
1740 		/*
1741 		 * Its a complete segment. Lets validate we don't have a
1742 		 * re-assembly going on with the same Stream/Seq (for
1743 		 * ordered) or in the same Stream for unordered.
1744 		 */
1745 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1746 		    chunk_flags);
1747 		if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1748 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x dup detected on msg_id:%d\n",
1749 			    chunk_flags,
1750 			    msg_id);
1751 
1752 			goto err_out;
1753 		}
1754 	}
1755 	/* now do the tests */
1756 	if (((asoc->cnt_on_all_streams +
1757 	    asoc->cnt_on_reasm_queue +
1758 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1759 	    (((int)asoc->my_rwnd) <= 0)) {
1760 		/*
1761 		 * When we have NO room in the rwnd we check to make sure
1762 		 * the reader is doing its job...
1763 		 */
1764 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1765 			/* some to read, wake-up */
1766 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1767 			struct socket *so;
1768 
1769 			so = SCTP_INP_SO(stcb->sctp_ep);
1770 			atomic_add_int(&stcb->asoc.refcnt, 1);
1771 			SCTP_TCB_UNLOCK(stcb);
1772 			SCTP_SOCKET_LOCK(so, 1);
1773 			SCTP_TCB_LOCK(stcb);
1774 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1775 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1776 				/* assoc was freed while we were unlocked */
1777 				SCTP_SOCKET_UNLOCK(so, 1);
1778 				return (0);
1779 			}
1780 #endif
1781 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1782 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1783 			SCTP_SOCKET_UNLOCK(so, 1);
1784 #endif
1785 		}
1786 		/* now is it in the mapping array of what we have accepted? */
1787 		if (nch == NULL) {
1788 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1789 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1790 				/* Nope not in the valid range dump it */
1791 		dump_packet:
1792 				sctp_set_rwnd(stcb, asoc);
1793 				if ((asoc->cnt_on_all_streams +
1794 				    asoc->cnt_on_reasm_queue +
1795 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1796 					SCTP_STAT_INCR(sctps_datadropchklmt);
1797 				} else {
1798 					SCTP_STAT_INCR(sctps_datadroprwnd);
1799 				}
1800 				*break_flag = 1;
1801 				return (0);
1802 			}
1803 		} else {
1804 			if (control == NULL) {
1805 				goto dump_packet;
1806 			}
1807 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1808 				goto dump_packet;
1809 			}
1810 		}
1811 	}
1812 #ifdef SCTP_ASOCLOG_OF_TSNS
1813 	SCTP_TCB_LOCK_ASSERT(stcb);
1814 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1815 		asoc->tsn_in_at = 0;
1816 		asoc->tsn_in_wrapped = 1;
1817 	}
1818 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1819 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1820 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1821 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1822 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1823 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1824 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1825 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1826 	asoc->tsn_in_at++;
1827 #endif
1828 	/*
1829 	 * Before we continue lets validate that we are not being fooled by
1830 	 * an evil attacker. We can only have Nk chunks based on our TSN
1831 	 * spread allowed by the mapping array N * 8 bits, so there is no
1832 	 * way our stream sequence numbers could have wrapped. We of course
1833 	 * only validate the FIRST fragment so the bit must be set.
1834 	 */
1835 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1836 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1837 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1838 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1839 		/* The incoming sseq is behind where we last delivered? */
1840 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1841 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1842 
1843 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1844 		    asoc->strmin[strmno].last_sequence_delivered,
1845 		    tsn, strmno, msg_id);
1846 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1847 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1848 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1849 		*abort_flag = 1;
1850 		return (0);
1851 	}
1852 	/************************************
1853 	 * From here down we may find ch-> invalid
1854 	 * so its a good idea NOT to use it.
1855 	 *************************************/
1856 	if (nch) {
1857 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1858 	} else {
1859 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1860 	}
1861 	if (last_chunk == 0) {
1862 		if (nch) {
1863 			dmbuf = SCTP_M_COPYM(*m,
1864 			    (offset + sizeof(struct sctp_idata_chunk)),
1865 			    the_len, M_NOWAIT);
1866 		} else {
1867 			dmbuf = SCTP_M_COPYM(*m,
1868 			    (offset + sizeof(struct sctp_data_chunk)),
1869 			    the_len, M_NOWAIT);
1870 		}
1871 #ifdef SCTP_MBUF_LOGGING
1872 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1873 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1874 		}
1875 #endif
1876 	} else {
1877 		/* We can steal the last chunk */
1878 		int l_len;
1879 
1880 		dmbuf = *m;
1881 		/* lop off the top part */
1882 		if (nch) {
1883 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1884 		} else {
1885 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1886 		}
1887 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1888 			l_len = SCTP_BUF_LEN(dmbuf);
1889 		} else {
1890 			/*
1891 			 * need to count up the size hopefully does not hit
1892 			 * this to often :-0
1893 			 */
1894 			struct mbuf *lat;
1895 
1896 			l_len = 0;
1897 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1898 				l_len += SCTP_BUF_LEN(lat);
1899 			}
1900 		}
1901 		if (l_len > the_len) {
1902 			/* Trim the end round bytes off  too */
1903 			m_adj(dmbuf, -(l_len - the_len));
1904 		}
1905 	}
1906 	if (dmbuf == NULL) {
1907 		SCTP_STAT_INCR(sctps_nomem);
1908 		return (0);
1909 	}
1910 	/*
1911 	 * Now no matter what we need a control, get one if we don't have
1912 	 * one (we may have gotten it above when we found the message was
1913 	 * fragmented
1914 	 */
1915 	if (control == NULL) {
1916 		sctp_alloc_a_readq(stcb, control);
1917 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1918 		    protocol_id,
1919 		    strmno, msg_id,
1920 		    chunk_flags,
1921 		    NULL, fsn, msg_id);
1922 		if (control == NULL) {
1923 			SCTP_STAT_INCR(sctps_nomem);
1924 			return (0);
1925 		}
1926 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1927 			control->data = dmbuf;
1928 			control->tail_mbuf = NULL;
1929 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1930 			control->top_fsn = control->fsn_included = fsn;
1931 		}
1932 		created_control = 1;
1933 	}
1934 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x ordered:%d msgid:%d control:%p\n",
1935 	    chunk_flags, ordered, msg_id, control);
1936 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1937 	    TAILQ_EMPTY(&asoc->resetHead) &&
1938 	    ((ordered == 0) ||
1939 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1940 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1941 		/* Candidate for express delivery */
1942 		/*
1943 		 * Its not fragmented, No PD-API is up, Nothing in the
1944 		 * delivery queue, Its un-ordered OR ordered and the next to
1945 		 * deliver AND nothing else is stuck on the stream queue,
1946 		 * And there is room for it in the socket buffer. Lets just
1947 		 * stuff it up the buffer....
1948 		 */
1949 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1950 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1951 			asoc->highest_tsn_inside_nr_map = tsn;
1952 		}
1953 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control:%p to be read (mid:%d)\n",
1954 		    control, msg_id);
1955 
1956 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1957 		    control, &stcb->sctp_socket->so_rcv,
1958 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1959 
1960 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1961 			/* for ordered, bump what we delivered */
1962 			strm->last_sequence_delivered++;
1963 		}
1964 		SCTP_STAT_INCR(sctps_recvexpress);
1965 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1966 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1967 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1968 		}
1969 		control = NULL;
1970 		goto finish_express_del;
1971 	}
1972 	/* Now will we need a chunk too? */
1973 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1974 		sctp_alloc_a_chunk(stcb, chk);
1975 		if (chk == NULL) {
1976 			/* No memory so we drop the chunk */
1977 			SCTP_STAT_INCR(sctps_nomem);
1978 			if (last_chunk == 0) {
1979 				/* we copied it, free the copy */
1980 				sctp_m_freem(dmbuf);
1981 			}
1982 			return (0);
1983 		}
1984 		chk->rec.data.TSN_seq = tsn;
1985 		chk->no_fr_allowed = 0;
1986 		chk->rec.data.fsn_num = fsn;
1987 		chk->rec.data.stream_seq = msg_id;
1988 		chk->rec.data.stream_number = strmno;
1989 		chk->rec.data.payloadtype = protocol_id;
1990 		chk->rec.data.context = stcb->asoc.context;
1991 		chk->rec.data.doing_fast_retransmit = 0;
1992 		chk->rec.data.rcv_flags = chunk_flags;
1993 		chk->asoc = asoc;
1994 		chk->send_size = the_len;
1995 		chk->whoTo = net;
1996 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck:%p for control:%p to be read (mid:%d)\n",
1997 		    chk,
1998 		    control, msg_id);
1999 		atomic_add_int(&net->ref_count, 1);
2000 		chk->data = dmbuf;
2001 	}
2002 	/* Set the appropriate TSN mark */
2003 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2004 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2005 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2006 			asoc->highest_tsn_inside_nr_map = tsn;
2007 		}
2008 	} else {
2009 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2010 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2011 			asoc->highest_tsn_inside_map = tsn;
2012 		}
2013 	}
2014 	/* Now is it complete (i.e. not fragmented)? */
2015 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2016 		/*
2017 		 * Special check for when streams are resetting. We could be
2018 		 * more smart about this and check the actual stream to see
2019 		 * if it is not being reset.. that way we would not create a
2020 		 * HOLB when amongst streams being reset and those not being
2021 		 * reset.
2022 		 *
2023 		 */
2024 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2025 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2026 			/*
2027 			 * yep its past where we need to reset... go ahead
2028 			 * and queue it.
2029 			 */
2030 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2031 				/* first one on */
2032 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2033 			} else {
2034 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2035 				unsigned char inserted = 0;
2036 
2037 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2038 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2039 
2040 						continue;
2041 					} else {
2042 						/* found it */
2043 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2044 						inserted = 1;
2045 						break;
2046 					}
2047 				}
2048 				if (inserted == 0) {
2049 					/*
2050 					 * must be put at end, use prevP
2051 					 * (all setup from loop) to setup
2052 					 * nextP.
2053 					 */
2054 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2055 				}
2056 			}
2057 			goto finish_express_del;
2058 		}
2059 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2060 			/* queue directly into socket buffer */
2061 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control:%p msg_id:%d\n",
2062 			    control, msg_id);
2063 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2064 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2065 			    control,
2066 			    &stcb->sctp_socket->so_rcv, 1,
2067 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2068 
2069 		} else {
2070 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control:%p for reordering msg_id:%d\n", control,
2071 			    msg_id);
2072 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2073 			if (*abort_flag) {
2074 				if (last_chunk) {
2075 					*m = NULL;
2076 				}
2077 				return (0);
2078 			}
2079 		}
2080 		goto finish_express_del;
2081 	}
2082 	/* If we reach here its a reassembly */
2083 	need_reasm_check = 1;
2084 	SCTPDBG(SCTP_DEBUG_XXX,
2085 	    "Queue data to stream for reasm control:%p msg_id:%d\n",
2086 	    control, msg_id);
2087 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2088 	if (*abort_flag) {
2089 		/*
2090 		 * the assoc is now gone and chk was put onto the reasm
2091 		 * queue, which has all been freed.
2092 		 */
2093 		if (last_chunk) {
2094 			*m = NULL;
2095 		}
2096 		return (0);
2097 	}
2098 finish_express_del:
2099 	/* Here we tidy up things */
2100 	if (tsn == (asoc->cumulative_tsn + 1)) {
2101 		/* Update cum-ack */
2102 		asoc->cumulative_tsn = tsn;
2103 	}
2104 	if (last_chunk) {
2105 		*m = NULL;
2106 	}
2107 	if (ordered) {
2108 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2109 	} else {
2110 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2111 	}
2112 	SCTP_STAT_INCR(sctps_recvdata);
2113 	/* Set it present please */
2114 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2115 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2116 	}
2117 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2118 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2119 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2120 	}
2121 	/* check the special flag for stream resets */
2122 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2123 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2124 		/*
2125 		 * we have finished working through the backlogged TSN's now
2126 		 * time to reset streams. 1: call reset function. 2: free
2127 		 * pending_reply space 3: distribute any chunks in
2128 		 * pending_reply_queue.
2129 		 */
2130 		struct sctp_queued_to_read *ctl, *nctl;
2131 
2132 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2133 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2134 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2135 		SCTP_FREE(liste, SCTP_M_STRESET);
2136 		/* sa_ignore FREED_MEMORY */
2137 		liste = TAILQ_FIRST(&asoc->resetHead);
2138 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2139 			/* All can be removed */
2140 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2141 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2142 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2143 				if (*abort_flag) {
2144 					return (0);
2145 				}
2146 			}
2147 		} else {
2148 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2149 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2150 					break;
2151 				}
2152 				/*
2153 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2154 				 * process it which is the NOT of
2155 				 * ctl->sinfo_tsn > liste->tsn
2156 				 */
2157 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2158 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2159 				if (*abort_flag) {
2160 					return (0);
2161 				}
2162 			}
2163 		}
2164 		/*
2165 		 * Now service re-assembly to pick up anything that has been
2166 		 * held on reassembly queue?
2167 		 */
2168 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2169 		need_reasm_check = 0;
2170 	}
2171 	if (need_reasm_check) {
2172 		/* Another one waits ? */
2173 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2174 	}
2175 	return (1);
2176 }
2177 
2178 static const int8_t sctp_map_lookup_tab[256] = {
2179 	0, 1, 0, 2, 0, 1, 0, 3,
2180 	0, 1, 0, 2, 0, 1, 0, 4,
2181 	0, 1, 0, 2, 0, 1, 0, 3,
2182 	0, 1, 0, 2, 0, 1, 0, 5,
2183 	0, 1, 0, 2, 0, 1, 0, 3,
2184 	0, 1, 0, 2, 0, 1, 0, 4,
2185 	0, 1, 0, 2, 0, 1, 0, 3,
2186 	0, 1, 0, 2, 0, 1, 0, 6,
2187 	0, 1, 0, 2, 0, 1, 0, 3,
2188 	0, 1, 0, 2, 0, 1, 0, 4,
2189 	0, 1, 0, 2, 0, 1, 0, 3,
2190 	0, 1, 0, 2, 0, 1, 0, 5,
2191 	0, 1, 0, 2, 0, 1, 0, 3,
2192 	0, 1, 0, 2, 0, 1, 0, 4,
2193 	0, 1, 0, 2, 0, 1, 0, 3,
2194 	0, 1, 0, 2, 0, 1, 0, 7,
2195 	0, 1, 0, 2, 0, 1, 0, 3,
2196 	0, 1, 0, 2, 0, 1, 0, 4,
2197 	0, 1, 0, 2, 0, 1, 0, 3,
2198 	0, 1, 0, 2, 0, 1, 0, 5,
2199 	0, 1, 0, 2, 0, 1, 0, 3,
2200 	0, 1, 0, 2, 0, 1, 0, 4,
2201 	0, 1, 0, 2, 0, 1, 0, 3,
2202 	0, 1, 0, 2, 0, 1, 0, 6,
2203 	0, 1, 0, 2, 0, 1, 0, 3,
2204 	0, 1, 0, 2, 0, 1, 0, 4,
2205 	0, 1, 0, 2, 0, 1, 0, 3,
2206 	0, 1, 0, 2, 0, 1, 0, 5,
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 4,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 8
2211 };
2212 
2213 
2214 void
2215 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2216 {
2217 	/*
2218 	 * Now we also need to check the mapping array in a couple of ways.
2219 	 * 1) Did we move the cum-ack point?
2220 	 *
2221 	 * When you first glance at this you might think that all entries that
2222 	 * make up the postion of the cum-ack would be in the nr-mapping
2223 	 * array only.. i.e. things up to the cum-ack are always
2224 	 * deliverable. Thats true with one exception, when its a fragmented
2225 	 * message we may not deliver the data until some threshold (or all
2226 	 * of it) is in place. So we must OR the nr_mapping_array and
2227 	 * mapping_array to get a true picture of the cum-ack.
2228 	 */
2229 	struct sctp_association *asoc;
2230 	int at;
2231 	uint8_t val;
2232 	int slide_from, slide_end, lgap, distance;
2233 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2234 
2235 	asoc = &stcb->asoc;
2236 
2237 	old_cumack = asoc->cumulative_tsn;
2238 	old_base = asoc->mapping_array_base_tsn;
2239 	old_highest = asoc->highest_tsn_inside_map;
2240 	/*
2241 	 * We could probably improve this a small bit by calculating the
2242 	 * offset of the current cum-ack as the starting point.
2243 	 */
2244 	at = 0;
2245 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2246 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2247 		if (val == 0xff) {
2248 			at += 8;
2249 		} else {
2250 			/* there is a 0 bit */
2251 			at += sctp_map_lookup_tab[val];
2252 			break;
2253 		}
2254 	}
2255 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2256 
2257 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2258 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2259 #ifdef INVARIANTS
2260 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2261 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2262 #else
2263 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2264 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2265 		sctp_print_mapping_array(asoc);
2266 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2267 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2268 		}
2269 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2270 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2271 #endif
2272 	}
2273 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2274 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2275 	} else {
2276 		highest_tsn = asoc->highest_tsn_inside_map;
2277 	}
2278 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2279 		/* The complete array was completed by a single FR */
2280 		/* highest becomes the cum-ack */
2281 		int clr;
2282 
2283 #ifdef INVARIANTS
2284 		unsigned int i;
2285 
2286 #endif
2287 
2288 		/* clear the array */
2289 		clr = ((at + 7) >> 3);
2290 		if (clr > asoc->mapping_array_size) {
2291 			clr = asoc->mapping_array_size;
2292 		}
2293 		memset(asoc->mapping_array, 0, clr);
2294 		memset(asoc->nr_mapping_array, 0, clr);
2295 #ifdef INVARIANTS
2296 		for (i = 0; i < asoc->mapping_array_size; i++) {
2297 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2298 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2299 				sctp_print_mapping_array(asoc);
2300 			}
2301 		}
2302 #endif
2303 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2304 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2305 	} else if (at >= 8) {
2306 		/* we can slide the mapping array down */
2307 		/* slide_from holds where we hit the first NON 0xff byte */
2308 
2309 		/*
2310 		 * now calculate the ceiling of the move using our highest
2311 		 * TSN value
2312 		 */
2313 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2314 		slide_end = (lgap >> 3);
2315 		if (slide_end < slide_from) {
2316 			sctp_print_mapping_array(asoc);
2317 #ifdef INVARIANTS
2318 			panic("impossible slide");
2319 #else
2320 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2321 			    lgap, slide_end, slide_from, at);
2322 			return;
2323 #endif
2324 		}
2325 		if (slide_end > asoc->mapping_array_size) {
2326 #ifdef INVARIANTS
2327 			panic("would overrun buffer");
2328 #else
2329 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2330 			    asoc->mapping_array_size, slide_end);
2331 			slide_end = asoc->mapping_array_size;
2332 #endif
2333 		}
2334 		distance = (slide_end - slide_from) + 1;
2335 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2336 			sctp_log_map(old_base, old_cumack, old_highest,
2337 			    SCTP_MAP_PREPARE_SLIDE);
2338 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2339 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2340 		}
2341 		if (distance + slide_from > asoc->mapping_array_size ||
2342 		    distance < 0) {
2343 			/*
2344 			 * Here we do NOT slide forward the array so that
2345 			 * hopefully when more data comes in to fill it up
2346 			 * we will be able to slide it forward. Really I
2347 			 * don't think this should happen :-0
2348 			 */
2349 
2350 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2351 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2352 				    (uint32_t) asoc->mapping_array_size,
2353 				    SCTP_MAP_SLIDE_NONE);
2354 			}
2355 		} else {
2356 			int ii;
2357 
2358 			for (ii = 0; ii < distance; ii++) {
2359 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2360 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2361 
2362 			}
2363 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2364 				asoc->mapping_array[ii] = 0;
2365 				asoc->nr_mapping_array[ii] = 0;
2366 			}
2367 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2368 				asoc->highest_tsn_inside_map += (slide_from << 3);
2369 			}
2370 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2371 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2372 			}
2373 			asoc->mapping_array_base_tsn += (slide_from << 3);
2374 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2375 				sctp_log_map(asoc->mapping_array_base_tsn,
2376 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2377 				    SCTP_MAP_SLIDE_RESULT);
2378 			}
2379 		}
2380 	}
2381 }
2382 
2383 void
2384 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2385 {
2386 	struct sctp_association *asoc;
2387 	uint32_t highest_tsn;
2388 
2389 	asoc = &stcb->asoc;
2390 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2391 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2392 	} else {
2393 		highest_tsn = asoc->highest_tsn_inside_map;
2394 	}
2395 
2396 	/*
2397 	 * Now we need to see if we need to queue a sack or just start the
2398 	 * timer (if allowed).
2399 	 */
2400 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2401 		/*
2402 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2403 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2404 		 * SACK
2405 		 */
2406 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2407 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2408 			    stcb->sctp_ep, stcb, NULL,
2409 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2410 		}
2411 		sctp_send_shutdown(stcb,
2412 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2413 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2414 	} else {
2415 		int is_a_gap;
2416 
2417 		/* is there a gap now ? */
2418 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2419 
2420 		/*
2421 		 * CMT DAC algorithm: increase number of packets received
2422 		 * since last ack
2423 		 */
2424 		stcb->asoc.cmt_dac_pkts_rcvd++;
2425 
2426 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2427 							 * SACK */
2428 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2429 							 * longer is one */
2430 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2431 		    (is_a_gap) ||	/* is still a gap */
2432 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2433 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2434 		    ) {
2435 
2436 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2437 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2438 			    (stcb->asoc.send_sack == 0) &&
2439 			    (stcb->asoc.numduptsns == 0) &&
2440 			    (stcb->asoc.delayed_ack) &&
2441 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2442 
2443 				/*
2444 				 * CMT DAC algorithm: With CMT, delay acks
2445 				 * even in the face of
2446 				 *
2447 				 * reordering. Therefore, if acks that do not
2448 				 * have to be sent because of the above
2449 				 * reasons, will be delayed. That is, acks
2450 				 * that would have been sent due to gap
2451 				 * reports will be delayed with DAC. Start
2452 				 * the delayed ack timer.
2453 				 */
2454 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2455 				    stcb->sctp_ep, stcb, NULL);
2456 			} else {
2457 				/*
2458 				 * Ok we must build a SACK since the timer
2459 				 * is pending, we got our first packet OR
2460 				 * there are gaps or duplicates.
2461 				 */
2462 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2463 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2464 			}
2465 		} else {
2466 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2467 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2468 				    stcb->sctp_ep, stcb, NULL);
2469 			}
2470 		}
2471 	}
2472 }
2473 
2474 int
2475 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2476     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2477     struct sctp_nets *net, uint32_t * high_tsn)
2478 {
2479 	struct sctp_chunkhdr *ch, chunk_buf;
2480 	struct sctp_association *asoc;
2481 	int num_chunks = 0;	/* number of control chunks processed */
2482 	int stop_proc = 0;
2483 	int chk_length, break_flag, last_chunk;
2484 	int abort_flag = 0, was_a_gap;
2485 	struct mbuf *m;
2486 	uint32_t highest_tsn;
2487 
2488 	/* set the rwnd */
2489 	sctp_set_rwnd(stcb, &stcb->asoc);
2490 
2491 	m = *mm;
2492 	SCTP_TCB_LOCK_ASSERT(stcb);
2493 	asoc = &stcb->asoc;
2494 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2495 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2496 	} else {
2497 		highest_tsn = asoc->highest_tsn_inside_map;
2498 	}
2499 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2500 	/*
2501 	 * setup where we got the last DATA packet from for any SACK that
2502 	 * may need to go out. Don't bump the net. This is done ONLY when a
2503 	 * chunk is assigned.
2504 	 */
2505 	asoc->last_data_chunk_from = net;
2506 
2507 	/*-
2508 	 * Now before we proceed we must figure out if this is a wasted
2509 	 * cluster... i.e. it is a small packet sent in and yet the driver
2510 	 * underneath allocated a full cluster for it. If so we must copy it
2511 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2512 	 * with cluster starvation. Note for __Panda__ we don't do this
2513 	 * since it has clusters all the way down to 64 bytes.
2514 	 */
2515 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2516 		/* we only handle mbufs that are singletons.. not chains */
2517 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2518 		if (m) {
2519 			/* ok lets see if we can copy the data up */
2520 			caddr_t *from, *to;
2521 
2522 			/* get the pointers and copy */
2523 			to = mtod(m, caddr_t *);
2524 			from = mtod((*mm), caddr_t *);
2525 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2526 			/* copy the length and free up the old */
2527 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2528 			sctp_m_freem(*mm);
2529 			/* sucess, back copy */
2530 			*mm = m;
2531 		} else {
2532 			/* We are in trouble in the mbuf world .. yikes */
2533 			m = *mm;
2534 		}
2535 	}
2536 	/* get pointer to the first chunk header */
2537 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2538 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2539 	if (ch == NULL) {
2540 		return (1);
2541 	}
2542 	/*
2543 	 * process all DATA chunks...
2544 	 */
2545 	*high_tsn = asoc->cumulative_tsn;
2546 	break_flag = 0;
2547 	asoc->data_pkts_seen++;
2548 	while (stop_proc == 0) {
2549 		/* validate chunk length */
2550 		chk_length = ntohs(ch->chunk_length);
2551 		if (length - *offset < chk_length) {
2552 			/* all done, mutulated chunk */
2553 			stop_proc = 1;
2554 			continue;
2555 		}
2556 		if ((asoc->idata_supported == 1) &&
2557 		    (ch->chunk_type == SCTP_DATA)) {
2558 			struct mbuf *op_err;
2559 			char msg[SCTP_DIAG_INFO_LEN];
2560 
2561 			snprintf(msg, sizeof(msg), "I-DATA chunk received when DATA was negotiated");
2562 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2563 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2564 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2565 			return (2);
2566 		}
2567 		if ((asoc->idata_supported == 0) &&
2568 		    (ch->chunk_type == SCTP_IDATA)) {
2569 			struct mbuf *op_err;
2570 			char msg[SCTP_DIAG_INFO_LEN];
2571 
2572 			snprintf(msg, sizeof(msg), "DATA chunk received when I-DATA was negotiated");
2573 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2574 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2575 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2576 			return (2);
2577 		}
2578 		if ((ch->chunk_type == SCTP_DATA) ||
2579 		    (ch->chunk_type == SCTP_IDATA)) {
2580 			int clen;
2581 
2582 			if (ch->chunk_type == SCTP_DATA) {
2583 				clen = sizeof(struct sctp_data_chunk);
2584 			} else {
2585 				clen = sizeof(struct sctp_idata_chunk);
2586 			}
2587 			if ((size_t)chk_length < clen) {
2588 				/*
2589 				 * Need to send an abort since we had a
2590 				 * invalid data chunk.
2591 				 */
2592 				struct mbuf *op_err;
2593 				char msg[SCTP_DIAG_INFO_LEN];
2594 
2595 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2596 				    chk_length);
2597 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2598 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2599 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2600 				return (2);
2601 			}
2602 #ifdef SCTP_AUDITING_ENABLED
2603 			sctp_audit_log(0xB1, 0);
2604 #endif
2605 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2606 				last_chunk = 1;
2607 			} else {
2608 				last_chunk = 0;
2609 			}
2610 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2611 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2612 			    last_chunk, ch->chunk_type)) {
2613 				num_chunks++;
2614 			}
2615 			if (abort_flag)
2616 				return (2);
2617 
2618 			if (break_flag) {
2619 				/*
2620 				 * Set because of out of rwnd space and no
2621 				 * drop rep space left.
2622 				 */
2623 				stop_proc = 1;
2624 				continue;
2625 			}
2626 		} else {
2627 			/* not a data chunk in the data region */
2628 			switch (ch->chunk_type) {
2629 			case SCTP_INITIATION:
2630 			case SCTP_INITIATION_ACK:
2631 			case SCTP_SELECTIVE_ACK:
2632 			case SCTP_NR_SELECTIVE_ACK:
2633 			case SCTP_HEARTBEAT_REQUEST:
2634 			case SCTP_HEARTBEAT_ACK:
2635 			case SCTP_ABORT_ASSOCIATION:
2636 			case SCTP_SHUTDOWN:
2637 			case SCTP_SHUTDOWN_ACK:
2638 			case SCTP_OPERATION_ERROR:
2639 			case SCTP_COOKIE_ECHO:
2640 			case SCTP_COOKIE_ACK:
2641 			case SCTP_ECN_ECHO:
2642 			case SCTP_ECN_CWR:
2643 			case SCTP_SHUTDOWN_COMPLETE:
2644 			case SCTP_AUTHENTICATION:
2645 			case SCTP_ASCONF_ACK:
2646 			case SCTP_PACKET_DROPPED:
2647 			case SCTP_STREAM_RESET:
2648 			case SCTP_FORWARD_CUM_TSN:
2649 			case SCTP_ASCONF:
2650 				/*
2651 				 * Now, what do we do with KNOWN chunks that
2652 				 * are NOT in the right place?
2653 				 *
2654 				 * For now, I do nothing but ignore them. We
2655 				 * may later want to add sysctl stuff to
2656 				 * switch out and do either an ABORT() or
2657 				 * possibly process them.
2658 				 */
2659 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2660 					struct mbuf *op_err;
2661 					char msg[SCTP_DIAG_INFO_LEN];
2662 
2663 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2664 					    ch->chunk_type);
2665 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2666 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2667 					return (2);
2668 				}
2669 				break;
2670 			default:
2671 				/* unknown chunk type, use bit rules */
2672 				if (ch->chunk_type & 0x40) {
2673 					/* Add a error report to the queue */
2674 					struct mbuf *op_err;
2675 					struct sctp_gen_error_cause *cause;
2676 
2677 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2678 					    0, M_NOWAIT, 1, MT_DATA);
2679 					if (op_err != NULL) {
2680 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2681 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2682 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2683 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2684 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2685 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2686 							sctp_queue_op_err(stcb, op_err);
2687 						} else {
2688 							sctp_m_freem(op_err);
2689 						}
2690 					}
2691 				}
2692 				if ((ch->chunk_type & 0x80) == 0) {
2693 					/* discard the rest of this packet */
2694 					stop_proc = 1;
2695 				}	/* else skip this bad chunk and
2696 					 * continue... */
2697 				break;
2698 			}	/* switch of chunk type */
2699 		}
2700 		*offset += SCTP_SIZE32(chk_length);
2701 		if ((*offset >= length) || stop_proc) {
2702 			/* no more data left in the mbuf chain */
2703 			stop_proc = 1;
2704 			continue;
2705 		}
2706 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2707 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2708 		if (ch == NULL) {
2709 			*offset = length;
2710 			stop_proc = 1;
2711 			continue;
2712 		}
2713 	}
2714 	if (break_flag) {
2715 		/*
2716 		 * we need to report rwnd overrun drops.
2717 		 */
2718 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2719 	}
2720 	if (num_chunks) {
2721 		/*
2722 		 * Did we get data, if so update the time for auto-close and
2723 		 * give peer credit for being alive.
2724 		 */
2725 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2726 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2727 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2728 			    stcb->asoc.overall_error_count,
2729 			    0,
2730 			    SCTP_FROM_SCTP_INDATA,
2731 			    __LINE__);
2732 		}
2733 		stcb->asoc.overall_error_count = 0;
2734 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2735 	}
2736 	/* now service all of the reassm queue if needed */
2737 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2738 		/* Assure that we ack right away */
2739 		stcb->asoc.send_sack = 1;
2740 	}
2741 	/* Start a sack timer or QUEUE a SACK for sending */
2742 	sctp_sack_check(stcb, was_a_gap);
2743 	return (0);
2744 }
2745 
2746 static int
2747 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2748     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2749     int *num_frs,
2750     uint32_t * biggest_newly_acked_tsn,
2751     uint32_t * this_sack_lowest_newack,
2752     int *rto_ok)
2753 {
2754 	struct sctp_tmit_chunk *tp1;
2755 	unsigned int theTSN;
2756 	int j, wake_him = 0, circled = 0;
2757 
2758 	/* Recover the tp1 we last saw */
2759 	tp1 = *p_tp1;
2760 	if (tp1 == NULL) {
2761 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2762 	}
2763 	for (j = frag_strt; j <= frag_end; j++) {
2764 		theTSN = j + last_tsn;
2765 		while (tp1) {
2766 			if (tp1->rec.data.doing_fast_retransmit)
2767 				(*num_frs) += 1;
2768 
2769 			/*-
2770 			 * CMT: CUCv2 algorithm. For each TSN being
2771 			 * processed from the sent queue, track the
2772 			 * next expected pseudo-cumack, or
2773 			 * rtx_pseudo_cumack, if required. Separate
2774 			 * cumack trackers for first transmissions,
2775 			 * and retransmissions.
2776 			 */
2777 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2778 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2779 			    (tp1->snd_count == 1)) {
2780 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2781 				tp1->whoTo->find_pseudo_cumack = 0;
2782 			}
2783 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2784 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2785 			    (tp1->snd_count > 1)) {
2786 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2787 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2788 			}
2789 			if (tp1->rec.data.TSN_seq == theTSN) {
2790 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2791 					/*-
2792 					 * must be held until
2793 					 * cum-ack passes
2794 					 */
2795 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2796 						/*-
2797 						 * If it is less than RESEND, it is
2798 						 * now no-longer in flight.
2799 						 * Higher values may already be set
2800 						 * via previous Gap Ack Blocks...
2801 						 * i.e. ACKED or RESEND.
2802 						 */
2803 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2804 						    *biggest_newly_acked_tsn)) {
2805 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2806 						}
2807 						/*-
2808 						 * CMT: SFR algo (and HTNA) - set
2809 						 * saw_newack to 1 for dest being
2810 						 * newly acked. update
2811 						 * this_sack_highest_newack if
2812 						 * appropriate.
2813 						 */
2814 						if (tp1->rec.data.chunk_was_revoked == 0)
2815 							tp1->whoTo->saw_newack = 1;
2816 
2817 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2818 						    tp1->whoTo->this_sack_highest_newack)) {
2819 							tp1->whoTo->this_sack_highest_newack =
2820 							    tp1->rec.data.TSN_seq;
2821 						}
2822 						/*-
2823 						 * CMT DAC algo: also update
2824 						 * this_sack_lowest_newack
2825 						 */
2826 						if (*this_sack_lowest_newack == 0) {
2827 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2828 								sctp_log_sack(*this_sack_lowest_newack,
2829 								    last_tsn,
2830 								    tp1->rec.data.TSN_seq,
2831 								    0,
2832 								    0,
2833 								    SCTP_LOG_TSN_ACKED);
2834 							}
2835 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2836 						}
2837 						/*-
2838 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2839 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2840 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2841 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2842 						 * Separate pseudo_cumack trackers for first transmissions and
2843 						 * retransmissions.
2844 						 */
2845 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2846 							if (tp1->rec.data.chunk_was_revoked == 0) {
2847 								tp1->whoTo->new_pseudo_cumack = 1;
2848 							}
2849 							tp1->whoTo->find_pseudo_cumack = 1;
2850 						}
2851 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2852 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2853 						}
2854 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2855 							if (tp1->rec.data.chunk_was_revoked == 0) {
2856 								tp1->whoTo->new_pseudo_cumack = 1;
2857 							}
2858 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2859 						}
2860 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2861 							sctp_log_sack(*biggest_newly_acked_tsn,
2862 							    last_tsn,
2863 							    tp1->rec.data.TSN_seq,
2864 							    frag_strt,
2865 							    frag_end,
2866 							    SCTP_LOG_TSN_ACKED);
2867 						}
2868 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2869 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2870 							    tp1->whoTo->flight_size,
2871 							    tp1->book_size,
2872 							    (uint32_t) (uintptr_t) tp1->whoTo,
2873 							    tp1->rec.data.TSN_seq);
2874 						}
2875 						sctp_flight_size_decrease(tp1);
2876 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2877 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2878 							    tp1);
2879 						}
2880 						sctp_total_flight_decrease(stcb, tp1);
2881 
2882 						tp1->whoTo->net_ack += tp1->send_size;
2883 						if (tp1->snd_count < 2) {
2884 							/*-
2885 							 * True non-retransmited chunk
2886 							 */
2887 							tp1->whoTo->net_ack2 += tp1->send_size;
2888 
2889 							/*-
2890 							 * update RTO too ?
2891 							 */
2892 							if (tp1->do_rtt) {
2893 								if (*rto_ok) {
2894 									tp1->whoTo->RTO =
2895 									    sctp_calculate_rto(stcb,
2896 									    &stcb->asoc,
2897 									    tp1->whoTo,
2898 									    &tp1->sent_rcv_time,
2899 									    sctp_align_safe_nocopy,
2900 									    SCTP_RTT_FROM_DATA);
2901 									*rto_ok = 0;
2902 								}
2903 								if (tp1->whoTo->rto_needed == 0) {
2904 									tp1->whoTo->rto_needed = 1;
2905 								}
2906 								tp1->do_rtt = 0;
2907 							}
2908 						}
2909 					}
2910 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2911 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2912 						    stcb->asoc.this_sack_highest_gap)) {
2913 							stcb->asoc.this_sack_highest_gap =
2914 							    tp1->rec.data.TSN_seq;
2915 						}
2916 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2917 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2918 #ifdef SCTP_AUDITING_ENABLED
2919 							sctp_audit_log(0xB2,
2920 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2921 #endif
2922 						}
2923 					}
2924 					/*-
2925 					 * All chunks NOT UNSENT fall through here and are marked
2926 					 * (leave PR-SCTP ones that are to skip alone though)
2927 					 */
2928 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2929 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2930 						tp1->sent = SCTP_DATAGRAM_MARKED;
2931 					}
2932 					if (tp1->rec.data.chunk_was_revoked) {
2933 						/* deflate the cwnd */
2934 						tp1->whoTo->cwnd -= tp1->book_size;
2935 						tp1->rec.data.chunk_was_revoked = 0;
2936 					}
2937 					/* NR Sack code here */
2938 					if (nr_sacking &&
2939 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2940 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2941 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2942 #ifdef INVARIANTS
2943 						} else {
2944 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2945 #endif
2946 						}
2947 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2948 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2949 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2950 							stcb->asoc.trigger_reset = 1;
2951 						}
2952 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2953 						if (tp1->data) {
2954 							/*
2955 							 * sa_ignore
2956 							 * NO_NULL_CHK
2957 							 */
2958 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2959 							sctp_m_freem(tp1->data);
2960 							tp1->data = NULL;
2961 						}
2962 						wake_him++;
2963 					}
2964 				}
2965 				break;
2966 			}	/* if (tp1->TSN_seq == theTSN) */
2967 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2968 				break;
2969 			}
2970 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2971 			if ((tp1 == NULL) && (circled == 0)) {
2972 				circled++;
2973 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2974 			}
2975 		}		/* end while (tp1) */
2976 		if (tp1 == NULL) {
2977 			circled = 0;
2978 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2979 		}
2980 		/* In case the fragments were not in order we must reset */
2981 	}			/* end for (j = fragStart */
2982 	*p_tp1 = tp1;
2983 	return (wake_him);	/* Return value only used for nr-sack */
2984 }
2985 
2986 
2987 static int
2988 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2989     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2990     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2991     int num_seg, int num_nr_seg, int *rto_ok)
2992 {
2993 	struct sctp_gap_ack_block *frag, block;
2994 	struct sctp_tmit_chunk *tp1;
2995 	int i;
2996 	int num_frs = 0;
2997 	int chunk_freed;
2998 	int non_revocable;
2999 	uint16_t frag_strt, frag_end, prev_frag_end;
3000 
3001 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3002 	prev_frag_end = 0;
3003 	chunk_freed = 0;
3004 
3005 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3006 		if (i == num_seg) {
3007 			prev_frag_end = 0;
3008 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3009 		}
3010 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3011 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3012 		*offset += sizeof(block);
3013 		if (frag == NULL) {
3014 			return (chunk_freed);
3015 		}
3016 		frag_strt = ntohs(frag->start);
3017 		frag_end = ntohs(frag->end);
3018 
3019 		if (frag_strt > frag_end) {
3020 			/* This gap report is malformed, skip it. */
3021 			continue;
3022 		}
3023 		if (frag_strt <= prev_frag_end) {
3024 			/* This gap report is not in order, so restart. */
3025 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3026 		}
3027 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3028 			*biggest_tsn_acked = last_tsn + frag_end;
3029 		}
3030 		if (i < num_seg) {
3031 			non_revocable = 0;
3032 		} else {
3033 			non_revocable = 1;
3034 		}
3035 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3036 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3037 		    this_sack_lowest_newack, rto_ok)) {
3038 			chunk_freed = 1;
3039 		}
3040 		prev_frag_end = frag_end;
3041 	}
3042 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3043 		if (num_frs)
3044 			sctp_log_fr(*biggest_tsn_acked,
3045 			    *biggest_newly_acked_tsn,
3046 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3047 	}
3048 	return (chunk_freed);
3049 }
3050 
3051 static void
3052 sctp_check_for_revoked(struct sctp_tcb *stcb,
3053     struct sctp_association *asoc, uint32_t cumack,
3054     uint32_t biggest_tsn_acked)
3055 {
3056 	struct sctp_tmit_chunk *tp1;
3057 
3058 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3059 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3060 			/*
3061 			 * ok this guy is either ACK or MARKED. If it is
3062 			 * ACKED it has been previously acked but not this
3063 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3064 			 * again.
3065 			 */
3066 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3067 				break;
3068 			}
3069 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3070 				/* it has been revoked */
3071 				tp1->sent = SCTP_DATAGRAM_SENT;
3072 				tp1->rec.data.chunk_was_revoked = 1;
3073 				/*
3074 				 * We must add this stuff back in to assure
3075 				 * timers and such get started.
3076 				 */
3077 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3078 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3079 					    tp1->whoTo->flight_size,
3080 					    tp1->book_size,
3081 					    (uint32_t) (uintptr_t) tp1->whoTo,
3082 					    tp1->rec.data.TSN_seq);
3083 				}
3084 				sctp_flight_size_increase(tp1);
3085 				sctp_total_flight_increase(stcb, tp1);
3086 				/*
3087 				 * We inflate the cwnd to compensate for our
3088 				 * artificial inflation of the flight_size.
3089 				 */
3090 				tp1->whoTo->cwnd += tp1->book_size;
3091 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3092 					sctp_log_sack(asoc->last_acked_seq,
3093 					    cumack,
3094 					    tp1->rec.data.TSN_seq,
3095 					    0,
3096 					    0,
3097 					    SCTP_LOG_TSN_REVOKED);
3098 				}
3099 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3100 				/* it has been re-acked in this SACK */
3101 				tp1->sent = SCTP_DATAGRAM_ACKED;
3102 			}
3103 		}
3104 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3105 			break;
3106 	}
3107 }
3108 
3109 
3110 static void
3111 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3112     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3113 {
3114 	struct sctp_tmit_chunk *tp1;
3115 	int strike_flag = 0;
3116 	struct timeval now;
3117 	int tot_retrans = 0;
3118 	uint32_t sending_seq;
3119 	struct sctp_nets *net;
3120 	int num_dests_sacked = 0;
3121 
3122 	/*
3123 	 * select the sending_seq, this is either the next thing ready to be
3124 	 * sent but not transmitted, OR, the next seq we assign.
3125 	 */
3126 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3127 	if (tp1 == NULL) {
3128 		sending_seq = asoc->sending_seq;
3129 	} else {
3130 		sending_seq = tp1->rec.data.TSN_seq;
3131 	}
3132 
3133 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3134 	if ((asoc->sctp_cmt_on_off > 0) &&
3135 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3136 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3137 			if (net->saw_newack)
3138 				num_dests_sacked++;
3139 		}
3140 	}
3141 	if (stcb->asoc.prsctp_supported) {
3142 		(void)SCTP_GETTIME_TIMEVAL(&now);
3143 	}
3144 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3145 		strike_flag = 0;
3146 		if (tp1->no_fr_allowed) {
3147 			/* this one had a timeout or something */
3148 			continue;
3149 		}
3150 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3151 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3152 				sctp_log_fr(biggest_tsn_newly_acked,
3153 				    tp1->rec.data.TSN_seq,
3154 				    tp1->sent,
3155 				    SCTP_FR_LOG_CHECK_STRIKE);
3156 		}
3157 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3158 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3159 			/* done */
3160 			break;
3161 		}
3162 		if (stcb->asoc.prsctp_supported) {
3163 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3164 				/* Is it expired? */
3165 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3166 					/* Yes so drop it */
3167 					if (tp1->data != NULL) {
3168 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3169 						    SCTP_SO_NOT_LOCKED);
3170 					}
3171 					continue;
3172 				}
3173 			}
3174 		}
3175 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3176 			/* we are beyond the tsn in the sack  */
3177 			break;
3178 		}
3179 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3180 			/* either a RESEND, ACKED, or MARKED */
3181 			/* skip */
3182 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3183 				/* Continue strikin FWD-TSN chunks */
3184 				tp1->rec.data.fwd_tsn_cnt++;
3185 			}
3186 			continue;
3187 		}
3188 		/*
3189 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3190 		 */
3191 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3192 			/*
3193 			 * No new acks were receieved for data sent to this
3194 			 * dest. Therefore, according to the SFR algo for
3195 			 * CMT, no data sent to this dest can be marked for
3196 			 * FR using this SACK.
3197 			 */
3198 			continue;
3199 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3200 		    tp1->whoTo->this_sack_highest_newack)) {
3201 			/*
3202 			 * CMT: New acks were receieved for data sent to
3203 			 * this dest. But no new acks were seen for data
3204 			 * sent after tp1. Therefore, according to the SFR
3205 			 * algo for CMT, tp1 cannot be marked for FR using
3206 			 * this SACK. This step covers part of the DAC algo
3207 			 * and the HTNA algo as well.
3208 			 */
3209 			continue;
3210 		}
3211 		/*
3212 		 * Here we check to see if we were have already done a FR
3213 		 * and if so we see if the biggest TSN we saw in the sack is
3214 		 * smaller than the recovery point. If so we don't strike
3215 		 * the tsn... otherwise we CAN strike the TSN.
3216 		 */
3217 		/*
3218 		 * @@@ JRI: Check for CMT if (accum_moved &&
3219 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3220 		 * 0)) {
3221 		 */
3222 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3223 			/*
3224 			 * Strike the TSN if in fast-recovery and cum-ack
3225 			 * moved.
3226 			 */
3227 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3228 				sctp_log_fr(biggest_tsn_newly_acked,
3229 				    tp1->rec.data.TSN_seq,
3230 				    tp1->sent,
3231 				    SCTP_FR_LOG_STRIKE_CHUNK);
3232 			}
3233 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3234 				tp1->sent++;
3235 			}
3236 			if ((asoc->sctp_cmt_on_off > 0) &&
3237 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3238 				/*
3239 				 * CMT DAC algorithm: If SACK flag is set to
3240 				 * 0, then lowest_newack test will not pass
3241 				 * because it would have been set to the
3242 				 * cumack earlier. If not already to be
3243 				 * rtx'd, If not a mixed sack and if tp1 is
3244 				 * not between two sacked TSNs, then mark by
3245 				 * one more. NOTE that we are marking by one
3246 				 * additional time since the SACK DAC flag
3247 				 * indicates that two packets have been
3248 				 * received after this missing TSN.
3249 				 */
3250 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3251 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3252 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3253 						sctp_log_fr(16 + num_dests_sacked,
3254 						    tp1->rec.data.TSN_seq,
3255 						    tp1->sent,
3256 						    SCTP_FR_LOG_STRIKE_CHUNK);
3257 					}
3258 					tp1->sent++;
3259 				}
3260 			}
3261 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3262 		    (asoc->sctp_cmt_on_off == 0)) {
3263 			/*
3264 			 * For those that have done a FR we must take
3265 			 * special consideration if we strike. I.e the
3266 			 * biggest_newly_acked must be higher than the
3267 			 * sending_seq at the time we did the FR.
3268 			 */
3269 			if (
3270 #ifdef SCTP_FR_TO_ALTERNATE
3271 			/*
3272 			 * If FR's go to new networks, then we must only do
3273 			 * this for singly homed asoc's. However if the FR's
3274 			 * go to the same network (Armando's work) then its
3275 			 * ok to FR multiple times.
3276 			 */
3277 			    (asoc->numnets < 2)
3278 #else
3279 			    (1)
3280 #endif
3281 			    ) {
3282 
3283 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3284 				    tp1->rec.data.fast_retran_tsn)) {
3285 					/*
3286 					 * Strike the TSN, since this ack is
3287 					 * beyond where things were when we
3288 					 * did a FR.
3289 					 */
3290 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3291 						sctp_log_fr(biggest_tsn_newly_acked,
3292 						    tp1->rec.data.TSN_seq,
3293 						    tp1->sent,
3294 						    SCTP_FR_LOG_STRIKE_CHUNK);
3295 					}
3296 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3297 						tp1->sent++;
3298 					}
3299 					strike_flag = 1;
3300 					if ((asoc->sctp_cmt_on_off > 0) &&
3301 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3302 						/*
3303 						 * CMT DAC algorithm: If
3304 						 * SACK flag is set to 0,
3305 						 * then lowest_newack test
3306 						 * will not pass because it
3307 						 * would have been set to
3308 						 * the cumack earlier. If
3309 						 * not already to be rtx'd,
3310 						 * If not a mixed sack and
3311 						 * if tp1 is not between two
3312 						 * sacked TSNs, then mark by
3313 						 * one more. NOTE that we
3314 						 * are marking by one
3315 						 * additional time since the
3316 						 * SACK DAC flag indicates
3317 						 * that two packets have
3318 						 * been received after this
3319 						 * missing TSN.
3320 						 */
3321 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3322 						    (num_dests_sacked == 1) &&
3323 						    SCTP_TSN_GT(this_sack_lowest_newack,
3324 						    tp1->rec.data.TSN_seq)) {
3325 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3326 								sctp_log_fr(32 + num_dests_sacked,
3327 								    tp1->rec.data.TSN_seq,
3328 								    tp1->sent,
3329 								    SCTP_FR_LOG_STRIKE_CHUNK);
3330 							}
3331 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3332 								tp1->sent++;
3333 							}
3334 						}
3335 					}
3336 				}
3337 			}
3338 			/*
3339 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3340 			 * algo covers HTNA.
3341 			 */
3342 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3343 		    biggest_tsn_newly_acked)) {
3344 			/*
3345 			 * We don't strike these: This is the  HTNA
3346 			 * algorithm i.e. we don't strike If our TSN is
3347 			 * larger than the Highest TSN Newly Acked.
3348 			 */
3349 			;
3350 		} else {
3351 			/* Strike the TSN */
3352 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3353 				sctp_log_fr(biggest_tsn_newly_acked,
3354 				    tp1->rec.data.TSN_seq,
3355 				    tp1->sent,
3356 				    SCTP_FR_LOG_STRIKE_CHUNK);
3357 			}
3358 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3359 				tp1->sent++;
3360 			}
3361 			if ((asoc->sctp_cmt_on_off > 0) &&
3362 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3363 				/*
3364 				 * CMT DAC algorithm: If SACK flag is set to
3365 				 * 0, then lowest_newack test will not pass
3366 				 * because it would have been set to the
3367 				 * cumack earlier. If not already to be
3368 				 * rtx'd, If not a mixed sack and if tp1 is
3369 				 * not between two sacked TSNs, then mark by
3370 				 * one more. NOTE that we are marking by one
3371 				 * additional time since the SACK DAC flag
3372 				 * indicates that two packets have been
3373 				 * received after this missing TSN.
3374 				 */
3375 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3376 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3377 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3378 						sctp_log_fr(48 + num_dests_sacked,
3379 						    tp1->rec.data.TSN_seq,
3380 						    tp1->sent,
3381 						    SCTP_FR_LOG_STRIKE_CHUNK);
3382 					}
3383 					tp1->sent++;
3384 				}
3385 			}
3386 		}
3387 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3388 			struct sctp_nets *alt;
3389 
3390 			/* fix counts and things */
3391 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3392 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3393 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3394 				    tp1->book_size,
3395 				    (uint32_t) (uintptr_t) tp1->whoTo,
3396 				    tp1->rec.data.TSN_seq);
3397 			}
3398 			if (tp1->whoTo) {
3399 				tp1->whoTo->net_ack++;
3400 				sctp_flight_size_decrease(tp1);
3401 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3402 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3403 					    tp1);
3404 				}
3405 			}
3406 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3407 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3408 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3409 			}
3410 			/* add back to the rwnd */
3411 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3412 
3413 			/* remove from the total flight */
3414 			sctp_total_flight_decrease(stcb, tp1);
3415 
3416 			if ((stcb->asoc.prsctp_supported) &&
3417 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3418 				/*
3419 				 * Has it been retransmitted tv_sec times? -
3420 				 * we store the retran count there.
3421 				 */
3422 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3423 					/* Yes, so drop it */
3424 					if (tp1->data != NULL) {
3425 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3426 						    SCTP_SO_NOT_LOCKED);
3427 					}
3428 					/* Make sure to flag we had a FR */
3429 					tp1->whoTo->net_ack++;
3430 					continue;
3431 				}
3432 			}
3433 			/*
3434 			 * SCTP_PRINTF("OK, we are now ready to FR this
3435 			 * guy\n");
3436 			 */
3437 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3438 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3439 				    0, SCTP_FR_MARKED);
3440 			}
3441 			if (strike_flag) {
3442 				/* This is a subsequent FR */
3443 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3444 			}
3445 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3446 			if (asoc->sctp_cmt_on_off > 0) {
3447 				/*
3448 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3449 				 * If CMT is being used, then pick dest with
3450 				 * largest ssthresh for any retransmission.
3451 				 */
3452 				tp1->no_fr_allowed = 1;
3453 				alt = tp1->whoTo;
3454 				/* sa_ignore NO_NULL_CHK */
3455 				if (asoc->sctp_cmt_pf > 0) {
3456 					/*
3457 					 * JRS 5/18/07 - If CMT PF is on,
3458 					 * use the PF version of
3459 					 * find_alt_net()
3460 					 */
3461 					alt = sctp_find_alternate_net(stcb, alt, 2);
3462 				} else {
3463 					/*
3464 					 * JRS 5/18/07 - If only CMT is on,
3465 					 * use the CMT version of
3466 					 * find_alt_net()
3467 					 */
3468 					/* sa_ignore NO_NULL_CHK */
3469 					alt = sctp_find_alternate_net(stcb, alt, 1);
3470 				}
3471 				if (alt == NULL) {
3472 					alt = tp1->whoTo;
3473 				}
3474 				/*
3475 				 * CUCv2: If a different dest is picked for
3476 				 * the retransmission, then new
3477 				 * (rtx-)pseudo_cumack needs to be tracked
3478 				 * for orig dest. Let CUCv2 track new (rtx-)
3479 				 * pseudo-cumack always.
3480 				 */
3481 				if (tp1->whoTo) {
3482 					tp1->whoTo->find_pseudo_cumack = 1;
3483 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3484 				}
3485 			} else {/* CMT is OFF */
3486 
3487 #ifdef SCTP_FR_TO_ALTERNATE
3488 				/* Can we find an alternate? */
3489 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3490 #else
3491 				/*
3492 				 * default behavior is to NOT retransmit
3493 				 * FR's to an alternate. Armando Caro's
3494 				 * paper details why.
3495 				 */
3496 				alt = tp1->whoTo;
3497 #endif
3498 			}
3499 
3500 			tp1->rec.data.doing_fast_retransmit = 1;
3501 			tot_retrans++;
3502 			/* mark the sending seq for possible subsequent FR's */
3503 			/*
3504 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3505 			 * (uint32_t)tpi->rec.data.TSN_seq);
3506 			 */
3507 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3508 				/*
3509 				 * If the queue of send is empty then its
3510 				 * the next sequence number that will be
3511 				 * assigned so we subtract one from this to
3512 				 * get the one we last sent.
3513 				 */
3514 				tp1->rec.data.fast_retran_tsn = sending_seq;
3515 			} else {
3516 				/*
3517 				 * If there are chunks on the send queue
3518 				 * (unsent data that has made it from the
3519 				 * stream queues but not out the door, we
3520 				 * take the first one (which will have the
3521 				 * lowest TSN) and subtract one to get the
3522 				 * one we last sent.
3523 				 */
3524 				struct sctp_tmit_chunk *ttt;
3525 
3526 				ttt = TAILQ_FIRST(&asoc->send_queue);
3527 				tp1->rec.data.fast_retran_tsn =
3528 				    ttt->rec.data.TSN_seq;
3529 			}
3530 
3531 			if (tp1->do_rtt) {
3532 				/*
3533 				 * this guy had a RTO calculation pending on
3534 				 * it, cancel it
3535 				 */
3536 				if ((tp1->whoTo != NULL) &&
3537 				    (tp1->whoTo->rto_needed == 0)) {
3538 					tp1->whoTo->rto_needed = 1;
3539 				}
3540 				tp1->do_rtt = 0;
3541 			}
3542 			if (alt != tp1->whoTo) {
3543 				/* yes, there is an alternate. */
3544 				sctp_free_remote_addr(tp1->whoTo);
3545 				/* sa_ignore FREED_MEMORY */
3546 				tp1->whoTo = alt;
3547 				atomic_add_int(&alt->ref_count, 1);
3548 			}
3549 		}
3550 	}
3551 }
3552 
3553 struct sctp_tmit_chunk *
3554 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3555     struct sctp_association *asoc)
3556 {
3557 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3558 	struct timeval now;
3559 	int now_filled = 0;
3560 
3561 	if (asoc->prsctp_supported == 0) {
3562 		return (NULL);
3563 	}
3564 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3565 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3566 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3567 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3568 			/* no chance to advance, out of here */
3569 			break;
3570 		}
3571 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3572 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3573 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3574 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3575 				    asoc->advanced_peer_ack_point,
3576 				    tp1->rec.data.TSN_seq, 0, 0);
3577 			}
3578 		}
3579 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3580 			/*
3581 			 * We can't fwd-tsn past any that are reliable aka
3582 			 * retransmitted until the asoc fails.
3583 			 */
3584 			break;
3585 		}
3586 		if (!now_filled) {
3587 			(void)SCTP_GETTIME_TIMEVAL(&now);
3588 			now_filled = 1;
3589 		}
3590 		/*
3591 		 * now we got a chunk which is marked for another
3592 		 * retransmission to a PR-stream but has run out its chances
3593 		 * already maybe OR has been marked to skip now. Can we skip
3594 		 * it if its a resend?
3595 		 */
3596 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3597 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3598 			/*
3599 			 * Now is this one marked for resend and its time is
3600 			 * now up?
3601 			 */
3602 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3603 				/* Yes so drop it */
3604 				if (tp1->data) {
3605 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3606 					    1, SCTP_SO_NOT_LOCKED);
3607 				}
3608 			} else {
3609 				/*
3610 				 * No, we are done when hit one for resend
3611 				 * whos time as not expired.
3612 				 */
3613 				break;
3614 			}
3615 		}
3616 		/*
3617 		 * Ok now if this chunk is marked to drop it we can clean up
3618 		 * the chunk, advance our peer ack point and we can check
3619 		 * the next chunk.
3620 		 */
3621 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3622 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3623 			/* advance PeerAckPoint goes forward */
3624 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3625 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3626 				a_adv = tp1;
3627 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3628 				/* No update but we do save the chk */
3629 				a_adv = tp1;
3630 			}
3631 		} else {
3632 			/*
3633 			 * If it is still in RESEND we can advance no
3634 			 * further
3635 			 */
3636 			break;
3637 		}
3638 	}
3639 	return (a_adv);
3640 }
3641 
3642 static int
3643 sctp_fs_audit(struct sctp_association *asoc)
3644 {
3645 	struct sctp_tmit_chunk *chk;
3646 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3647 	int ret;
3648 
3649 #ifndef INVARIANTS
3650 	int entry_flight, entry_cnt;
3651 
3652 #endif
3653 
3654 	ret = 0;
3655 #ifndef INVARIANTS
3656 	entry_flight = asoc->total_flight;
3657 	entry_cnt = asoc->total_flight_count;
3658 #endif
3659 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3660 		return (0);
3661 
3662 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3663 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3664 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3665 			    chk->rec.data.TSN_seq,
3666 			    chk->send_size,
3667 			    chk->snd_count);
3668 			inflight++;
3669 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3670 			resend++;
3671 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3672 			inbetween++;
3673 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3674 			above++;
3675 		} else {
3676 			acked++;
3677 		}
3678 	}
3679 
3680 	if ((inflight > 0) || (inbetween > 0)) {
3681 #ifdef INVARIANTS
3682 		panic("Flight size-express incorrect? \n");
3683 #else
3684 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3685 		    entry_flight, entry_cnt);
3686 
3687 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3688 		    inflight, inbetween, resend, above, acked);
3689 		ret = 1;
3690 #endif
3691 	}
3692 	return (ret);
3693 }
3694 
3695 
3696 static void
3697 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3698     struct sctp_association *asoc,
3699     struct sctp_tmit_chunk *tp1)
3700 {
3701 	tp1->window_probe = 0;
3702 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3703 		/* TSN's skipped we do NOT move back. */
3704 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3705 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3706 		    tp1->book_size,
3707 		    (uint32_t) (uintptr_t) tp1->whoTo,
3708 		    tp1->rec.data.TSN_seq);
3709 		return;
3710 	}
3711 	/* First setup this by shrinking flight */
3712 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3713 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3714 		    tp1);
3715 	}
3716 	sctp_flight_size_decrease(tp1);
3717 	sctp_total_flight_decrease(stcb, tp1);
3718 	/* Now mark for resend */
3719 	tp1->sent = SCTP_DATAGRAM_RESEND;
3720 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3721 
3722 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3723 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3724 		    tp1->whoTo->flight_size,
3725 		    tp1->book_size,
3726 		    (uint32_t) (uintptr_t) tp1->whoTo,
3727 		    tp1->rec.data.TSN_seq);
3728 	}
3729 }
3730 
3731 void
3732 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3733     uint32_t rwnd, int *abort_now, int ecne_seen)
3734 {
3735 	struct sctp_nets *net;
3736 	struct sctp_association *asoc;
3737 	struct sctp_tmit_chunk *tp1, *tp2;
3738 	uint32_t old_rwnd;
3739 	int win_probe_recovery = 0;
3740 	int win_probe_recovered = 0;
3741 	int j, done_once = 0;
3742 	int rto_ok = 1;
3743 
3744 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3745 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3746 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3747 	}
3748 	SCTP_TCB_LOCK_ASSERT(stcb);
3749 #ifdef SCTP_ASOCLOG_OF_TSNS
3750 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3751 	stcb->asoc.cumack_log_at++;
3752 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3753 		stcb->asoc.cumack_log_at = 0;
3754 	}
3755 #endif
3756 	asoc = &stcb->asoc;
3757 	old_rwnd = asoc->peers_rwnd;
3758 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3759 		/* old ack */
3760 		return;
3761 	} else if (asoc->last_acked_seq == cumack) {
3762 		/* Window update sack */
3763 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3764 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3765 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3766 			/* SWS sender side engages */
3767 			asoc->peers_rwnd = 0;
3768 		}
3769 		if (asoc->peers_rwnd > old_rwnd) {
3770 			goto again;
3771 		}
3772 		return;
3773 	}
3774 	/* First setup for CC stuff */
3775 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3776 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3777 			/* Drag along the window_tsn for cwr's */
3778 			net->cwr_window_tsn = cumack;
3779 		}
3780 		net->prev_cwnd = net->cwnd;
3781 		net->net_ack = 0;
3782 		net->net_ack2 = 0;
3783 
3784 		/*
3785 		 * CMT: Reset CUC and Fast recovery algo variables before
3786 		 * SACK processing
3787 		 */
3788 		net->new_pseudo_cumack = 0;
3789 		net->will_exit_fast_recovery = 0;
3790 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3791 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3792 		}
3793 	}
3794 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3795 		uint32_t send_s;
3796 
3797 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3798 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3799 			    sctpchunk_listhead);
3800 			send_s = tp1->rec.data.TSN_seq + 1;
3801 		} else {
3802 			send_s = asoc->sending_seq;
3803 		}
3804 		if (SCTP_TSN_GE(cumack, send_s)) {
3805 			struct mbuf *op_err;
3806 			char msg[SCTP_DIAG_INFO_LEN];
3807 
3808 			*abort_now = 1;
3809 			/* XXX */
3810 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3811 			    cumack, send_s);
3812 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3813 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3814 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3815 			return;
3816 		}
3817 	}
3818 	asoc->this_sack_highest_gap = cumack;
3819 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3820 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3821 		    stcb->asoc.overall_error_count,
3822 		    0,
3823 		    SCTP_FROM_SCTP_INDATA,
3824 		    __LINE__);
3825 	}
3826 	stcb->asoc.overall_error_count = 0;
3827 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3828 		/* process the new consecutive TSN first */
3829 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3830 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3831 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3832 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3833 				}
3834 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3835 					/*
3836 					 * If it is less than ACKED, it is
3837 					 * now no-longer in flight. Higher
3838 					 * values may occur during marking
3839 					 */
3840 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3841 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3842 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3843 							    tp1->whoTo->flight_size,
3844 							    tp1->book_size,
3845 							    (uint32_t) (uintptr_t) tp1->whoTo,
3846 							    tp1->rec.data.TSN_seq);
3847 						}
3848 						sctp_flight_size_decrease(tp1);
3849 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3850 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3851 							    tp1);
3852 						}
3853 						/* sa_ignore NO_NULL_CHK */
3854 						sctp_total_flight_decrease(stcb, tp1);
3855 					}
3856 					tp1->whoTo->net_ack += tp1->send_size;
3857 					if (tp1->snd_count < 2) {
3858 						/*
3859 						 * True non-retransmited
3860 						 * chunk
3861 						 */
3862 						tp1->whoTo->net_ack2 +=
3863 						    tp1->send_size;
3864 
3865 						/* update RTO too? */
3866 						if (tp1->do_rtt) {
3867 							if (rto_ok) {
3868 								tp1->whoTo->RTO =
3869 								/*
3870 								 * sa_ignore
3871 								 * NO_NULL_CH
3872 								 * K
3873 								 */
3874 								    sctp_calculate_rto(stcb,
3875 								    asoc, tp1->whoTo,
3876 								    &tp1->sent_rcv_time,
3877 								    sctp_align_safe_nocopy,
3878 								    SCTP_RTT_FROM_DATA);
3879 								rto_ok = 0;
3880 							}
3881 							if (tp1->whoTo->rto_needed == 0) {
3882 								tp1->whoTo->rto_needed = 1;
3883 							}
3884 							tp1->do_rtt = 0;
3885 						}
3886 					}
3887 					/*
3888 					 * CMT: CUCv2 algorithm. From the
3889 					 * cumack'd TSNs, for each TSN being
3890 					 * acked for the first time, set the
3891 					 * following variables for the
3892 					 * corresp destination.
3893 					 * new_pseudo_cumack will trigger a
3894 					 * cwnd update.
3895 					 * find_(rtx_)pseudo_cumack will
3896 					 * trigger search for the next
3897 					 * expected (rtx-)pseudo-cumack.
3898 					 */
3899 					tp1->whoTo->new_pseudo_cumack = 1;
3900 					tp1->whoTo->find_pseudo_cumack = 1;
3901 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3902 
3903 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3904 						/* sa_ignore NO_NULL_CHK */
3905 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3906 					}
3907 				}
3908 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3909 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3910 				}
3911 				if (tp1->rec.data.chunk_was_revoked) {
3912 					/* deflate the cwnd */
3913 					tp1->whoTo->cwnd -= tp1->book_size;
3914 					tp1->rec.data.chunk_was_revoked = 0;
3915 				}
3916 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3917 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3918 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3919 #ifdef INVARIANTS
3920 					} else {
3921 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3922 #endif
3923 					}
3924 				}
3925 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3926 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3927 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3928 					asoc->trigger_reset = 1;
3929 				}
3930 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3931 				if (tp1->data) {
3932 					/* sa_ignore NO_NULL_CHK */
3933 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3934 					sctp_m_freem(tp1->data);
3935 					tp1->data = NULL;
3936 				}
3937 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3938 					sctp_log_sack(asoc->last_acked_seq,
3939 					    cumack,
3940 					    tp1->rec.data.TSN_seq,
3941 					    0,
3942 					    0,
3943 					    SCTP_LOG_FREE_SENT);
3944 				}
3945 				asoc->sent_queue_cnt--;
3946 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3947 			} else {
3948 				break;
3949 			}
3950 		}
3951 
3952 	}
3953 	/* sa_ignore NO_NULL_CHK */
3954 	if (stcb->sctp_socket) {
3955 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3956 		struct socket *so;
3957 
3958 #endif
3959 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3960 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3961 			/* sa_ignore NO_NULL_CHK */
3962 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3963 		}
3964 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3965 		so = SCTP_INP_SO(stcb->sctp_ep);
3966 		atomic_add_int(&stcb->asoc.refcnt, 1);
3967 		SCTP_TCB_UNLOCK(stcb);
3968 		SCTP_SOCKET_LOCK(so, 1);
3969 		SCTP_TCB_LOCK(stcb);
3970 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3971 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3972 			/* assoc was freed while we were unlocked */
3973 			SCTP_SOCKET_UNLOCK(so, 1);
3974 			return;
3975 		}
3976 #endif
3977 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3978 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3979 		SCTP_SOCKET_UNLOCK(so, 1);
3980 #endif
3981 	} else {
3982 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3983 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3984 		}
3985 	}
3986 
3987 	/* JRS - Use the congestion control given in the CC module */
3988 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3989 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3990 			if (net->net_ack2 > 0) {
3991 				/*
3992 				 * Karn's rule applies to clearing error
3993 				 * count, this is optional.
3994 				 */
3995 				net->error_count = 0;
3996 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3997 					/* addr came good */
3998 					net->dest_state |= SCTP_ADDR_REACHABLE;
3999 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4000 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4001 				}
4002 				if (net == stcb->asoc.primary_destination) {
4003 					if (stcb->asoc.alternate) {
4004 						/*
4005 						 * release the alternate,
4006 						 * primary is good
4007 						 */
4008 						sctp_free_remote_addr(stcb->asoc.alternate);
4009 						stcb->asoc.alternate = NULL;
4010 					}
4011 				}
4012 				if (net->dest_state & SCTP_ADDR_PF) {
4013 					net->dest_state &= ~SCTP_ADDR_PF;
4014 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4015 					    stcb->sctp_ep, stcb, net,
4016 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4017 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4018 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4019 					/* Done with this net */
4020 					net->net_ack = 0;
4021 				}
4022 				/* restore any doubled timers */
4023 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4024 				if (net->RTO < stcb->asoc.minrto) {
4025 					net->RTO = stcb->asoc.minrto;
4026 				}
4027 				if (net->RTO > stcb->asoc.maxrto) {
4028 					net->RTO = stcb->asoc.maxrto;
4029 				}
4030 			}
4031 		}
4032 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4033 	}
4034 	asoc->last_acked_seq = cumack;
4035 
4036 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4037 		/* nothing left in-flight */
4038 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4039 			net->flight_size = 0;
4040 			net->partial_bytes_acked = 0;
4041 		}
4042 		asoc->total_flight = 0;
4043 		asoc->total_flight_count = 0;
4044 	}
4045 	/* RWND update */
4046 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4047 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4048 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4049 		/* SWS sender side engages */
4050 		asoc->peers_rwnd = 0;
4051 	}
4052 	if (asoc->peers_rwnd > old_rwnd) {
4053 		win_probe_recovery = 1;
4054 	}
4055 	/* Now assure a timer where data is queued at */
4056 again:
4057 	j = 0;
4058 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4059 		int to_ticks;
4060 
4061 		if (win_probe_recovery && (net->window_probe)) {
4062 			win_probe_recovered = 1;
4063 			/*
4064 			 * Find first chunk that was used with window probe
4065 			 * and clear the sent
4066 			 */
4067 			/* sa_ignore FREED_MEMORY */
4068 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4069 				if (tp1->window_probe) {
4070 					/* move back to data send queue */
4071 					sctp_window_probe_recovery(stcb, asoc, tp1);
4072 					break;
4073 				}
4074 			}
4075 		}
4076 		if (net->RTO == 0) {
4077 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4078 		} else {
4079 			to_ticks = MSEC_TO_TICKS(net->RTO);
4080 		}
4081 		if (net->flight_size) {
4082 			j++;
4083 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4084 			    sctp_timeout_handler, &net->rxt_timer);
4085 			if (net->window_probe) {
4086 				net->window_probe = 0;
4087 			}
4088 		} else {
4089 			if (net->window_probe) {
4090 				/*
4091 				 * In window probes we must assure a timer
4092 				 * is still running there
4093 				 */
4094 				net->window_probe = 0;
4095 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4096 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4097 					    sctp_timeout_handler, &net->rxt_timer);
4098 				}
4099 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4100 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4101 				    stcb, net,
4102 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4103 			}
4104 		}
4105 	}
4106 	if ((j == 0) &&
4107 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4108 	    (asoc->sent_queue_retran_cnt == 0) &&
4109 	    (win_probe_recovered == 0) &&
4110 	    (done_once == 0)) {
4111 		/*
4112 		 * huh, this should not happen unless all packets are
4113 		 * PR-SCTP and marked to skip of course.
4114 		 */
4115 		if (sctp_fs_audit(asoc)) {
4116 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4117 				net->flight_size = 0;
4118 			}
4119 			asoc->total_flight = 0;
4120 			asoc->total_flight_count = 0;
4121 			asoc->sent_queue_retran_cnt = 0;
4122 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4123 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4124 					sctp_flight_size_increase(tp1);
4125 					sctp_total_flight_increase(stcb, tp1);
4126 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4127 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4128 				}
4129 			}
4130 		}
4131 		done_once = 1;
4132 		goto again;
4133 	}
4134 	/**********************************/
4135 	/* Now what about shutdown issues */
4136 	/**********************************/
4137 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4138 		/* nothing left on sendqueue.. consider done */
4139 		/* clean up */
4140 		if ((asoc->stream_queue_cnt == 1) &&
4141 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4142 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4143 		    (asoc->locked_on_sending)
4144 		    ) {
4145 			struct sctp_stream_queue_pending *sp;
4146 
4147 			/*
4148 			 * I may be in a state where we got all across.. but
4149 			 * cannot write more due to a shutdown... we abort
4150 			 * since the user did not indicate EOR in this case.
4151 			 * The sp will be cleaned during free of the asoc.
4152 			 */
4153 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4154 			    sctp_streamhead);
4155 			if ((sp) && (sp->length == 0)) {
4156 				/* Let cleanup code purge it */
4157 				if (sp->msg_is_complete) {
4158 					asoc->stream_queue_cnt--;
4159 				} else {
4160 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4161 					asoc->locked_on_sending = NULL;
4162 					asoc->stream_queue_cnt--;
4163 				}
4164 			}
4165 		}
4166 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4167 		    (asoc->stream_queue_cnt == 0)) {
4168 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4169 				/* Need to abort here */
4170 				struct mbuf *op_err;
4171 
4172 		abort_out_now:
4173 				*abort_now = 1;
4174 				/* XXX */
4175 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4176 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4177 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4178 				return;
4179 			} else {
4180 				struct sctp_nets *netp;
4181 
4182 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4183 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4184 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4185 				}
4186 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4187 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4188 				sctp_stop_timers_for_shutdown(stcb);
4189 				if (asoc->alternate) {
4190 					netp = asoc->alternate;
4191 				} else {
4192 					netp = asoc->primary_destination;
4193 				}
4194 				sctp_send_shutdown(stcb, netp);
4195 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4196 				    stcb->sctp_ep, stcb, netp);
4197 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4198 				    stcb->sctp_ep, stcb, netp);
4199 			}
4200 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4201 		    (asoc->stream_queue_cnt == 0)) {
4202 			struct sctp_nets *netp;
4203 
4204 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4205 				goto abort_out_now;
4206 			}
4207 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4208 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4209 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4210 			sctp_stop_timers_for_shutdown(stcb);
4211 			if (asoc->alternate) {
4212 				netp = asoc->alternate;
4213 			} else {
4214 				netp = asoc->primary_destination;
4215 			}
4216 			sctp_send_shutdown_ack(stcb, netp);
4217 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4218 			    stcb->sctp_ep, stcb, netp);
4219 		}
4220 	}
4221 	/*********************************************/
4222 	/* Here we perform PR-SCTP procedures        */
4223 	/* (section 4.2)                             */
4224 	/*********************************************/
4225 	/* C1. update advancedPeerAckPoint */
4226 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4227 		asoc->advanced_peer_ack_point = cumack;
4228 	}
4229 	/* PR-Sctp issues need to be addressed too */
4230 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4231 		struct sctp_tmit_chunk *lchk;
4232 		uint32_t old_adv_peer_ack_point;
4233 
4234 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4235 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4236 		/* C3. See if we need to send a Fwd-TSN */
4237 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4238 			/*
4239 			 * ISSUE with ECN, see FWD-TSN processing.
4240 			 */
4241 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4242 				send_forward_tsn(stcb, asoc);
4243 			} else if (lchk) {
4244 				/* try to FR fwd-tsn's that get lost too */
4245 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4246 					send_forward_tsn(stcb, asoc);
4247 				}
4248 			}
4249 		}
4250 		if (lchk) {
4251 			/* Assure a timer is up */
4252 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4253 			    stcb->sctp_ep, stcb, lchk->whoTo);
4254 		}
4255 	}
4256 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4257 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4258 		    rwnd,
4259 		    stcb->asoc.peers_rwnd,
4260 		    stcb->asoc.total_flight,
4261 		    stcb->asoc.total_output_queue_size);
4262 	}
4263 }
4264 
4265 void
4266 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4267     struct sctp_tcb *stcb,
4268     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4269     int *abort_now, uint8_t flags,
4270     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4271 {
4272 	struct sctp_association *asoc;
4273 	struct sctp_tmit_chunk *tp1, *tp2;
4274 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4275 	uint16_t wake_him = 0;
4276 	uint32_t send_s = 0;
4277 	long j;
4278 	int accum_moved = 0;
4279 	int will_exit_fast_recovery = 0;
4280 	uint32_t a_rwnd, old_rwnd;
4281 	int win_probe_recovery = 0;
4282 	int win_probe_recovered = 0;
4283 	struct sctp_nets *net = NULL;
4284 	int done_once;
4285 	int rto_ok = 1;
4286 	uint8_t reneged_all = 0;
4287 	uint8_t cmt_dac_flag;
4288 
4289 	/*
4290 	 * we take any chance we can to service our queues since we cannot
4291 	 * get awoken when the socket is read from :<
4292 	 */
4293 	/*
4294 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4295 	 * old sack, if so discard. 2) If there is nothing left in the send
4296 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4297 	 * too, update any rwnd change and verify no timers are running.
4298 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4299 	 * moved process these first and note that it moved. 4) Process any
4300 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4301 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4302 	 * sync up flightsizes and things, stop all timers and also check
4303 	 * for shutdown_pending state. If so then go ahead and send off the
4304 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4305 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4306 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4307 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4308 	 * if in shutdown_recv state.
4309 	 */
4310 	SCTP_TCB_LOCK_ASSERT(stcb);
4311 	/* CMT DAC algo */
4312 	this_sack_lowest_newack = 0;
4313 	SCTP_STAT_INCR(sctps_slowpath_sack);
4314 	last_tsn = cum_ack;
4315 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4316 #ifdef SCTP_ASOCLOG_OF_TSNS
4317 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4318 	stcb->asoc.cumack_log_at++;
4319 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4320 		stcb->asoc.cumack_log_at = 0;
4321 	}
4322 #endif
4323 	a_rwnd = rwnd;
4324 
4325 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4326 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4327 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4328 	}
4329 	old_rwnd = stcb->asoc.peers_rwnd;
4330 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4331 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4332 		    stcb->asoc.overall_error_count,
4333 		    0,
4334 		    SCTP_FROM_SCTP_INDATA,
4335 		    __LINE__);
4336 	}
4337 	stcb->asoc.overall_error_count = 0;
4338 	asoc = &stcb->asoc;
4339 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4340 		sctp_log_sack(asoc->last_acked_seq,
4341 		    cum_ack,
4342 		    0,
4343 		    num_seg,
4344 		    num_dup,
4345 		    SCTP_LOG_NEW_SACK);
4346 	}
4347 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4348 		uint16_t i;
4349 		uint32_t *dupdata, dblock;
4350 
4351 		for (i = 0; i < num_dup; i++) {
4352 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4353 			    sizeof(uint32_t), (uint8_t *) & dblock);
4354 			if (dupdata == NULL) {
4355 				break;
4356 			}
4357 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4358 		}
4359 	}
4360 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4361 		/* reality check */
4362 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4363 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4364 			    sctpchunk_listhead);
4365 			send_s = tp1->rec.data.TSN_seq + 1;
4366 		} else {
4367 			tp1 = NULL;
4368 			send_s = asoc->sending_seq;
4369 		}
4370 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4371 			struct mbuf *op_err;
4372 			char msg[SCTP_DIAG_INFO_LEN];
4373 
4374 			/*
4375 			 * no way, we have not even sent this TSN out yet.
4376 			 * Peer is hopelessly messed up with us.
4377 			 */
4378 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4379 			    cum_ack, send_s);
4380 			if (tp1) {
4381 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4382 				    tp1->rec.data.TSN_seq, (void *)tp1);
4383 			}
4384 	hopeless_peer:
4385 			*abort_now = 1;
4386 			/* XXX */
4387 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4388 			    cum_ack, send_s);
4389 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4390 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4391 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4392 			return;
4393 		}
4394 	}
4395 	/**********************/
4396 	/* 1) check the range */
4397 	/**********************/
4398 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4399 		/* acking something behind */
4400 		return;
4401 	}
4402 	/* update the Rwnd of the peer */
4403 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4404 	    TAILQ_EMPTY(&asoc->send_queue) &&
4405 	    (asoc->stream_queue_cnt == 0)) {
4406 		/* nothing left on send/sent and strmq */
4407 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4408 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4409 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4410 		}
4411 		asoc->peers_rwnd = a_rwnd;
4412 		if (asoc->sent_queue_retran_cnt) {
4413 			asoc->sent_queue_retran_cnt = 0;
4414 		}
4415 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4416 			/* SWS sender side engages */
4417 			asoc->peers_rwnd = 0;
4418 		}
4419 		/* stop any timers */
4420 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4421 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4422 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4423 			net->partial_bytes_acked = 0;
4424 			net->flight_size = 0;
4425 		}
4426 		asoc->total_flight = 0;
4427 		asoc->total_flight_count = 0;
4428 		return;
4429 	}
4430 	/*
4431 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4432 	 * things. The total byte count acked is tracked in netAckSz AND
4433 	 * netAck2 is used to track the total bytes acked that are un-
4434 	 * amibguious and were never retransmitted. We track these on a per
4435 	 * destination address basis.
4436 	 */
4437 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4438 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4439 			/* Drag along the window_tsn for cwr's */
4440 			net->cwr_window_tsn = cum_ack;
4441 		}
4442 		net->prev_cwnd = net->cwnd;
4443 		net->net_ack = 0;
4444 		net->net_ack2 = 0;
4445 
4446 		/*
4447 		 * CMT: Reset CUC and Fast recovery algo variables before
4448 		 * SACK processing
4449 		 */
4450 		net->new_pseudo_cumack = 0;
4451 		net->will_exit_fast_recovery = 0;
4452 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4453 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4454 		}
4455 	}
4456 	/* process the new consecutive TSN first */
4457 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4458 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4459 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4460 				accum_moved = 1;
4461 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4462 					/*
4463 					 * If it is less than ACKED, it is
4464 					 * now no-longer in flight. Higher
4465 					 * values may occur during marking
4466 					 */
4467 					if ((tp1->whoTo->dest_state &
4468 					    SCTP_ADDR_UNCONFIRMED) &&
4469 					    (tp1->snd_count < 2)) {
4470 						/*
4471 						 * If there was no retran
4472 						 * and the address is
4473 						 * un-confirmed and we sent
4474 						 * there and are now
4475 						 * sacked.. its confirmed,
4476 						 * mark it so.
4477 						 */
4478 						tp1->whoTo->dest_state &=
4479 						    ~SCTP_ADDR_UNCONFIRMED;
4480 					}
4481 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4482 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4483 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4484 							    tp1->whoTo->flight_size,
4485 							    tp1->book_size,
4486 							    (uint32_t) (uintptr_t) tp1->whoTo,
4487 							    tp1->rec.data.TSN_seq);
4488 						}
4489 						sctp_flight_size_decrease(tp1);
4490 						sctp_total_flight_decrease(stcb, tp1);
4491 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4492 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4493 							    tp1);
4494 						}
4495 					}
4496 					tp1->whoTo->net_ack += tp1->send_size;
4497 
4498 					/* CMT SFR and DAC algos */
4499 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4500 					tp1->whoTo->saw_newack = 1;
4501 
4502 					if (tp1->snd_count < 2) {
4503 						/*
4504 						 * True non-retransmited
4505 						 * chunk
4506 						 */
4507 						tp1->whoTo->net_ack2 +=
4508 						    tp1->send_size;
4509 
4510 						/* update RTO too? */
4511 						if (tp1->do_rtt) {
4512 							if (rto_ok) {
4513 								tp1->whoTo->RTO =
4514 								    sctp_calculate_rto(stcb,
4515 								    asoc, tp1->whoTo,
4516 								    &tp1->sent_rcv_time,
4517 								    sctp_align_safe_nocopy,
4518 								    SCTP_RTT_FROM_DATA);
4519 								rto_ok = 0;
4520 							}
4521 							if (tp1->whoTo->rto_needed == 0) {
4522 								tp1->whoTo->rto_needed = 1;
4523 							}
4524 							tp1->do_rtt = 0;
4525 						}
4526 					}
4527 					/*
4528 					 * CMT: CUCv2 algorithm. From the
4529 					 * cumack'd TSNs, for each TSN being
4530 					 * acked for the first time, set the
4531 					 * following variables for the
4532 					 * corresp destination.
4533 					 * new_pseudo_cumack will trigger a
4534 					 * cwnd update.
4535 					 * find_(rtx_)pseudo_cumack will
4536 					 * trigger search for the next
4537 					 * expected (rtx-)pseudo-cumack.
4538 					 */
4539 					tp1->whoTo->new_pseudo_cumack = 1;
4540 					tp1->whoTo->find_pseudo_cumack = 1;
4541 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4542 
4543 
4544 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4545 						sctp_log_sack(asoc->last_acked_seq,
4546 						    cum_ack,
4547 						    tp1->rec.data.TSN_seq,
4548 						    0,
4549 						    0,
4550 						    SCTP_LOG_TSN_ACKED);
4551 					}
4552 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4553 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4554 					}
4555 				}
4556 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4557 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4558 #ifdef SCTP_AUDITING_ENABLED
4559 					sctp_audit_log(0xB3,
4560 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4561 #endif
4562 				}
4563 				if (tp1->rec.data.chunk_was_revoked) {
4564 					/* deflate the cwnd */
4565 					tp1->whoTo->cwnd -= tp1->book_size;
4566 					tp1->rec.data.chunk_was_revoked = 0;
4567 				}
4568 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4569 					tp1->sent = SCTP_DATAGRAM_ACKED;
4570 				}
4571 			}
4572 		} else {
4573 			break;
4574 		}
4575 	}
4576 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4577 	/* always set this up to cum-ack */
4578 	asoc->this_sack_highest_gap = last_tsn;
4579 
4580 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4581 
4582 		/*
4583 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4584 		 * to be greater than the cumack. Also reset saw_newack to 0
4585 		 * for all dests.
4586 		 */
4587 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 			net->saw_newack = 0;
4589 			net->this_sack_highest_newack = last_tsn;
4590 		}
4591 
4592 		/*
4593 		 * thisSackHighestGap will increase while handling NEW
4594 		 * segments this_sack_highest_newack will increase while
4595 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4596 		 * used for CMT DAC algo. saw_newack will also change.
4597 		 */
4598 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4599 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4600 		    num_seg, num_nr_seg, &rto_ok)) {
4601 			wake_him++;
4602 		}
4603 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4604 			/*
4605 			 * validate the biggest_tsn_acked in the gap acks if
4606 			 * strict adherence is wanted.
4607 			 */
4608 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4609 				/*
4610 				 * peer is either confused or we are under
4611 				 * attack. We must abort.
4612 				 */
4613 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4614 				    biggest_tsn_acked, send_s);
4615 				goto hopeless_peer;
4616 			}
4617 		}
4618 	}
4619 	/*******************************************/
4620 	/* cancel ALL T3-send timer if accum moved */
4621 	/*******************************************/
4622 	if (asoc->sctp_cmt_on_off > 0) {
4623 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4624 			if (net->new_pseudo_cumack)
4625 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4626 				    stcb, net,
4627 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4628 
4629 		}
4630 	} else {
4631 		if (accum_moved) {
4632 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4633 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4634 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4635 			}
4636 		}
4637 	}
4638 	/********************************************/
4639 	/* drop the acked chunks from the sentqueue */
4640 	/********************************************/
4641 	asoc->last_acked_seq = cum_ack;
4642 
4643 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4644 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4645 			break;
4646 		}
4647 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4648 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4649 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4650 #ifdef INVARIANTS
4651 			} else {
4652 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4653 #endif
4654 			}
4655 		}
4656 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4657 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4658 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4659 			asoc->trigger_reset = 1;
4660 		}
4661 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4662 		if (PR_SCTP_ENABLED(tp1->flags)) {
4663 			if (asoc->pr_sctp_cnt != 0)
4664 				asoc->pr_sctp_cnt--;
4665 		}
4666 		asoc->sent_queue_cnt--;
4667 		if (tp1->data) {
4668 			/* sa_ignore NO_NULL_CHK */
4669 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4670 			sctp_m_freem(tp1->data);
4671 			tp1->data = NULL;
4672 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4673 				asoc->sent_queue_cnt_removeable--;
4674 			}
4675 		}
4676 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4677 			sctp_log_sack(asoc->last_acked_seq,
4678 			    cum_ack,
4679 			    tp1->rec.data.TSN_seq,
4680 			    0,
4681 			    0,
4682 			    SCTP_LOG_FREE_SENT);
4683 		}
4684 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4685 		wake_him++;
4686 	}
4687 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4688 #ifdef INVARIANTS
4689 		panic("Warning flight size is postive and should be 0");
4690 #else
4691 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4692 		    asoc->total_flight);
4693 #endif
4694 		asoc->total_flight = 0;
4695 	}
4696 	/* sa_ignore NO_NULL_CHK */
4697 	if ((wake_him) && (stcb->sctp_socket)) {
4698 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4699 		struct socket *so;
4700 
4701 #endif
4702 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4703 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4704 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4705 		}
4706 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4707 		so = SCTP_INP_SO(stcb->sctp_ep);
4708 		atomic_add_int(&stcb->asoc.refcnt, 1);
4709 		SCTP_TCB_UNLOCK(stcb);
4710 		SCTP_SOCKET_LOCK(so, 1);
4711 		SCTP_TCB_LOCK(stcb);
4712 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4713 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4714 			/* assoc was freed while we were unlocked */
4715 			SCTP_SOCKET_UNLOCK(so, 1);
4716 			return;
4717 		}
4718 #endif
4719 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 		SCTP_SOCKET_UNLOCK(so, 1);
4722 #endif
4723 	} else {
4724 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4725 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4726 		}
4727 	}
4728 
4729 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4730 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4731 			/* Setup so we will exit RFC2582 fast recovery */
4732 			will_exit_fast_recovery = 1;
4733 		}
4734 	}
4735 	/*
4736 	 * Check for revoked fragments:
4737 	 *
4738 	 * if Previous sack - Had no frags then we can't have any revoked if
4739 	 * Previous sack - Had frag's then - If we now have frags aka
4740 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4741 	 * some of them. else - The peer revoked all ACKED fragments, since
4742 	 * we had some before and now we have NONE.
4743 	 */
4744 
4745 	if (num_seg) {
4746 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4747 		asoc->saw_sack_with_frags = 1;
4748 	} else if (asoc->saw_sack_with_frags) {
4749 		int cnt_revoked = 0;
4750 
4751 		/* Peer revoked all dg's marked or acked */
4752 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4753 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4754 				tp1->sent = SCTP_DATAGRAM_SENT;
4755 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4756 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4757 					    tp1->whoTo->flight_size,
4758 					    tp1->book_size,
4759 					    (uint32_t) (uintptr_t) tp1->whoTo,
4760 					    tp1->rec.data.TSN_seq);
4761 				}
4762 				sctp_flight_size_increase(tp1);
4763 				sctp_total_flight_increase(stcb, tp1);
4764 				tp1->rec.data.chunk_was_revoked = 1;
4765 				/*
4766 				 * To ensure that this increase in
4767 				 * flightsize, which is artificial, does not
4768 				 * throttle the sender, we also increase the
4769 				 * cwnd artificially.
4770 				 */
4771 				tp1->whoTo->cwnd += tp1->book_size;
4772 				cnt_revoked++;
4773 			}
4774 		}
4775 		if (cnt_revoked) {
4776 			reneged_all = 1;
4777 		}
4778 		asoc->saw_sack_with_frags = 0;
4779 	}
4780 	if (num_nr_seg > 0)
4781 		asoc->saw_sack_with_nr_frags = 1;
4782 	else
4783 		asoc->saw_sack_with_nr_frags = 0;
4784 
4785 	/* JRS - Use the congestion control given in the CC module */
4786 	if (ecne_seen == 0) {
4787 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4788 			if (net->net_ack2 > 0) {
4789 				/*
4790 				 * Karn's rule applies to clearing error
4791 				 * count, this is optional.
4792 				 */
4793 				net->error_count = 0;
4794 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4795 					/* addr came good */
4796 					net->dest_state |= SCTP_ADDR_REACHABLE;
4797 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4798 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4799 				}
4800 				if (net == stcb->asoc.primary_destination) {
4801 					if (stcb->asoc.alternate) {
4802 						/*
4803 						 * release the alternate,
4804 						 * primary is good
4805 						 */
4806 						sctp_free_remote_addr(stcb->asoc.alternate);
4807 						stcb->asoc.alternate = NULL;
4808 					}
4809 				}
4810 				if (net->dest_state & SCTP_ADDR_PF) {
4811 					net->dest_state &= ~SCTP_ADDR_PF;
4812 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4813 					    stcb->sctp_ep, stcb, net,
4814 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4815 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4816 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4817 					/* Done with this net */
4818 					net->net_ack = 0;
4819 				}
4820 				/* restore any doubled timers */
4821 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4822 				if (net->RTO < stcb->asoc.minrto) {
4823 					net->RTO = stcb->asoc.minrto;
4824 				}
4825 				if (net->RTO > stcb->asoc.maxrto) {
4826 					net->RTO = stcb->asoc.maxrto;
4827 				}
4828 			}
4829 		}
4830 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4831 	}
4832 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4833 		/* nothing left in-flight */
4834 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4835 			/* stop all timers */
4836 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4837 			    stcb, net,
4838 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4839 			net->flight_size = 0;
4840 			net->partial_bytes_acked = 0;
4841 		}
4842 		asoc->total_flight = 0;
4843 		asoc->total_flight_count = 0;
4844 	}
4845 	/**********************************/
4846 	/* Now what about shutdown issues */
4847 	/**********************************/
4848 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4849 		/* nothing left on sendqueue.. consider done */
4850 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4851 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4852 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4853 		}
4854 		asoc->peers_rwnd = a_rwnd;
4855 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4856 			/* SWS sender side engages */
4857 			asoc->peers_rwnd = 0;
4858 		}
4859 		/* clean up */
4860 		if ((asoc->stream_queue_cnt == 1) &&
4861 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4862 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4863 		    (asoc->locked_on_sending)
4864 		    ) {
4865 			struct sctp_stream_queue_pending *sp;
4866 
4867 			/*
4868 			 * I may be in a state where we got all across.. but
4869 			 * cannot write more due to a shutdown... we abort
4870 			 * since the user did not indicate EOR in this case.
4871 			 */
4872 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4873 			    sctp_streamhead);
4874 			if ((sp) && (sp->length == 0)) {
4875 				asoc->locked_on_sending = NULL;
4876 				if (sp->msg_is_complete) {
4877 					asoc->stream_queue_cnt--;
4878 				} else {
4879 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4880 					asoc->stream_queue_cnt--;
4881 				}
4882 			}
4883 		}
4884 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4885 		    (asoc->stream_queue_cnt == 0)) {
4886 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4887 				/* Need to abort here */
4888 				struct mbuf *op_err;
4889 
4890 		abort_out_now:
4891 				*abort_now = 1;
4892 				/* XXX */
4893 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4894 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4895 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4896 				return;
4897 			} else {
4898 				struct sctp_nets *netp;
4899 
4900 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4901 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4902 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4903 				}
4904 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4905 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4906 				sctp_stop_timers_for_shutdown(stcb);
4907 				if (asoc->alternate) {
4908 					netp = asoc->alternate;
4909 				} else {
4910 					netp = asoc->primary_destination;
4911 				}
4912 				sctp_send_shutdown(stcb, netp);
4913 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4914 				    stcb->sctp_ep, stcb, netp);
4915 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4916 				    stcb->sctp_ep, stcb, netp);
4917 			}
4918 			return;
4919 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4920 		    (asoc->stream_queue_cnt == 0)) {
4921 			struct sctp_nets *netp;
4922 
4923 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4924 				goto abort_out_now;
4925 			}
4926 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4927 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4928 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4929 			sctp_stop_timers_for_shutdown(stcb);
4930 			if (asoc->alternate) {
4931 				netp = asoc->alternate;
4932 			} else {
4933 				netp = asoc->primary_destination;
4934 			}
4935 			sctp_send_shutdown_ack(stcb, netp);
4936 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4937 			    stcb->sctp_ep, stcb, netp);
4938 			return;
4939 		}
4940 	}
4941 	/*
4942 	 * Now here we are going to recycle net_ack for a different use...
4943 	 * HEADS UP.
4944 	 */
4945 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4946 		net->net_ack = 0;
4947 	}
4948 
4949 	/*
4950 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4951 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4952 	 * automatically ensure that.
4953 	 */
4954 	if ((asoc->sctp_cmt_on_off > 0) &&
4955 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4956 	    (cmt_dac_flag == 0)) {
4957 		this_sack_lowest_newack = cum_ack;
4958 	}
4959 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4960 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4961 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4962 	}
4963 	/* JRS - Use the congestion control given in the CC module */
4964 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4965 
4966 	/* Now are we exiting loss recovery ? */
4967 	if (will_exit_fast_recovery) {
4968 		/* Ok, we must exit fast recovery */
4969 		asoc->fast_retran_loss_recovery = 0;
4970 	}
4971 	if ((asoc->sat_t3_loss_recovery) &&
4972 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4973 		/* end satellite t3 loss recovery */
4974 		asoc->sat_t3_loss_recovery = 0;
4975 	}
4976 	/*
4977 	 * CMT Fast recovery
4978 	 */
4979 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4980 		if (net->will_exit_fast_recovery) {
4981 			/* Ok, we must exit fast recovery */
4982 			net->fast_retran_loss_recovery = 0;
4983 		}
4984 	}
4985 
4986 	/* Adjust and set the new rwnd value */
4987 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4988 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4989 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4990 	}
4991 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4992 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4993 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4994 		/* SWS sender side engages */
4995 		asoc->peers_rwnd = 0;
4996 	}
4997 	if (asoc->peers_rwnd > old_rwnd) {
4998 		win_probe_recovery = 1;
4999 	}
5000 	/*
5001 	 * Now we must setup so we have a timer up for anyone with
5002 	 * outstanding data.
5003 	 */
5004 	done_once = 0;
5005 again:
5006 	j = 0;
5007 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5008 		if (win_probe_recovery && (net->window_probe)) {
5009 			win_probe_recovered = 1;
5010 			/*-
5011 			 * Find first chunk that was used with
5012 			 * window probe and clear the event. Put
5013 			 * it back into the send queue as if has
5014 			 * not been sent.
5015 			 */
5016 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5017 				if (tp1->window_probe) {
5018 					sctp_window_probe_recovery(stcb, asoc, tp1);
5019 					break;
5020 				}
5021 			}
5022 		}
5023 		if (net->flight_size) {
5024 			j++;
5025 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5026 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5027 				    stcb->sctp_ep, stcb, net);
5028 			}
5029 			if (net->window_probe) {
5030 				net->window_probe = 0;
5031 			}
5032 		} else {
5033 			if (net->window_probe) {
5034 				/*
5035 				 * In window probes we must assure a timer
5036 				 * is still running there
5037 				 */
5038 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5039 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5040 					    stcb->sctp_ep, stcb, net);
5041 
5042 				}
5043 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5044 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5045 				    stcb, net,
5046 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5047 			}
5048 		}
5049 	}
5050 	if ((j == 0) &&
5051 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5052 	    (asoc->sent_queue_retran_cnt == 0) &&
5053 	    (win_probe_recovered == 0) &&
5054 	    (done_once == 0)) {
5055 		/*
5056 		 * huh, this should not happen unless all packets are
5057 		 * PR-SCTP and marked to skip of course.
5058 		 */
5059 		if (sctp_fs_audit(asoc)) {
5060 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5061 				net->flight_size = 0;
5062 			}
5063 			asoc->total_flight = 0;
5064 			asoc->total_flight_count = 0;
5065 			asoc->sent_queue_retran_cnt = 0;
5066 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5067 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5068 					sctp_flight_size_increase(tp1);
5069 					sctp_total_flight_increase(stcb, tp1);
5070 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5071 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5072 				}
5073 			}
5074 		}
5075 		done_once = 1;
5076 		goto again;
5077 	}
5078 	/*********************************************/
5079 	/* Here we perform PR-SCTP procedures        */
5080 	/* (section 4.2)                             */
5081 	/*********************************************/
5082 	/* C1. update advancedPeerAckPoint */
5083 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5084 		asoc->advanced_peer_ack_point = cum_ack;
5085 	}
5086 	/* C2. try to further move advancedPeerAckPoint ahead */
5087 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5088 		struct sctp_tmit_chunk *lchk;
5089 		uint32_t old_adv_peer_ack_point;
5090 
5091 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5092 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5093 		/* C3. See if we need to send a Fwd-TSN */
5094 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5095 			/*
5096 			 * ISSUE with ECN, see FWD-TSN processing.
5097 			 */
5098 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5099 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5100 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5101 				    old_adv_peer_ack_point);
5102 			}
5103 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5104 				send_forward_tsn(stcb, asoc);
5105 			} else if (lchk) {
5106 				/* try to FR fwd-tsn's that get lost too */
5107 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5108 					send_forward_tsn(stcb, asoc);
5109 				}
5110 			}
5111 		}
5112 		if (lchk) {
5113 			/* Assure a timer is up */
5114 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5115 			    stcb->sctp_ep, stcb, lchk->whoTo);
5116 		}
5117 	}
5118 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5119 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5120 		    a_rwnd,
5121 		    stcb->asoc.peers_rwnd,
5122 		    stcb->asoc.total_flight,
5123 		    stcb->asoc.total_output_queue_size);
5124 	}
5125 }
5126 
5127 void
5128 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5129 {
5130 	/* Copy cum-ack */
5131 	uint32_t cum_ack, a_rwnd;
5132 
5133 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5134 	/* Arrange so a_rwnd does NOT change */
5135 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5136 
5137 	/* Now call the express sack handling */
5138 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5139 }
5140 
5141 static void
5142 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5143     struct sctp_stream_in *strmin)
5144 {
5145 	struct sctp_queued_to_read *ctl, *nctl;
5146 	struct sctp_association *asoc;
5147 	uint32_t tt;
5148 	int need_reasm_check = 0, old;
5149 
5150 	asoc = &stcb->asoc;
5151 	tt = strmin->last_sequence_delivered;
5152 	if (asoc->idata_supported) {
5153 		old = 0;
5154 	} else {
5155 		old = 1;
5156 	}
5157 	/*
5158 	 * First deliver anything prior to and including the stream no that
5159 	 * came in.
5160 	 */
5161 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5162 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5163 			/* this is deliverable now */
5164 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5165 				if (ctl->on_strm_q) {
5166 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5167 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5168 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5169 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5170 					} else {
5171 						panic("strmin:%p ctl:%p unknown %d",
5172 						    strmin, ctl, ctl->on_strm_q);
5173 					}
5174 					ctl->on_strm_q = 0;
5175 				}
5176 				/* subtract pending on streams */
5177 				asoc->size_on_all_streams -= ctl->length;
5178 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5179 				/* deliver it to at least the delivery-q */
5180 				if (stcb->sctp_socket) {
5181 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5182 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5183 					    ctl,
5184 					    &stcb->sctp_socket->so_rcv,
5185 					    1, SCTP_READ_LOCK_HELD,
5186 					    SCTP_SO_NOT_LOCKED);
5187 				}
5188 			} else {
5189 				/* Its a fragmented message */
5190 				if (ctl->first_frag_seen) {
5191 					/*
5192 					 * Make it so this is next to
5193 					 * deliver, we restore later
5194 					 */
5195 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5196 					need_reasm_check = 1;
5197 					break;
5198 				}
5199 			}
5200 		} else {
5201 			/* no more delivery now. */
5202 			break;
5203 		}
5204 	}
5205 	if (need_reasm_check) {
5206 		int ret;
5207 
5208 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5209 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5210 			/* Restore the next to deliver unless we are ahead */
5211 			strmin->last_sequence_delivered = tt;
5212 		}
5213 		if (ret == 0) {
5214 			/* Left the front Partial one on */
5215 			return;
5216 		}
5217 		need_reasm_check = 0;
5218 	}
5219 	/*
5220 	 * now we must deliver things in queue the normal way  if any are
5221 	 * now ready.
5222 	 */
5223 	tt = strmin->last_sequence_delivered + 1;
5224 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5225 		if (tt == ctl->sinfo_ssn) {
5226 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5227 				/* this is deliverable now */
5228 				if (ctl->on_strm_q) {
5229 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5230 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5231 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5232 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5233 					} else {
5234 						panic("strmin:%p ctl:%p unknown %d",
5235 						    strmin, ctl, ctl->on_strm_q);
5236 					}
5237 					ctl->on_strm_q = 0;
5238 				}
5239 				/* subtract pending on streams */
5240 				asoc->size_on_all_streams -= ctl->length;
5241 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5242 				/* deliver it to at least the delivery-q */
5243 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5244 				if (stcb->sctp_socket) {
5245 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5246 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5247 					    ctl,
5248 					    &stcb->sctp_socket->so_rcv, 1,
5249 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5250 
5251 				}
5252 				tt = strmin->last_sequence_delivered + 1;
5253 			} else {
5254 				/* Its a fragmented message */
5255 				if (ctl->first_frag_seen) {
5256 					/*
5257 					 * Make it so this is next to
5258 					 * deliver
5259 					 */
5260 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5261 					need_reasm_check = 1;
5262 					break;
5263 				}
5264 			}
5265 		} else {
5266 			break;
5267 		}
5268 	}
5269 	if (need_reasm_check) {
5270 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5271 	}
5272 }
5273 
5274 static void
5275 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5276     struct sctp_association *asoc,
5277     uint16_t stream, uint32_t seq)
5278 {
5279 	struct sctp_queued_to_read *control;
5280 	struct sctp_stream_in *strm;
5281 	struct sctp_tmit_chunk *chk, *nchk;
5282 
5283 	/*
5284 	 * For now large messages held on the stream reasm that are complete
5285 	 * will be tossed too. We could in theory do more work to spin
5286 	 * through and stop after dumping one msg aka seeing the start of a
5287 	 * new msg at the head, and call the delivery function... to see if
5288 	 * it can be delivered... But for now we just dump everything on the
5289 	 * queue.
5290 	 */
5291 	strm = &asoc->strmin[stream];
5292 	control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5293 	if (control == NULL) {
5294 		/* Not found */
5295 		return;
5296 	}
5297 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5298 		/* Purge hanging chunks */
5299 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5300 		asoc->size_on_reasm_queue -= chk->send_size;
5301 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5302 		if (chk->data) {
5303 			sctp_m_freem(chk->data);
5304 			chk->data = NULL;
5305 		}
5306 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5307 	}
5308 	TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5309 	if (control->on_read_q == 0) {
5310 		sctp_free_remote_addr(control->whoFrom);
5311 		if (control->data) {
5312 			sctp_m_freem(control->data);
5313 			control->data = NULL;
5314 		}
5315 		sctp_free_a_readq(stcb, control);
5316 	}
5317 }
5318 
5319 
5320 void
5321 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5322     struct sctp_forward_tsn_chunk *fwd,
5323     int *abort_flag, struct mbuf *m, int offset)
5324 {
5325 	/* The pr-sctp fwd tsn */
5326 	/*
5327 	 * here we will perform all the data receiver side steps for
5328 	 * processing FwdTSN, as required in by pr-sctp draft:
5329 	 *
5330 	 * Assume we get FwdTSN(x):
5331 	 *
5332 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5333 	 * others we have 3) examine and update re-ordering queue on
5334 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5335 	 * report where we are.
5336 	 */
5337 	struct sctp_association *asoc;
5338 	uint32_t new_cum_tsn, gap;
5339 	unsigned int i, fwd_sz, m_size;
5340 	uint32_t str_seq;
5341 	struct sctp_stream_in *strm;
5342 	struct sctp_queued_to_read *ctl, *sv;
5343 
5344 	asoc = &stcb->asoc;
5345 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5346 		SCTPDBG(SCTP_DEBUG_INDATA1,
5347 		    "Bad size too small/big fwd-tsn\n");
5348 		return;
5349 	}
5350 	m_size = (stcb->asoc.mapping_array_size << 3);
5351 	/*************************************************************/
5352 	/* 1. Here we update local cumTSN and shift the bitmap array */
5353 	/*************************************************************/
5354 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5355 
5356 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5357 		/* Already got there ... */
5358 		return;
5359 	}
5360 	/*
5361 	 * now we know the new TSN is more advanced, let's find the actual
5362 	 * gap
5363 	 */
5364 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5365 	asoc->cumulative_tsn = new_cum_tsn;
5366 	if (gap >= m_size) {
5367 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5368 			struct mbuf *op_err;
5369 			char msg[SCTP_DIAG_INFO_LEN];
5370 
5371 			/*
5372 			 * out of range (of single byte chunks in the rwnd I
5373 			 * give out). This must be an attacker.
5374 			 */
5375 			*abort_flag = 1;
5376 			snprintf(msg, sizeof(msg),
5377 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5378 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5379 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5380 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5381 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5382 			return;
5383 		}
5384 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5385 
5386 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5387 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5388 		asoc->highest_tsn_inside_map = new_cum_tsn;
5389 
5390 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5391 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5392 
5393 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5394 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5395 		}
5396 	} else {
5397 		SCTP_TCB_LOCK_ASSERT(stcb);
5398 		for (i = 0; i <= gap; i++) {
5399 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5400 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5401 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5402 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5403 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5404 				}
5405 			}
5406 		}
5407 	}
5408 	/*************************************************************/
5409 	/* 2. Clear up re-assembly queue                             */
5410 	/*************************************************************/
5411 
5412 	/* This is now done as part of clearing up the stream/seq */
5413 
5414 	/*******************************************************/
5415 	/* 3. Update the PR-stream re-ordering queues and fix  */
5416 	/* delivery issues as needed.                       */
5417 	/*******************************************************/
5418 	fwd_sz -= sizeof(*fwd);
5419 	if (m && fwd_sz) {
5420 		/* New method. */
5421 		unsigned int num_str;
5422 		uint32_t sequence;
5423 		uint16_t stream;
5424 		int old;
5425 		struct sctp_strseq *stseq, strseqbuf;
5426 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5427 
5428 		offset += sizeof(*fwd);
5429 
5430 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5431 		if (asoc->idata_supported) {
5432 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5433 			old = 0;
5434 		} else {
5435 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5436 			old = 1;
5437 		}
5438 		for (i = 0; i < num_str; i++) {
5439 			if (asoc->idata_supported) {
5440 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5441 				    sizeof(struct sctp_strseq_mid),
5442 				    (uint8_t *) & strseqbuf_m);
5443 				offset += sizeof(struct sctp_strseq_mid);
5444 				if (stseq_m == NULL) {
5445 					break;
5446 				}
5447 				stream = ntohs(stseq_m->stream);
5448 				sequence = ntohl(stseq_m->msg_id);
5449 			} else {
5450 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5451 				    sizeof(struct sctp_strseq),
5452 				    (uint8_t *) & strseqbuf);
5453 				offset += sizeof(struct sctp_strseq);
5454 				if (stseq == NULL) {
5455 					break;
5456 				}
5457 				stream = ntohs(stseq->stream);
5458 				sequence = (uint32_t) ntohs(stseq->sequence);
5459 			}
5460 			/* Convert */
5461 
5462 			/* now process */
5463 
5464 			/*
5465 			 * Ok we now look for the stream/seq on the read
5466 			 * queue where its not all delivered. If we find it
5467 			 * we transmute the read entry into a PDI_ABORTED.
5468 			 */
5469 			if (stream >= asoc->streamincnt) {
5470 				/* screwed up streams, stop!  */
5471 				break;
5472 			}
5473 			if ((asoc->str_of_pdapi == stream) &&
5474 			    (asoc->ssn_of_pdapi == sequence)) {
5475 				/*
5476 				 * If this is the one we were partially
5477 				 * delivering now then we no longer are.
5478 				 * Note this will change with the reassembly
5479 				 * re-write.
5480 				 */
5481 				asoc->fragmented_delivery_inprogress = 0;
5482 			}
5483 			strm = &asoc->strmin[stream];
5484 			sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5485 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5486 				if ((ctl->sinfo_stream == stream) &&
5487 				    (ctl->sinfo_ssn == sequence)) {
5488 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5489 					ctl->pdapi_aborted = 1;
5490 					sv = stcb->asoc.control_pdapi;
5491 					ctl->end_added = 1;
5492 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5493 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5494 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5495 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5496 					} else if (ctl->on_strm_q) {
5497 						panic("strm:%p ctl:%p unknown %d",
5498 						    strm, ctl, ctl->on_strm_q);
5499 					}
5500 					ctl->on_strm_q = 0;
5501 					stcb->asoc.control_pdapi = ctl;
5502 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5503 					    stcb,
5504 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5505 					    (void *)&str_seq,
5506 					    SCTP_SO_NOT_LOCKED);
5507 					stcb->asoc.control_pdapi = sv;
5508 					break;
5509 				} else if ((ctl->sinfo_stream == stream) &&
5510 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5511 					/* We are past our victim SSN */
5512 					break;
5513 				}
5514 			}
5515 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5516 				/* Update the sequence number */
5517 				strm->last_sequence_delivered = sequence;
5518 			}
5519 			/* now kick the stream the new way */
5520 			/* sa_ignore NO_NULL_CHK */
5521 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5522 		}
5523 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5524 	}
5525 	/*
5526 	 * Now slide thing forward.
5527 	 */
5528 	sctp_slide_mapping_arrays(stcb);
5529 }
5530