/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk);


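/* Recompute and store the receive window (my_rwnd) for this association. */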
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* What is the overhead of all these rwnd's? */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint32_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

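/*
 * Build the ancillary data (cmsg) mbuf that accompanies a received message:
 * SCTP_RCVINFO, SCTP_NXTINFO, and/or SCTP_SNDRCV/SCTP_EXTRCV, depending on
 * which socket options the application enabled. Returns NULL if the user
 * wants no ancillary data or no mbuf can be allocated.
 */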
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}


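/*
 * Mark a TSN as no longer revokable: move its bit from the regular mapping
 * array into the nr (non-renegable) mapping array and update the
 * highest-TSN trackers for both maps. This only matters when the
 * sctp_do_drain sysctl is enabled; the function is a no-op otherwise.
 */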
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}

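/*
 * Insert a reassembly control into the proper per-stream queue (ordered or
 * un-ordered), keeping the queue sorted by msg_id. Returns -1 if the entry
 * cannot be placed (e.g. a duplicate msg_id), which the callers treat as a
 * protocol violation.
 */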
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t bits, unordered;

	bits = (control->sinfo_flags >> 8);
	unordered = bits & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one control entry can be here in old
				 * style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
				/*
				 * The one in the queue is bigger than the
				 * new one; insert before it.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (at->msg_id == control->msg_id) {
				/*
				 * Gak, the peer sent a duplicate msg_id.
				 * Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end; insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q,
					    at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

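/*
 * Abort the association because of a reassembly error: format a diagnostic
 * string describing where it happened (opspot) and which TSN/SID/FSN was
 * involved, free the offending chunk, and send an ABORT carrying a
 * protocol-violation cause to the peer.
 */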
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num,
		    (uint16_t) chk->rec.data.stream_seq);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we run out of in-order data,
 * as long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
		goto protocol_error;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_sequence_delivered + 1;
	if (nxt_todel == control->sinfo_ssn) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if ((nxt_todel == control->sinfo_ssn) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (nxt_todel == control->sinfo_ssn) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str msg_id: %u duplicate",
			    control->msg_id);
			clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}


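/*
 * Walk the mbuf chain hanging off a control, dropping zero-length mbufs,
 * recomputing control->length, and leaving control->tail_mbuf pointing at
 * the last mbuf so that later appends are cheap. If the control is already
 * on the read queue, the socket-buffer accounting is updated as well.
 */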
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

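/*
 * Append an mbuf chain to a control's existing data, again skipping
 * zero-length mbufs, bumping control->length, updating the read-queue
 * socket-buffer accounting when needed, and advancing tail_mbuf.
 */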
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any
			 * needed socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

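/*
 * Initialize a fresh readq entry (nc) by copying the meta-data of an
 * existing control; the data and reassembly queue start out empty, and a
 * reference is taken on the source's net (whoFrom).
 */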
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->sinfo_ssn = control->sinfo_ssn;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->msg_id = control->msg_id;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static int
sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, uint32_t pd_point)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to msg_id 0. So we have to do the old style
	 * watching to see if we have it all. If we return 1, no other
	 * control entries on the un-ordered queue will be looked at. In
	 * theory there should be no other entries in reality, unless the
	 * peer is sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn_num == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn_num;
						nc->data = tchk->data;
						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					sctp_free_a_readq(stcb, nc);
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

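/*
 * Place an old-style (non I-DATA) un-ordered fragment into the single
 * un-ordered control, sorted by FSN (which for old DATA is the TSN). A
 * FIRST fragment either starts the control's data or, if one was already
 * seen, may swap in as the new lowest message start.
 */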
static void
sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted = 0;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered mode we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN-wise,
			 * i.e. FSN-wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
				/*
				 * Easy case: the start of a new message
				 * beyond the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn_num == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really will
				 * only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Swap the lengths */
			tmp = control->length;
			control->length = chk->send_size;
			chk->send_size = tmp;
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn_num;
			chk->rec.data.fsn_num = tmp;
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	if (TAILQ_EMPTY(&control->reasm)) {
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		return;
	}
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
			/*
			 * This one in the queue is bigger than the new one;
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn_num;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing, if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);
	if ((control) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (strm->last_sequence_delivered == control->sinfo_ssn) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn,
		    control->top_fsn, control->fsn_included,
		    strm->last_sequence_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered entry
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_sequence_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if ((control->sinfo_ssn == next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			strm->last_sequence_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}

void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	int i_locked = 0;

	if (control->on_read_q) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data);
	}
	control->fsn_included = chk->rec.data.fsn_num;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	int do_wakeup, unordered;

	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/*
			 * Ok, we created this control; now let's validate
			 * that it's legal, i.e. there is a B bit set. If
			 * not, and we have up to the cum-ack, it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok, we must queue the chunk into the reassembly portion: o if
	 * it's the first, it goes to the control mbuf. o if it's not first
	 * but the next in sequence, it goes to the control, and each
	 * succeeding one in order also goes. o if it's not in order, we
	 * place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * Error on the sender's part: they either sent us
			 * two data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->top_fsn = chk->rec.data.fsn_num;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup.
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn_num, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup.
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn_num, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * Validate not beyond the top FSN if we have seen
			 * the last one.
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn_num,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn_num);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
				/*
				 * This one in the queue is bigger than the
				 * new one; insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn_num);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
				/*
				 * Gak, the peer sent a duplicate str seq
				 * number.
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn_num);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn_num);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok, let's see if we can pull any in-sequence chunks up into the
	 * control structure, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn_num == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn_num,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				sctp_add_chk_to_control(control, strm, stcb, asoc, at);
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}

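/*
 * Look up the reassembly control for a given msg_id on a stream. For
 * old-style un-ordered data there is only one control, so just return the
 * head of the un-ordered queue.
 */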
static struct sctp_queued_to_read *
find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	} else {
		if (old) {
			control = TAILQ_FIRST(&strm->uno_inqueue);
			return (control);
		}
		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	}
	return (control);
}

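/*
 * Process a single DATA or I-DATA chunk: parse the header (the two chunk
 * formats keep the FSN and msg_id in different places), reject empty data
 * chunks and duplicate TSNs, grow the mapping array if needed, and queue
 * the payload toward the stream/reassembly code.
 */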
1540 static int
1541 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1542     struct mbuf **m, int offset, int chk_length,
1543     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1544     int *break_flag, int last_chunk, uint8_t chtype)
1545 {
1546 	/* Process a data chunk */
1547 	/* struct sctp_tmit_chunk *chk; */
1548 	struct sctp_data_chunk *ch;
1549 	struct sctp_idata_chunk *nch, chunk_buf;
1550 	struct sctp_tmit_chunk *chk;
1551 	uint32_t tsn, fsn, gap, msg_id;
1552 	struct mbuf *dmbuf;
1553 	int the_len;
1554 	int need_reasm_check = 0;
1555 	uint16_t strmno;
1556 	struct mbuf *op_err;
1557 	char msg[SCTP_DIAG_INFO_LEN];
1558 	struct sctp_queued_to_read *control = NULL;
1559 	uint32_t protocol_id;
1560 	uint8_t chunk_flags;
1561 	struct sctp_stream_reset_list *liste;
1562 	struct sctp_stream_in *strm;
1563 	int ordered;
1564 	size_t clen;
1565 	int created_control = 0;
1566 	uint8_t old_data;
1567 
1568 	chk = NULL;
1569 	if (chtype == SCTP_IDATA) {
1570 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1571 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1572 		ch = (struct sctp_data_chunk *)nch;
1573 		clen = sizeof(struct sctp_idata_chunk);
1574 		tsn = ntohl(ch->dp.tsn);
1575 		msg_id = ntohl(nch->dp.msg_id);
1576 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1577 			fsn = 0;
1578 		else
1579 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1580 		old_data = 0;
1581 	} else {
1582 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1583 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1584 		tsn = ntohl(ch->dp.tsn);
1585 		clen = sizeof(struct sctp_data_chunk);
1586 		fsn = tsn;
1587 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1588 		nch = NULL;
1589 		old_data = 1;
1590 	}
1591 	chunk_flags = ch->ch.chunk_flags;
1592 	if ((size_t)chk_length == clen) {
1593 		/*
1594 		 * Need to send an abort since we had a empty data chunk.
1595 		 */
1596 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1597 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1598 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1599 		*abort_flag = 1;
1600 		return (0);
1601 	}
1602 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1603 		asoc->send_sack = 1;
1604 	}
1605 	protocol_id = ch->dp.protocol_id;
1606 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1607 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1608 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1609 	}
1610 	if (stcb == NULL) {
1611 		return (0);
1612 	}
1613 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1614 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1615 		/* It is a duplicate */
1616 		SCTP_STAT_INCR(sctps_recvdupdata);
1617 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1618 			/* Record a dup for the next outbound sack */
1619 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1620 			asoc->numduptsns++;
1621 		}
1622 		asoc->send_sack = 1;
1623 		return (0);
1624 	}
1625 	/* Calculate the number of TSNs between the base and this TSN */
1626 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1627 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1628 		/* Can't hold the bit in the mapping at max array, toss it */
1629 		return (0);
1630 	}
1631 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1632 		SCTP_TCB_LOCK_ASSERT(stcb);
1633 		if (sctp_expand_mapping_array(asoc, gap)) {
1634 			/* Can't expand, drop it */
1635 			return (0);
1636 		}
1637 	}
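	/*
	 * A sketch of the gap arithmetic above (assuming SCTP_CALC_TSN_TO_GAP
	 * is serial subtraction modulo 2^32, which matches its use in this
	 * file):
	 *
	 *	uint32_t gap = tsn - base;	(wraps naturally in uint32_t)
	 *	byte = gap >> 3; bit = gap & 7;	(slot in the mapping array)
	 *
	 * e.g. base = 0xfffffffe and tsn = 0x00000001 give gap = 3, so that
	 * TSN is tracked in byte 0, bit 3 of the array.
	 */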
1638 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1639 		*high_tsn = tsn;
1640 	}
1641 	/* See if we have received this one already */
1642 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1643 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1644 		SCTP_STAT_INCR(sctps_recvdupdata);
1645 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1646 			/* Record a dup for the next outbound sack */
1647 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1648 			asoc->numduptsns++;
1649 		}
1650 		asoc->send_sack = 1;
1651 		return (0);
1652 	}
1653 	/*
1654 	 * Check the GONE flag; duplicates would have caused a SACK to be
1655 	 * sent above
1656 	 */
1657 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1658 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1659 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1660 		/*
1661 		 * wait a minute, this guy is gone, there is no longer a
1662 		 * receiver. Send peer an ABORT!
1663 		 */
1664 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1665 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1666 		*abort_flag = 1;
1667 		return (0);
1668 	}
1669 	/*
1670 	 * Now before going further we see if there is room. If NOT then we
1671 	 * MAY let one through only IF this TSN is the one we are waiting
1672 	 * for on a partial delivery API.
1673 	 */
1674 
1675 	/* Is the stream valid? */
1676 	strmno = ntohs(ch->dp.stream_id);
1677 
1678 	if (strmno >= asoc->streamincnt) {
1679 		struct sctp_error_invalid_stream *cause;
1680 
1681 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1682 		    0, M_NOWAIT, 1, MT_DATA);
1683 		if (op_err != NULL) {
1684 			/* add some space up front so prepend will work well */
1685 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1686 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1687 			/*
1688 			 * Error causes are just parameters, and this one has
1689 			 * two back-to-back parts: the error code and length,
1690 			 * followed by the stream id and a reserved field
1691 			 */
1692 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1693 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1694 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1695 			cause->stream_id = ch->dp.stream_id;
1696 			cause->reserved = htons(0);
1697 			sctp_queue_op_err(stcb, op_err);
1698 		}
1699 		SCTP_STAT_INCR(sctps_badsid);
1700 		SCTP_TCB_LOCK_ASSERT(stcb);
1701 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1702 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1703 			asoc->highest_tsn_inside_nr_map = tsn;
1704 		}
1705 		if (tsn == (asoc->cumulative_tsn + 1)) {
1706 			/* Update cum-ack */
1707 			asoc->cumulative_tsn = tsn;
1708 		}
1709 		return (0);
1710 	}
1711 	strm = &asoc->strmin[strmno];
1712 	/*
1713 	 * If it's a fragmented message, let's see if we can find the control
1714 	 * on the reassembly queues.
1715 	 */
1716 	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1717 		/*
1718 		 * The first *must* be fsn 0, and other (middle/end) pieces
1719 		 * can *not* be fsn 0.
1720 		 */
1721 		goto err_out;
1722 	}
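	/*
	 * I-DATA fragment numbering, in brief: the word after the msg_id is
	 * a PPID/FSN union (ppid_fsn above). The FIRST fragment carries the
	 * PPID there, so its FSN is implicitly 0; MIDDLE/LAST fragments
	 * carry explicit FSNs 1, 2, ... Hence the check above: a non-first
	 * fragment with FSN 0 can never occur legitimately.
	 */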
1723 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1724 		/* See if we can find the re-assembly entity */
1725 		control = find_reasm_entry(strm, msg_id, ordered, old_data);
1726 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1727 		    chunk_flags, control);
1728 		if (control) {
1729 			/* We found something, does it belong? */
1730 			if (ordered && (msg_id != control->sinfo_ssn)) {
1731 		err_out:
				/* msg is not filled on every path to err_out, so set it here */
				snprintf(msg, sizeof(msg), "Reassembly problem (msg_id=%8.8x)", msg_id);
1732 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1733 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1734 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1735 				*abort_flag = 1;
1736 				return (0);
1737 			}
1738 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1739 				/*
1740 				 * We can't have a switched order with an
1741 				 * unordered chunk
1742 				 */
1743 				goto err_out;
1744 			}
1745 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1746 				/*
1747 				 * We can't have a switched unordered with an
1748 				 * ordered chunk
1749 				 */
1750 				goto err_out;
1751 			}
1752 		}
1753 	} else {
1754 		/*
1755 		 * It's a complete segment. Let's validate we don't have a
1756 		 * re-assembly going on with the same Stream/Seq (for
1757 		 * ordered) or in the same Stream for unordered.
1758 		 */
1759 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1760 		    chunk_flags);
1761 		if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1762 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1763 			    chunk_flags,
1764 			    msg_id);
1765 
1766 			goto err_out;
1767 		}
1768 	}
1769 	/* now do the tests */
1770 	if (((asoc->cnt_on_all_streams +
1771 	    asoc->cnt_on_reasm_queue +
1772 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1773 	    (((int)asoc->my_rwnd) <= 0)) {
1774 		/*
1775 		 * When we have NO room in the rwnd we check to make sure
1776 		 * the reader is doing its job...
1777 		 */
1778 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1779 			/* some to read, wake-up */
1780 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1781 			struct socket *so;
1782 
1783 			so = SCTP_INP_SO(stcb->sctp_ep);
1784 			atomic_add_int(&stcb->asoc.refcnt, 1);
1785 			SCTP_TCB_UNLOCK(stcb);
1786 			SCTP_SOCKET_LOCK(so, 1);
1787 			SCTP_TCB_LOCK(stcb);
1788 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1789 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1790 				/* assoc was freed while we were unlocked */
1791 				SCTP_SOCKET_UNLOCK(so, 1);
1792 				return (0);
1793 			}
1794 #endif
1795 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1796 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1797 			SCTP_SOCKET_UNLOCK(so, 1);
1798 #endif
1799 		}
1800 		/* now is it in the mapping array of what we have accepted? */
1801 		if (nch == NULL) {
1802 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1803 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1804 				/* Nope not in the valid range dump it */
1805 		dump_packet:
1806 				sctp_set_rwnd(stcb, asoc);
1807 				if ((asoc->cnt_on_all_streams +
1808 				    asoc->cnt_on_reasm_queue +
1809 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1810 					SCTP_STAT_INCR(sctps_datadropchklmt);
1811 				} else {
1812 					SCTP_STAT_INCR(sctps_datadroprwnd);
1813 				}
1814 				*break_flag = 1;
1815 				return (0);
1816 			}
1817 		} else {
1818 			if (control == NULL) {
1819 				goto dump_packet;
1820 			}
1821 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1822 				goto dump_packet;
1823 			}
1824 		}
1825 	}
1826 #ifdef SCTP_ASOCLOG_OF_TSNS
1827 	SCTP_TCB_LOCK_ASSERT(stcb);
1828 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1829 		asoc->tsn_in_at = 0;
1830 		asoc->tsn_in_wrapped = 1;
1831 	}
1832 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1833 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1834 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1835 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1836 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1837 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1838 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1839 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1840 	asoc->tsn_in_at++;
1841 #endif
1842 	/*
1843 	 * Before we continue, let's validate that we are not being fooled by
1844 	 * an evil attacker. We can only have Nk chunks based on our TSN
1845 	 * spread allowed by the mapping array N * 8 bits, so there is no
1846 	 * way our stream sequence numbers could have wrapped. We of course
1847 	 * only validate the FIRST fragment so the bit must be set.
1848 	 */
1849 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1850 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1851 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1852 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1853 		/* The incoming sseq is behind where we last delivered? */
1854 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1855 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1856 
1857 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1858 		    asoc->strmin[strmno].last_sequence_delivered,
1859 		    tsn, strmno, msg_id);
1860 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1861 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1862 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1863 		*abort_flag = 1;
1864 		return (0);
1865 	}
1866 	/************************************
1867 	 * From here down we may find ch-> invalid
1868 	 * so it's a good idea NOT to use it.
1869 	 *************************************/
1870 	if (nch) {
1871 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1872 	} else {
1873 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1874 	}
1875 	if (last_chunk == 0) {
1876 		if (nch) {
1877 			dmbuf = SCTP_M_COPYM(*m,
1878 			    (offset + sizeof(struct sctp_idata_chunk)),
1879 			    the_len, M_NOWAIT);
1880 		} else {
1881 			dmbuf = SCTP_M_COPYM(*m,
1882 			    (offset + sizeof(struct sctp_data_chunk)),
1883 			    the_len, M_NOWAIT);
1884 		}
1885 #ifdef SCTP_MBUF_LOGGING
1886 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1887 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1888 		}
1889 #endif
1890 	} else {
1891 		/* We can steal the last chunk */
1892 		int l_len;
1893 
1894 		dmbuf = *m;
1895 		/* lop off the top part */
1896 		if (nch) {
1897 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1898 		} else {
1899 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1900 		}
1901 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1902 			l_len = SCTP_BUF_LEN(dmbuf);
1903 		} else {
1904 			/*
1905 			 * need to count up the size; hopefully we do not
1906 			 * hit this too often :-0
1907 			 */
1908 			struct mbuf *lat;
1909 
1910 			l_len = 0;
1911 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1912 				l_len += SCTP_BUF_LEN(lat);
1913 			}
1914 		}
1915 		if (l_len > the_len) {
1916 			/* Trim the trailing padding bytes off too */
1917 			m_adj(dmbuf, -(l_len - the_len));
1918 		}
1919 	}
1920 	if (dmbuf == NULL) {
1921 		SCTP_STAT_INCR(sctps_nomem);
1922 		return (0);
1923 	}
1924 	/*
1925 	 * Now no matter what we need a control, get one if we don't have
1926 	 * one (we may have gotten it above when we found the message was
1927 	 * fragmented).
1928 	 */
1929 	if (control == NULL) {
1930 		sctp_alloc_a_readq(stcb, control);
1931 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1932 		    protocol_id,
1933 		    strmno, msg_id,
1934 		    chunk_flags,
1935 		    NULL, fsn, msg_id);
1936 		if (control == NULL) {
1937 			SCTP_STAT_INCR(sctps_nomem);
1938 			return (0);
1939 		}
1940 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1941 			control->data = dmbuf;
1942 			control->tail_mbuf = NULL;
1943 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1944 			control->top_fsn = control->fsn_included = fsn;
1945 		}
1946 		created_control = 1;
1947 	}
1948 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1949 	    chunk_flags, ordered, msg_id, control);
1950 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1951 	    TAILQ_EMPTY(&asoc->resetHead) &&
1952 	    ((ordered == 0) ||
1953 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1954 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1955 		/* Candidate for express delivery */
1956 		/*
1957 		 * It's not fragmented, no PD-API is up, nothing is in the
1958 		 * delivery queue, it's un-ordered OR ordered and the next to
1959 		 * deliver AND nothing else is stuck on the stream queue,
1960 		 * and there is room for it in the socket buffer. Let's just
1961 		 * stuff it up the buffer....
1962 		 */
1963 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1964 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1965 			asoc->highest_tsn_inside_nr_map = tsn;
1966 		}
1967 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1968 		    control, msg_id);
1969 
1970 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1971 		    control, &stcb->sctp_socket->so_rcv,
1972 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1973 
1974 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1975 			/* for ordered, bump what we delivered */
1976 			strm->last_sequence_delivered++;
1977 		}
1978 		SCTP_STAT_INCR(sctps_recvexpress);
1979 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1980 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1981 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1982 		}
1983 		control = NULL;
1984 		goto finish_express_del;
1985 	}
1986 	/* Now will we need a chunk too? */
1987 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1988 		sctp_alloc_a_chunk(stcb, chk);
1989 		if (chk == NULL) {
1990 			/* No memory so we drop the chunk */
1991 			SCTP_STAT_INCR(sctps_nomem);
1992 			if (last_chunk == 0) {
1993 				/* we copied it, free the copy */
1994 				sctp_m_freem(dmbuf);
1995 			}
1996 			return (0);
1997 		}
1998 		chk->rec.data.TSN_seq = tsn;
1999 		chk->no_fr_allowed = 0;
2000 		chk->rec.data.fsn_num = fsn;
2001 		chk->rec.data.stream_seq = msg_id;
2002 		chk->rec.data.stream_number = strmno;
2003 		chk->rec.data.payloadtype = protocol_id;
2004 		chk->rec.data.context = stcb->asoc.context;
2005 		chk->rec.data.doing_fast_retransmit = 0;
2006 		chk->rec.data.rcv_flags = chunk_flags;
2007 		chk->asoc = asoc;
2008 		chk->send_size = the_len;
2009 		chk->whoTo = net;
2010 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (msg_id: %u)\n",
2011 		    chk,
2012 		    control, msg_id);
2013 		atomic_add_int(&net->ref_count, 1);
2014 		chk->data = dmbuf;
2015 	}
2016 	/* Set the appropriate TSN mark */
2017 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2018 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2019 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2020 			asoc->highest_tsn_inside_nr_map = tsn;
2021 		}
2022 	} else {
2023 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2024 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2025 			asoc->highest_tsn_inside_map = tsn;
2026 		}
2027 	}
2028 	/* Now is it complete (i.e. not fragmented)? */
2029 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2030 		/*
2031 		 * Special check for when streams are resetting. We could be
2032 		 * smarter about this and check the actual stream to see
2033 		 * if it is not being reset; that way we would not create a
2034 		 * HOLB amongst streams being reset and those not being
2035 		 * reset.
2036 		 */
2038 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2039 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2040 			/*
2041 			 * yep it's past where we need to reset... go ahead
2042 			 * and queue it.
2043 			 */
2044 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2045 				/* first one on */
2046 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2047 			} else {
2048 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2049 				unsigned char inserted = 0;
2050 
2051 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2052 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2054 						continue;
2055 					} else {
2056 						/* found it */
2057 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2058 						inserted = 1;
2059 						break;
2060 					}
2061 				}
2062 				if (inserted == 0) {
2063 					/*
2064 					 * Not inserted before any existing
2065 					 * entry, so it must be put at the
2066 					 * end of the queue.
2067 					 */
2068 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2069 				}
2070 			}
2071 			goto finish_express_del;
2072 		}
2073 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2074 			/* queue directly into socket buffer */
2075 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2076 			    control, msg_id);
2077 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2078 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2079 			    control,
2080 			    &stcb->sctp_socket->so_rcv, 1,
2081 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2082 
2083 		} else {
2084 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2085 			    msg_id);
2086 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2087 			if (*abort_flag) {
2088 				if (last_chunk) {
2089 					*m = NULL;
2090 				}
2091 				return (0);
2092 			}
2093 		}
2094 		goto finish_express_del;
2095 	}
2096 	/* If we reach here its a reassembly */
2097 	need_reasm_check = 1;
2098 	SCTPDBG(SCTP_DEBUG_XXX,
2099 	    "Queue data to stream for reasm control: %p msg_id: %u\n",
2100 	    control, msg_id);
2101 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2102 	if (*abort_flag) {
2103 		/*
2104 		 * the assoc is now gone and chk was put onto the reasm
2105 		 * queue, which has all been freed.
2106 		 */
2107 		if (last_chunk) {
2108 			*m = NULL;
2109 		}
2110 		return (0);
2111 	}
2112 finish_express_del:
2113 	/* Here we tidy up things */
2114 	if (tsn == (asoc->cumulative_tsn + 1)) {
2115 		/* Update cum-ack */
2116 		asoc->cumulative_tsn = tsn;
2117 	}
2118 	if (last_chunk) {
2119 		*m = NULL;
2120 	}
2121 	if (ordered) {
2122 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2123 	} else {
2124 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2125 	}
2126 	SCTP_STAT_INCR(sctps_recvdata);
2127 	/* Set it present please */
2128 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2130 	}
2131 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2132 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2133 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2134 	}
2135 	/* check the special flag for stream resets */
2136 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2137 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2138 		/*
2139 		 * We have finished working through the backlogged TSNs, so
2140 		 * now it is time to reset streams: 1) call the reset
2141 		 * function, 2) free the pending_reply space, 3) distribute
2142 		 * any chunks in the pending_reply_queue.
2143 		 */
2144 		struct sctp_queued_to_read *ctl, *nctl;
2145 
2146 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2147 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2148 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2149 		SCTP_FREE(liste, SCTP_M_STRESET);
2150 		/* sa_ignore FREED_MEMORY */
2151 		liste = TAILQ_FIRST(&asoc->resetHead);
2152 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2153 			/* All can be removed */
2154 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2155 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2156 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2157 				if (*abort_flag) {
2158 					return (0);
2159 				}
2160 			}
2161 		} else {
2162 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2163 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2164 					break;
2165 				}
2166 				/*
2167 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2168 				 * process it which is the NOT of
2169 				 * ctl->sinfo_tsn > liste->tsn
2170 				 */
2171 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2172 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2173 				if (*abort_flag) {
2174 					return (0);
2175 				}
2176 			}
2177 		}
2178 		/*
2179 		 * Now service re-assembly to pick up anything that has been
2180 		 * held on the reassembly queue.
2181 		 */
2182 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2183 		need_reasm_check = 0;
2184 	}
2185 	if (need_reasm_check) {
2186 		/* Another one waits ? */
2187 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2188 	}
2189 	return (1);
2190 }
2191 
2192 static const int8_t sctp_map_lookup_tab[256] = {
2193 	0, 1, 0, 2, 0, 1, 0, 3,
2194 	0, 1, 0, 2, 0, 1, 0, 4,
2195 	0, 1, 0, 2, 0, 1, 0, 3,
2196 	0, 1, 0, 2, 0, 1, 0, 5,
2197 	0, 1, 0, 2, 0, 1, 0, 3,
2198 	0, 1, 0, 2, 0, 1, 0, 4,
2199 	0, 1, 0, 2, 0, 1, 0, 3,
2200 	0, 1, 0, 2, 0, 1, 0, 6,
2201 	0, 1, 0, 2, 0, 1, 0, 3,
2202 	0, 1, 0, 2, 0, 1, 0, 4,
2203 	0, 1, 0, 2, 0, 1, 0, 3,
2204 	0, 1, 0, 2, 0, 1, 0, 5,
2205 	0, 1, 0, 2, 0, 1, 0, 3,
2206 	0, 1, 0, 2, 0, 1, 0, 4,
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 7,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 4,
2211 	0, 1, 0, 2, 0, 1, 0, 3,
2212 	0, 1, 0, 2, 0, 1, 0, 5,
2213 	0, 1, 0, 2, 0, 1, 0, 3,
2214 	0, 1, 0, 2, 0, 1, 0, 4,
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 6,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 4,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 5,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 4,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 8
2225 };
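
/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * counting up from bit 0 (a "trailing ones" count): tab[0x00] == 0,
 * tab[0x07] == 3, tab[0xff] == 8. A hedged equivalent without the table
 * (illustrative only; the table avoids a bit scan in the hot path):
 *
 *	static int
 *	trailing_ones(uint8_t val)
 *	{
 *		int n = 0;
 *
 *		while ((val & 1) != 0) {
 *			val >>= 1;
 *			n++;
 *		}
 *		return (n);
 *	}
 */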
2226 
2227 
2228 void
2229 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2230 {
2231 	/*
2232 	 * Now we also need to check the mapping array in a couple of ways.
2233 	 * 1) Did we move the cum-ack point?
2234 	 *
2235 	 * When you first glance at this you might think that all entries that
2236 	 * make up the position of the cum-ack would be in the nr-mapping
2237 	 * array only.. i.e. things up to the cum-ack are always
2238 	 * deliverable. That's true with one exception: when it's a fragmented
2239 	 * message we may not deliver the data until some threshold (or all
2240 	 * of it) is in place. So we must OR the nr_mapping_array and
2241 	 * mapping_array to get a true picture of the cum-ack.
2242 	 */
2243 	struct sctp_association *asoc;
2244 	int at;
2245 	uint8_t val;
2246 	int slide_from, slide_end, lgap, distance;
2247 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2248 
2249 	asoc = &stcb->asoc;
2250 
2251 	old_cumack = asoc->cumulative_tsn;
2252 	old_base = asoc->mapping_array_base_tsn;
2253 	old_highest = asoc->highest_tsn_inside_map;
2254 	/*
2255 	 * We could probably improve this a small bit by calculating the
2256 	 * offset of the current cum-ack as the starting point.
2257 	 */
2258 	at = 0;
2259 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2260 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2261 		if (val == 0xff) {
2262 			at += 8;
2263 		} else {
2264 			/* there is a 0 bit */
2265 			at += sctp_map_lookup_tab[val];
2266 			break;
2267 		}
2268 	}
2269 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2270 
2271 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2272 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2273 #ifdef INVARIANTS
2274 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2275 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2276 #else
2277 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2278 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2279 		sctp_print_mapping_array(asoc);
2280 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2281 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2282 		}
2283 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2284 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2285 #endif
2286 	}
2287 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2288 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2289 	} else {
2290 		highest_tsn = asoc->highest_tsn_inside_map;
2291 	}
2292 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2293 		/* The complete array was completed by a single FR */
2294 		/* highest becomes the cum-ack */
2295 		int clr;
2296 
2297 #ifdef INVARIANTS
2298 		unsigned int i;
2299 
2300 #endif
2301 
2302 		/* clear the array */
2303 		clr = ((at + 7) >> 3);
2304 		if (clr > asoc->mapping_array_size) {
2305 			clr = asoc->mapping_array_size;
2306 		}
2307 		memset(asoc->mapping_array, 0, clr);
2308 		memset(asoc->nr_mapping_array, 0, clr);
2309 #ifdef INVARIANTS
2310 		for (i = 0; i < asoc->mapping_array_size; i++) {
2311 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2312 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2313 				sctp_print_mapping_array(asoc);
2314 			}
2315 		}
2316 #endif
2317 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2318 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2319 	} else if (at >= 8) {
2320 		/* we can slide the mapping array down */
2321 		/* slide_from holds where we hit the first NON 0xff byte */
2322 
2323 		/*
2324 		 * now calculate the ceiling of the move using our highest
2325 		 * TSN value
2326 		 */
2327 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2328 		slide_end = (lgap >> 3);
2329 		if (slide_end < slide_from) {
2330 			sctp_print_mapping_array(asoc);
2331 #ifdef INVARIANTS
2332 			panic("impossible slide");
2333 #else
2334 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2335 			    lgap, slide_end, slide_from, at);
2336 			return;
2337 #endif
2338 		}
2339 		if (slide_end > asoc->mapping_array_size) {
2340 #ifdef INVARIANTS
2341 			panic("would overrun buffer");
2342 #else
2343 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2344 			    asoc->mapping_array_size, slide_end);
2345 			slide_end = asoc->mapping_array_size;
2346 #endif
2347 		}
2348 		distance = (slide_end - slide_from) + 1;
2349 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2350 			sctp_log_map(old_base, old_cumack, old_highest,
2351 			    SCTP_MAP_PREPARE_SLIDE);
2352 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2353 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2354 		}
2355 		if (distance + slide_from > asoc->mapping_array_size ||
2356 		    distance < 0) {
2357 			/*
2358 			 * Here we do NOT slide forward the array so that
2359 			 * hopefully when more data comes in to fill it up
2360 			 * we will be able to slide it forward. Really I
2361 			 * don't think this should happen :-0
2362 			 */
2363 
2364 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2365 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2366 				    (uint32_t) asoc->mapping_array_size,
2367 				    SCTP_MAP_SLIDE_NONE);
2368 			}
2369 		} else {
2370 			int ii;
2371 
2372 			for (ii = 0; ii < distance; ii++) {
2373 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2374 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2375 
2376 			}
2377 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2378 				asoc->mapping_array[ii] = 0;
2379 				asoc->nr_mapping_array[ii] = 0;
2380 			}
2381 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2382 				asoc->highest_tsn_inside_map += (slide_from << 3);
2383 			}
2384 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2385 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2386 			}
2387 			asoc->mapping_array_base_tsn += (slide_from << 3);
2388 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2389 				sctp_log_map(asoc->mapping_array_base_tsn,
2390 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2391 				    SCTP_MAP_SLIDE_RESULT);
2392 			}
2393 		}
2394 	}
2395 }
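
/*
 * The slide above, in miniature (a hedged sketch, not the kernel code):
 * once the first 'slide_from' bytes of (mapping_array | nr_mapping_array)
 * are all 0xff, every TSN they cover is cumulatively acked and the
 * window can move down:
 *
 *	memmove(map, map + slide_from, size - slide_from);
 *	memset(map + size - slide_from, 0, slide_from);
 *	base_tsn += slide_from << 3;	(8 TSNs per byte)
 *
 * The function above copies byte-by-byte over [slide_from, slide_end],
 * bounded by the highest TSN seen, but the net effect is the same.
 */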
2396 
2397 void
2398 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2399 {
2400 	struct sctp_association *asoc;
2401 	uint32_t highest_tsn;
2402 
2403 	asoc = &stcb->asoc;
2404 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2405 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2406 	} else {
2407 		highest_tsn = asoc->highest_tsn_inside_map;
2408 	}
2409 
2410 	/*
2411 	 * Now we need to see if we need to queue a sack or just start the
2412 	 * timer (if allowed).
2413 	 */
2414 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2415 		/*
2416 		 * Ok, special case: in the SHUTDOWN-SENT state, we make
2417 		 * sure the SACK timer is off and instead send a SHUTDOWN
2418 		 * and a SACK.
2419 		 */
2420 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2421 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2422 			    stcb->sctp_ep, stcb, NULL,
2423 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2424 		}
2425 		sctp_send_shutdown(stcb,
2426 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2427 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2428 	} else {
2429 		int is_a_gap;
2430 
2431 		/* is there a gap now ? */
2432 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2433 
2434 		/*
2435 		 * CMT DAC algorithm: increase number of packets received
2436 		 * since last ack
2437 		 */
2438 		stcb->asoc.cmt_dac_pkts_rcvd++;
2439 
2440 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2441 							 * SACK */
2442 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2443 							 * longer is one */
2444 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2445 		    (is_a_gap) ||	/* is still a gap */
2446 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2447 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2448 		    ) {
2449 
2450 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2451 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2452 			    (stcb->asoc.send_sack == 0) &&
2453 			    (stcb->asoc.numduptsns == 0) &&
2454 			    (stcb->asoc.delayed_ack) &&
2455 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2456 
2457 				/*
2458 				 * CMT DAC algorithm: With CMT, delay acks
2459 				 * even in the face of reordering. Therefore,
2460 				 * acks that do not have to be sent for the
2461 				 * above reasons will be delayed. That is,
2462 				 * acks that would have been sent due to gap
2463 				 * reports will be delayed with DAC. Start
2464 				 * the delayed ack timer.
2467 				 */
2468 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2469 				    stcb->sctp_ep, stcb, NULL);
2470 			} else {
2471 				/*
2472 				 * Ok we must build a SACK since the timer
2473 				 * is pending, we got our first packet OR
2474 				 * there are gaps or duplicates.
2475 				 */
2476 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2477 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2478 			}
2479 		} else {
2480 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2481 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2482 				    stcb->sctp_ep, stcb, NULL);
2483 			}
2484 		}
2485 	}
2486 }
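
/*
 * The send-now decision in sctp_sack_check() condenses to the following
 * predicate (a restatement for clarity, using the association fields
 * referenced above):
 *
 *	send_sack ||			(an immediate SACK was requested)
 *	(was_a_gap && !is_a_gap) ||	(a gap just closed)
 *	numduptsns ||			(we hold duplicate TSNs)
 *	is_a_gap ||			(a gap is still open)
 *	delayed_ack == 0 ||		(delayed SACK is disabled)
 *	data_pkts_seen >= sack_freq	(packet-count limit reached)
 *
 * When false, the delayed-ack timer is (re)started if not pending; when
 * true, CMT DAC may still elect to delay the ack as described above.
 */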
2487 
2488 int
2489 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2490     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2491     struct sctp_nets *net, uint32_t * high_tsn)
2492 {
2493 	struct sctp_chunkhdr *ch, chunk_buf;
2494 	struct sctp_association *asoc;
2495 	int num_chunks = 0;	/* number of data chunks processed */
2496 	int stop_proc = 0;
2497 	int chk_length, break_flag, last_chunk;
2498 	int abort_flag = 0, was_a_gap;
2499 	struct mbuf *m;
2500 	uint32_t highest_tsn;
2501 
2502 	/* set the rwnd */
2503 	sctp_set_rwnd(stcb, &stcb->asoc);
2504 
2505 	m = *mm;
2506 	SCTP_TCB_LOCK_ASSERT(stcb);
2507 	asoc = &stcb->asoc;
2508 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2509 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2510 	} else {
2511 		highest_tsn = asoc->highest_tsn_inside_map;
2512 	}
2513 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2514 	/*
2515 	 * setup where we got the last DATA packet from for any SACK that
2516 	 * may need to go out. Don't bump the net. This is done ONLY when a
2517 	 * chunk is assigned.
2518 	 */
2519 	asoc->last_data_chunk_from = net;
2520 
2521 	/*-
2522 	 * Now before we proceed we must figure out if this is a wasted
2523 	 * cluster... i.e. it is a small packet sent in and yet the driver
2524 	 * underneath allocated a full cluster for it. If so we must copy it
2525 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2526 	 * with cluster starvation. Note for __Panda__ we don't do this
2527 	 * since it has clusters all the way down to 64 bytes.
2528 	 */
2529 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2530 		/* we only handle mbufs that are singletons, not chains */
2531 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2532 		if (m) {
2533 			/* ok lets see if we can copy the data up */
2534 			caddr_t *from, *to;
2535 
2536 			/* get the pointers and copy */
2537 			to = mtod(m, caddr_t *);
2538 			from = mtod((*mm), caddr_t *);
2539 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2540 			/* copy the length and free up the old */
2541 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2542 			sctp_m_freem(*mm);
2543 			/* success, back copy */
2544 			*mm = m;
2545 		} else {
2546 			/* We are in trouble in the mbuf world .. yikes */
2547 			m = *mm;
2548 		}
2549 	}
2550 	/* get pointer to the first chunk header */
2551 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2552 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2553 	if (ch == NULL) {
2554 		return (1);
2555 	}
2556 	/*
2557 	 * process all DATA chunks...
2558 	 */
2559 	*high_tsn = asoc->cumulative_tsn;
2560 	break_flag = 0;
2561 	asoc->data_pkts_seen++;
2562 	while (stop_proc == 0) {
2563 		/* validate chunk length */
2564 		chk_length = ntohs(ch->chunk_length);
2565 		if (length - *offset < chk_length) {
2566 			/* all done, mutilated chunk */
2567 			stop_proc = 1;
2568 			continue;
2569 		}
2570 		if ((asoc->idata_supported == 1) &&
2571 		    (ch->chunk_type == SCTP_DATA)) {
2572 			struct mbuf *op_err;
2573 			char msg[SCTP_DIAG_INFO_LEN];
2574 
2575 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2576 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2577 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2578 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2579 			return (2);
2580 		}
2581 		if ((asoc->idata_supported == 0) &&
2582 		    (ch->chunk_type == SCTP_IDATA)) {
2583 			struct mbuf *op_err;
2584 			char msg[SCTP_DIAG_INFO_LEN];
2585 
2586 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2587 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2588 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2589 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2590 			return (2);
2591 		}
2592 		if ((ch->chunk_type == SCTP_DATA) ||
2593 		    (ch->chunk_type == SCTP_IDATA)) {
2594 			int clen;
2595 
2596 			if (ch->chunk_type == SCTP_DATA) {
2597 				clen = sizeof(struct sctp_data_chunk);
2598 			} else {
2599 				clen = sizeof(struct sctp_idata_chunk);
2600 			}
2601 			if (chk_length < clen) {
2602 				/*
2603 				 * Need to send an abort since we had an
2604 				 * invalid data chunk.
2605 				 */
2606 				struct mbuf *op_err;
2607 				char msg[SCTP_DIAG_INFO_LEN];
2608 
2609 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2610 				    chk_length);
2611 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2612 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2613 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2614 				return (2);
2615 			}
2616 #ifdef SCTP_AUDITING_ENABLED
2617 			sctp_audit_log(0xB1, 0);
2618 #endif
2619 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2620 				last_chunk = 1;
2621 			} else {
2622 				last_chunk = 0;
2623 			}
2624 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2625 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2626 			    last_chunk, ch->chunk_type)) {
2627 				num_chunks++;
2628 			}
2629 			if (abort_flag)
2630 				return (2);
2631 
2632 			if (break_flag) {
2633 				/*
2634 				 * Set because we ran out of rwnd space and
2635 				 * have no drop-report space left.
2636 				 */
2637 				stop_proc = 1;
2638 				continue;
2639 			}
2640 		} else {
2641 			/* not a data chunk in the data region */
2642 			switch (ch->chunk_type) {
2643 			case SCTP_INITIATION:
2644 			case SCTP_INITIATION_ACK:
2645 			case SCTP_SELECTIVE_ACK:
2646 			case SCTP_NR_SELECTIVE_ACK:
2647 			case SCTP_HEARTBEAT_REQUEST:
2648 			case SCTP_HEARTBEAT_ACK:
2649 			case SCTP_ABORT_ASSOCIATION:
2650 			case SCTP_SHUTDOWN:
2651 			case SCTP_SHUTDOWN_ACK:
2652 			case SCTP_OPERATION_ERROR:
2653 			case SCTP_COOKIE_ECHO:
2654 			case SCTP_COOKIE_ACK:
2655 			case SCTP_ECN_ECHO:
2656 			case SCTP_ECN_CWR:
2657 			case SCTP_SHUTDOWN_COMPLETE:
2658 			case SCTP_AUTHENTICATION:
2659 			case SCTP_ASCONF_ACK:
2660 			case SCTP_PACKET_DROPPED:
2661 			case SCTP_STREAM_RESET:
2662 			case SCTP_FORWARD_CUM_TSN:
2663 			case SCTP_ASCONF:
2664 				{
2665 					/*
2666 					 * Now, what do we do with KNOWN
2667 					 * chunks that are NOT in the right
2668 					 * place?
2669 					 *
2670 					 * Treat them as a protocol
2671 					 * violation: report the stray
2672 					 * chunk type and abort the
2673 					 * association, as the code below
2674 					 * does.
2675 					 */
2676 					struct mbuf *op_err;
2677 					char msg[SCTP_DIAG_INFO_LEN];
2678 
2679 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2680 					    ch->chunk_type);
2681 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2682 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2683 					return (2);
2684 				}
2685 			default:
2686 				/* unknown chunk type, use bit rules */
2687 				if (ch->chunk_type & 0x40) {
2688 					/* Add a error report to the queue */
2689 					struct mbuf *op_err;
2690 					struct sctp_gen_error_cause *cause;
2691 
2692 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2693 					    0, M_NOWAIT, 1, MT_DATA);
2694 					if (op_err != NULL) {
2695 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2696 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2697 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2698 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2699 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2700 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2701 							sctp_queue_op_err(stcb, op_err);
2702 						} else {
2703 							sctp_m_freem(op_err);
2704 						}
2705 					}
2706 				}
2707 				if ((ch->chunk_type & 0x80) == 0) {
2708 					/* discard the rest of this packet */
2709 					stop_proc = 1;
2710 				}	/* else skip this bad chunk and
2711 					 * continue... */
2712 				break;
2713 			}	/* switch of chunk type */
2714 		}
2715 		*offset += SCTP_SIZE32(chk_length);
2716 		if ((*offset >= length) || stop_proc) {
2717 			/* no more data left in the mbuf chain */
2718 			stop_proc = 1;
2719 			continue;
2720 		}
2721 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2722 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2723 		if (ch == NULL) {
2724 			*offset = length;
2725 			stop_proc = 1;
2726 			continue;
2727 		}
2728 	}
2729 	if (break_flag) {
2730 		/*
2731 		 * we need to report rwnd overrun drops.
2732 		 */
2733 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2734 	}
2735 	if (num_chunks) {
2736 		/*
2737 		 * We got data, so update the time for auto-close and give
2738 		 * the peer credit for being alive.
2739 		 */
2740 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2741 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2742 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2743 			    stcb->asoc.overall_error_count,
2744 			    0,
2745 			    SCTP_FROM_SCTP_INDATA,
2746 			    __LINE__);
2747 		}
2748 		stcb->asoc.overall_error_count = 0;
2749 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2750 	}
2751 	/* now service all of the reassm queue if needed */
2752 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2753 		/* Assure that we ack right away */
2754 		stcb->asoc.send_sack = 1;
2755 	}
2756 	/* Start a sack timer or QUEUE a SACK for sending */
2757 	sctp_sack_check(stcb, was_a_gap);
2758 	return (0);
2759 }
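
/*
 * Chunk walking in sctp_process_data() relies on TLV framing: each
 * chunk header carries a length, and the cursor advances by that length
 * rounded up to a 4-byte boundary. A sketch of the advance (assuming
 * SCTP_SIZE32 is the usual round-up-to-4 macro, which matches its use
 * here):
 *
 *	offset += (chk_length + 3) & ~3;	(SCTP_SIZE32(chk_length))
 *
 * e.g. a 17-byte chunk advances the cursor by 20 bytes.
 */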
2760 
2761 static int
2762 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2763     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2764     int *num_frs,
2765     uint32_t * biggest_newly_acked_tsn,
2766     uint32_t * this_sack_lowest_newack,
2767     int *rto_ok)
2768 {
2769 	struct sctp_tmit_chunk *tp1;
2770 	unsigned int theTSN;
2771 	int j, wake_him = 0, circled = 0;
2772 
2773 	/* Recover the tp1 we last saw */
2774 	tp1 = *p_tp1;
2775 	if (tp1 == NULL) {
2776 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2777 	}
2778 	for (j = frag_strt; j <= frag_end; j++) {
2779 		theTSN = j + last_tsn;
2780 		while (tp1) {
2781 			if (tp1->rec.data.doing_fast_retransmit)
2782 				(*num_frs) += 1;
2783 
2784 			/*-
2785 			 * CMT: CUCv2 algorithm. For each TSN being
2786 			 * processed from the sent queue, track the
2787 			 * next expected pseudo-cumack, or
2788 			 * rtx_pseudo_cumack, if required. Separate
2789 			 * cumack trackers for first transmissions,
2790 			 * and retransmissions.
2791 			 */
2792 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2793 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2794 			    (tp1->snd_count == 1)) {
2795 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2796 				tp1->whoTo->find_pseudo_cumack = 0;
2797 			}
2798 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2799 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2800 			    (tp1->snd_count > 1)) {
2801 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2802 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2803 			}
2804 			if (tp1->rec.data.TSN_seq == theTSN) {
2805 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2806 					/*-
2807 					 * must be held until
2808 					 * cum-ack passes
2809 					 */
2810 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2811 						/*-
2812 						 * If it is less than RESEND, it is
2813 						 * now no-longer in flight.
2814 						 * Higher values may already be set
2815 						 * via previous Gap Ack Blocks...
2816 						 * i.e. ACKED or RESEND.
2817 						 */
2818 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2819 						    *biggest_newly_acked_tsn)) {
2820 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2821 						}
2822 						/*-
2823 						 * CMT: SFR algo (and HTNA) - set
2824 						 * saw_newack to 1 for dest being
2825 						 * newly acked. update
2826 						 * this_sack_highest_newack if
2827 						 * appropriate.
2828 						 */
2829 						if (tp1->rec.data.chunk_was_revoked == 0)
2830 							tp1->whoTo->saw_newack = 1;
2831 
2832 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2833 						    tp1->whoTo->this_sack_highest_newack)) {
2834 							tp1->whoTo->this_sack_highest_newack =
2835 							    tp1->rec.data.TSN_seq;
2836 						}
2837 						/*-
2838 						 * CMT DAC algo: also update
2839 						 * this_sack_lowest_newack
2840 						 */
2841 						if (*this_sack_lowest_newack == 0) {
2842 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2843 								sctp_log_sack(*this_sack_lowest_newack,
2844 								    last_tsn,
2845 								    tp1->rec.data.TSN_seq,
2846 								    0,
2847 								    0,
2848 								    SCTP_LOG_TSN_ACKED);
2849 							}
2850 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2851 						}
2852 						/*-
2853 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2854 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2855 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2856 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2857 						 * Separate pseudo_cumack trackers for first transmissions and
2858 						 * retransmissions.
2859 						 */
2860 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2861 							if (tp1->rec.data.chunk_was_revoked == 0) {
2862 								tp1->whoTo->new_pseudo_cumack = 1;
2863 							}
2864 							tp1->whoTo->find_pseudo_cumack = 1;
2865 						}
2866 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2867 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2868 						}
2869 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2870 							if (tp1->rec.data.chunk_was_revoked == 0) {
2871 								tp1->whoTo->new_pseudo_cumack = 1;
2872 							}
2873 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2874 						}
2875 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2876 							sctp_log_sack(*biggest_newly_acked_tsn,
2877 							    last_tsn,
2878 							    tp1->rec.data.TSN_seq,
2879 							    frag_strt,
2880 							    frag_end,
2881 							    SCTP_LOG_TSN_ACKED);
2882 						}
2883 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2884 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2885 							    tp1->whoTo->flight_size,
2886 							    tp1->book_size,
2887 							    (uint32_t) (uintptr_t) tp1->whoTo,
2888 							    tp1->rec.data.TSN_seq);
2889 						}
2890 						sctp_flight_size_decrease(tp1);
2891 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2892 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2893 							    tp1);
2894 						}
2895 						sctp_total_flight_decrease(stcb, tp1);
2896 
2897 						tp1->whoTo->net_ack += tp1->send_size;
2898 						if (tp1->snd_count < 2) {
2899 							/*-
2900 							 * True non-retransmitted chunk
2901 							 */
2902 							tp1->whoTo->net_ack2 += tp1->send_size;
2903 
2904 							/*-
2905 							 * update RTO too ?
2906 							 */
2907 							if (tp1->do_rtt) {
2908 								if (*rto_ok) {
2909 									tp1->whoTo->RTO =
2910 									    sctp_calculate_rto(stcb,
2911 									    &stcb->asoc,
2912 									    tp1->whoTo,
2913 									    &tp1->sent_rcv_time,
2914 									    sctp_align_safe_nocopy,
2915 									    SCTP_RTT_FROM_DATA);
2916 									*rto_ok = 0;
2917 								}
2918 								if (tp1->whoTo->rto_needed == 0) {
2919 									tp1->whoTo->rto_needed = 1;
2920 								}
2921 								tp1->do_rtt = 0;
2922 							}
2923 						}
2924 					}
2925 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2926 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2927 						    stcb->asoc.this_sack_highest_gap)) {
2928 							stcb->asoc.this_sack_highest_gap =
2929 							    tp1->rec.data.TSN_seq;
2930 						}
2931 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2932 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2933 #ifdef SCTP_AUDITING_ENABLED
2934 							sctp_audit_log(0xB2,
2935 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2936 #endif
2937 						}
2938 					}
2939 					/*-
2940 					 * All chunks NOT UNSENT fall through here and are marked
2941 					 * (leave PR-SCTP ones that are to skip alone though)
2942 					 */
2943 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2944 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2945 						tp1->sent = SCTP_DATAGRAM_MARKED;
2946 					}
2947 					if (tp1->rec.data.chunk_was_revoked) {
2948 						/* deflate the cwnd */
2949 						tp1->whoTo->cwnd -= tp1->book_size;
2950 						tp1->rec.data.chunk_was_revoked = 0;
2951 					}
2952 					/* NR Sack code here */
2953 					if (nr_sacking &&
2954 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2955 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2956 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2957 #ifdef INVARIANTS
2958 						} else {
2959 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2960 #endif
2961 						}
2962 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2963 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2964 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2965 							stcb->asoc.trigger_reset = 1;
2966 						}
2967 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2968 						if (tp1->data) {
2969 							/*
2970 							 * sa_ignore
2971 							 * NO_NULL_CHK
2972 							 */
2973 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2974 							sctp_m_freem(tp1->data);
2975 							tp1->data = NULL;
2976 						}
2977 						wake_him++;
2978 					}
2979 				}
2980 				break;
2981 			}	/* if (tp1->TSN_seq == theTSN) */
2982 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2983 				break;
2984 			}
2985 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2986 			if ((tp1 == NULL) && (circled == 0)) {
2987 				circled++;
2988 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2989 			}
2990 		}		/* end while (tp1) */
2991 		if (tp1 == NULL) {
2992 			circled = 0;
2993 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2994 		}
2995 		/* In case the fragments were not in order we must reset */
2996 	}			/* end for (j = fragStart */
2997 	*p_tp1 = tp1;
2998 	return (wake_him);	/* Return value only used for nr-sack */
2999 }
3000 
3001 
3002 static int
3003 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3004     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3005     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3006     int num_seg, int num_nr_seg, int *rto_ok)
3007 {
3008 	struct sctp_gap_ack_block *frag, block;
3009 	struct sctp_tmit_chunk *tp1;
3010 	int i;
3011 	int num_frs = 0;
3012 	int chunk_freed;
3013 	int non_revocable;
3014 	uint16_t frag_strt, frag_end, prev_frag_end;
3015 
3016 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3017 	prev_frag_end = 0;
3018 	chunk_freed = 0;
3019 
3020 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3021 		if (i == num_seg) {
3022 			prev_frag_end = 0;
3023 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3024 		}
3025 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3026 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3027 		*offset += sizeof(block);
3028 		if (frag == NULL) {
3029 			return (chunk_freed);
3030 		}
3031 		frag_strt = ntohs(frag->start);
3032 		frag_end = ntohs(frag->end);
3033 
3034 		if (frag_strt > frag_end) {
3035 			/* This gap report is malformed, skip it. */
3036 			continue;
3037 		}
3038 		if (frag_strt <= prev_frag_end) {
3039 			/* This gap report is not in order, so restart. */
3040 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3041 		}
3042 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3043 			*biggest_tsn_acked = last_tsn + frag_end;
3044 		}
3045 		if (i < num_seg) {
3046 			non_revocable = 0;
3047 		} else {
3048 			non_revocable = 1;
3049 		}
3050 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3051 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3052 		    this_sack_lowest_newack, rto_ok)) {
3053 			chunk_freed = 1;
3054 		}
3055 		prev_frag_end = frag_end;
3056 	}
3057 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3058 		if (num_frs)
3059 			sctp_log_fr(*biggest_tsn_acked,
3060 			    *biggest_newly_acked_tsn,
3061 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3062 	}
3063 	return (chunk_freed);
3064 }
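
/*
 * Gap Ack Block semantics used above (RFC 4960): each block carries
 * 16-bit start/end offsets relative to the Cumulative TSN Ack, so a
 * block acks the closed TSN range
 *
 *	[cum_ack + frag_strt, cum_ack + frag_end]
 *
 * e.g. cum_ack = 1000 with block (2, 4) acks TSNs 1002..1004. A block
 * with start > end is malformed and skipped; an out-of-order block
 * forces a rescan of the sent queue from its head, as handled above.
 */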
3065 
3066 static void
3067 sctp_check_for_revoked(struct sctp_tcb *stcb,
3068     struct sctp_association *asoc, uint32_t cumack,
3069     uint32_t biggest_tsn_acked)
3070 {
3071 	struct sctp_tmit_chunk *tp1;
3072 
3073 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3074 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3075 			/*
3076 			 * ok this guy is either ACKED or MARKED. If it is
3077 			 * ACKED it has been previously acked but not this
3078 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3079 			 * again.
3080 			 */
3081 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3082 				break;
3083 			}
3084 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3085 				/* it has been revoked */
3086 				tp1->sent = SCTP_DATAGRAM_SENT;
3087 				tp1->rec.data.chunk_was_revoked = 1;
3088 				/*
3089 				 * We must add this stuff back in to assure
3090 				 * timers and such get started.
3091 				 */
3092 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3093 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3094 					    tp1->whoTo->flight_size,
3095 					    tp1->book_size,
3096 					    (uint32_t) (uintptr_t) tp1->whoTo,
3097 					    tp1->rec.data.TSN_seq);
3098 				}
3099 				sctp_flight_size_increase(tp1);
3100 				sctp_total_flight_increase(stcb, tp1);
3101 				/*
3102 				 * We inflate the cwnd to compensate for our
3103 				 * artificial inflation of the flight_size.
3104 				 */
3105 				tp1->whoTo->cwnd += tp1->book_size;
3106 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3107 					sctp_log_sack(asoc->last_acked_seq,
3108 					    cumack,
3109 					    tp1->rec.data.TSN_seq,
3110 					    0,
3111 					    0,
3112 					    SCTP_LOG_TSN_REVOKED);
3113 				}
3114 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3115 				/* it has been re-acked in this SACK */
3116 				tp1->sent = SCTP_DATAGRAM_ACKED;
3117 			}
3118 		}
3119 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3120 			break;
3121 	}
3122 }
3123 
3124 
3125 static void
3126 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3127     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3128 {
3129 	struct sctp_tmit_chunk *tp1;
3130 	int strike_flag = 0;
3131 	struct timeval now;
3132 	int tot_retrans = 0;
3133 	uint32_t sending_seq;
3134 	struct sctp_nets *net;
3135 	int num_dests_sacked = 0;
3136 
3137 	/*
3138 	 * select the sending_seq, this is either the next thing ready to be
3139 	 * Select the sending_seq: this is either the next thing ready to be
3140 	 * sent but not yet transmitted, OR the next seq we will assign.
3141 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3142 	if (tp1 == NULL) {
3143 		sending_seq = asoc->sending_seq;
3144 	} else {
3145 		sending_seq = tp1->rec.data.TSN_seq;
3146 	}
3147 
3148 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3149 	if ((asoc->sctp_cmt_on_off > 0) &&
3150 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3151 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3152 			if (net->saw_newack)
3153 				num_dests_sacked++;
3154 		}
3155 	}
3156 	if (stcb->asoc.prsctp_supported) {
3157 		(void)SCTP_GETTIME_TIMEVAL(&now);
3158 	}
3159 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3160 		strike_flag = 0;
3161 		if (tp1->no_fr_allowed) {
3162 			/* this one had a timeout or something */
3163 			continue;
3164 		}
3165 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3166 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3167 				sctp_log_fr(biggest_tsn_newly_acked,
3168 				    tp1->rec.data.TSN_seq,
3169 				    tp1->sent,
3170 				    SCTP_FR_LOG_CHECK_STRIKE);
3171 		}
3172 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3173 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3174 			/* done */
3175 			break;
3176 		}
3177 		if (stcb->asoc.prsctp_supported) {
3178 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3179 				/* Is it expired? */
3180 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3181 					/* Yes so drop it */
3182 					if (tp1->data != NULL) {
3183 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3184 						    SCTP_SO_NOT_LOCKED);
3185 					}
3186 					continue;
3187 				}
3188 			}
3189 		}
3190 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3191 			/* we are beyond the tsn in the sack  */
3192 			break;
3193 		}
3194 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3195 			/* either a RESEND, ACKED, or MARKED */
3196 			/* skip */
3197 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3198 				/* Continue striking FWD-TSN chunks */
3199 				tp1->rec.data.fwd_tsn_cnt++;
3200 			}
3201 			continue;
3202 		}
3203 		/*
3204 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3205 		 */
3206 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3207 			/*
3208 			 * No new acks were received for data sent to this
3209 			 * dest. Therefore, according to the SFR algo for
3210 			 * CMT, no data sent to this dest can be marked for
3211 			 * FR using this SACK.
3212 			 */
3213 			continue;
3214 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3215 		    tp1->whoTo->this_sack_highest_newack)) {
3216 			/*
3217 			 * CMT: New acks were received for data sent to
3218 			 * this dest. But no new acks were seen for data
3219 			 * sent after tp1. Therefore, according to the SFR
3220 			 * algo for CMT, tp1 cannot be marked for FR using
3221 			 * this SACK. This step covers part of the DAC algo
3222 			 * and the HTNA algo as well.
3223 			 */
3224 			continue;
3225 		}
3226 		/*
3227 		 * Here we check to see if we have already done a FR
3228 		 * and, if so, whether the biggest TSN we saw in the sack is
3229 		 * smaller than the recovery point. If so we don't strike
3230 		 * the tsn... otherwise we CAN strike the TSN.
3231 		 */
3232 		/*
3233 		 * @@@ JRI: Check for CMT if (accum_moved &&
3234 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3235 		 * 0)) {
3236 		 */
3237 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3238 			/*
3239 			 * Strike the TSN if in fast-recovery and cum-ack
3240 			 * moved.
3241 			 */
3242 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3243 				sctp_log_fr(biggest_tsn_newly_acked,
3244 				    tp1->rec.data.TSN_seq,
3245 				    tp1->sent,
3246 				    SCTP_FR_LOG_STRIKE_CHUNK);
3247 			}
3248 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3249 				tp1->sent++;
3250 			}
3251 			if ((asoc->sctp_cmt_on_off > 0) &&
3252 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3253 				/*
3254 				 * CMT DAC algorithm: If the SACK flag is set
3255 				 * to 0, the lowest_newack test will not pass
3256 				 * because it would have been set to the
3257 				 * cumack earlier. If tp1 is not already
3258 				 * marked for rtx, the sack is not mixed, and
3259 				 * tp1 is not between two sacked TSNs, then
3260 				 * mark it by one more. NOTE that we mark one
3261 				 * additional time since the SACK DAC flag
3262 				 * indicates that two packets have been
3263 				 * received after this missing TSN.
3264 				 */
3265 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3266 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3267 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3268 						sctp_log_fr(16 + num_dests_sacked,
3269 						    tp1->rec.data.TSN_seq,
3270 						    tp1->sent,
3271 						    SCTP_FR_LOG_STRIKE_CHUNK);
3272 					}
3273 					tp1->sent++;
3274 				}
3275 			}
3276 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3277 		    (asoc->sctp_cmt_on_off == 0)) {
3278 			/*
3279 			 * For those that have done a FR we must take
3280 			 * special consideration if we strike. I.e., the
3281 			 * biggest_newly_acked must be higher than the
3282 			 * sending_seq at the time we did the FR.
3283 			 */
3284 			if (
3285 #ifdef SCTP_FR_TO_ALTERNATE
3286 			/*
3287 			 * If FR's go to new networks, then we must only do
3288 			 * this for singly homed asoc's. However if the FR's
3289 			 * go to the same network (Armando's work) then it's
3290 			 * ok to FR multiple times.
3291 			 */
3292 			    (asoc->numnets < 2)
3293 #else
3294 			    (1)
3295 #endif
3296 			    ) {
3297 
3298 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3299 				    tp1->rec.data.fast_retran_tsn)) {
3300 					/*
3301 					 * Strike the TSN, since this ack is
3302 					 * beyond where things were when we
3303 					 * did a FR.
3304 					 */
3305 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3306 						sctp_log_fr(biggest_tsn_newly_acked,
3307 						    tp1->rec.data.TSN_seq,
3308 						    tp1->sent,
3309 						    SCTP_FR_LOG_STRIKE_CHUNK);
3310 					}
3311 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3312 						tp1->sent++;
3313 					}
3314 					strike_flag = 1;
3315 					if ((asoc->sctp_cmt_on_off > 0) &&
3316 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3317 						/*
3318 						 * CMT DAC algorithm: If
3319 						 * the SACK flag is set to
3320 						 * 0, the lowest_newack
3321 						 * test will not pass
3322 						 * because it would have
3323 						 * been set to the cumack
3324 						 * earlier. If tp1 is not
3325 						 * already marked for rtx,
3326 						 * the sack is not mixed,
3327 						 * and tp1 is not between
3328 						 * two sacked TSNs, then
3329 						 * mark it by one more.
3330 						 * NOTE that we mark one
3331 						 * additional time since
3332 						 * the SACK DAC flag
3333 						 * indicates that two packets
3334 						 * have been received after this missing TSN.
3335 						 */
3336 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3337 						    (num_dests_sacked == 1) &&
3338 						    SCTP_TSN_GT(this_sack_lowest_newack,
3339 						    tp1->rec.data.TSN_seq)) {
3340 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3341 								sctp_log_fr(32 + num_dests_sacked,
3342 								    tp1->rec.data.TSN_seq,
3343 								    tp1->sent,
3344 								    SCTP_FR_LOG_STRIKE_CHUNK);
3345 							}
3346 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3347 								tp1->sent++;
3348 							}
3349 						}
3350 					}
3351 				}
3352 			}
3353 			/*
3354 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3355 			 * algo covers HTNA.
3356 			 */
3357 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3358 		    biggest_tsn_newly_acked)) {
3359 			/*
3360 			 * We don't strike these: this is the HTNA
3361 			 * algorithm, i.e. we don't strike if our TSN is
3362 			 * larger than the Highest TSN Newly Acked.
3363 			 */
3364 			;
3365 		} else {
3366 			/* Strike the TSN */
3367 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3368 				sctp_log_fr(biggest_tsn_newly_acked,
3369 				    tp1->rec.data.TSN_seq,
3370 				    tp1->sent,
3371 				    SCTP_FR_LOG_STRIKE_CHUNK);
3372 			}
3373 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3374 				tp1->sent++;
3375 			}
3376 			if ((asoc->sctp_cmt_on_off > 0) &&
3377 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3378 				/*
3379 				 * CMT DAC algorithm: If the SACK flag is set
3380 				 * to 0, the lowest_newack test will not pass
3381 				 * because it would have been set to the
3382 				 * cumack earlier. If tp1 is not already
3383 				 * marked for rtx, the sack is not mixed, and
3384 				 * tp1 is not between two sacked TSNs, then
3385 				 * mark it by one more. NOTE that we mark one
3386 				 * additional time since the SACK DAC flag
3387 				 * indicates that two packets have been
3388 				 * received after this missing TSN.
3389 				 */
3390 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3391 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3392 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3393 						sctp_log_fr(48 + num_dests_sacked,
3394 						    tp1->rec.data.TSN_seq,
3395 						    tp1->sent,
3396 						    SCTP_FR_LOG_STRIKE_CHUNK);
3397 					}
3398 					tp1->sent++;
3399 				}
3400 			}
3401 		}
3402 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3403 			struct sctp_nets *alt;
3404 
3405 			/* fix counts and things */
3406 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3407 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3408 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3409 				    tp1->book_size,
3410 				    (uint32_t) (uintptr_t) tp1->whoTo,
3411 				    tp1->rec.data.TSN_seq);
3412 			}
3413 			if (tp1->whoTo) {
3414 				tp1->whoTo->net_ack++;
3415 				sctp_flight_size_decrease(tp1);
3416 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3417 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3418 					    tp1);
3419 				}
3420 			}
3421 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3422 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3423 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3424 			}
3425 			/* add back to the rwnd */
3426 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3427 
3428 			/* remove from the total flight */
3429 			sctp_total_flight_decrease(stcb, tp1);
3430 
3431 			if ((stcb->asoc.prsctp_supported) &&
3432 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3433 				/*
3434 				 * Has it been retransmitted tv_sec times? -
3435 				 * we store the retran count there.
3436 				 */
3437 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3438 					/* Yes, so drop it */
3439 					if (tp1->data != NULL) {
3440 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3441 						    SCTP_SO_NOT_LOCKED);
3442 					}
3443 					/* Make sure to flag we had a FR */
3444 					tp1->whoTo->net_ack++;
3445 					continue;
3446 				}
3447 			}
3448 			/*
3449 			 * SCTP_PRINTF("OK, we are now ready to FR this
3450 			 * guy\n");
3451 			 */
3452 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3454 				    0, SCTP_FR_MARKED);
3455 			}
3456 			if (strike_flag) {
3457 				/* This is a subsequent FR */
3458 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3459 			}
3460 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3461 			if (asoc->sctp_cmt_on_off > 0) {
3462 				/*
3463 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3464 				 * If CMT is being used, then pick dest with
3465 				 * largest ssthresh for any retransmission.
3466 				 */
3467 				tp1->no_fr_allowed = 1;
3468 				alt = tp1->whoTo;
3469 				/* sa_ignore NO_NULL_CHK */
3470 				if (asoc->sctp_cmt_pf > 0) {
3471 					/*
3472 					 * JRS 5/18/07 - If CMT PF is on,
3473 					 * use the PF version of
3474 					 * find_alt_net()
3475 					 */
3476 					alt = sctp_find_alternate_net(stcb, alt, 2);
3477 				} else {
3478 					/*
3479 					 * JRS 5/18/07 - If only CMT is on,
3480 					 * use the CMT version of
3481 					 * find_alt_net()
3482 					 */
3483 					/* sa_ignore NO_NULL_CHK */
3484 					alt = sctp_find_alternate_net(stcb, alt, 1);
3485 				}
3486 				if (alt == NULL) {
3487 					alt = tp1->whoTo;
3488 				}
3489 				/*
3490 				 * CUCv2: If a different dest is picked for
3491 				 * the retransmission, then new
3492 				 * (rtx-)pseudo_cumack needs to be tracked
3493 				 * for orig dest. Let CUCv2 track new (rtx-)
3494 				 * pseudo-cumack always.
3495 				 */
3496 				if (tp1->whoTo) {
3497 					tp1->whoTo->find_pseudo_cumack = 1;
3498 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3499 				}
3500 			} else {/* CMT is OFF */
3501 
3502 #ifdef SCTP_FR_TO_ALTERNATE
3503 				/* Can we find an alternate? */
3504 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3505 #else
3506 				/*
3507 				 * default behavior is to NOT retransmit
3508 				 * FR's to an alternate. Armando Caro's
3509 				 * paper details why.
3510 				 */
3511 				alt = tp1->whoTo;
3512 #endif
3513 			}
3514 
3515 			tp1->rec.data.doing_fast_retransmit = 1;
3516 			tot_retrans++;
3517 			/* mark the sending seq for possible subsequent FR's */
3518 			/*
3519 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3520 			 * (uint32_t)tp1->rec.data.TSN_seq);
3521 			 */
3522 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3523 				/*
3524 				 * If the send queue is empty then sending_seq
3525 				 * is the next sequence number that will be
3526 				 * assigned, so we subtract one from it to
3527 				 * get the one we last sent.
3528 				 */
3529 				tp1->rec.data.fast_retran_tsn = sending_seq;
3530 			} else {
3531 				/*
3532 				 * If there are chunks on the send queue
3533 				 * (unsent data that has made it from the
3534 				 * stream queues but not out the door), we
3535 				 * take the first one (which will have the
3536 				 * lowest TSN) and subtract one to get the
3537 				 * one we last sent.
3538 				 */
3539 				struct sctp_tmit_chunk *ttt;
3540 
3541 				ttt = TAILQ_FIRST(&asoc->send_queue);
3542 				tp1->rec.data.fast_retran_tsn =
3543 				    ttt->rec.data.TSN_seq;
3544 			}
3545 
3546 			if (tp1->do_rtt) {
3547 				/*
3548 				 * this guy had an RTO calculation pending on
3549 				 * it, cancel it
3550 				 */
3551 				if ((tp1->whoTo != NULL) &&
3552 				    (tp1->whoTo->rto_needed == 0)) {
3553 					tp1->whoTo->rto_needed = 1;
3554 				}
3555 				tp1->do_rtt = 0;
3556 			}
3557 			if (alt != tp1->whoTo) {
3558 				/* yes, there is an alternate. */
3559 				sctp_free_remote_addr(tp1->whoTo);
3560 				/* sa_ignore FREED_MEMORY */
3561 				tp1->whoTo = alt;
3562 				atomic_add_int(&alt->ref_count, 1);
3563 			}
3564 		}
3565 	}
3566 }
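
/*
 * The strike logic above can be summarized as: each SACK that implies a
 * chunk is missing increments tp1->sent until it reaches
 * SCTP_DATAGRAM_RESEND, at which point the chunk is queued for fast
 * retransmission.  A minimal sketch of a single strike, including the CMT
 * DAC extra strike (the ex_ name and the SCTP_STRIKE_EXAMPLE guard are
 * hypothetical, for illustration only):
 */
#ifdef SCTP_STRIKE_EXAMPLE
static void
ex_strike(struct sctp_tmit_chunk *chk, int mixed_sack, int below_lowest_newack)
{
	if (chk->sent < SCTP_DATAGRAM_RESEND) {
		chk->sent++;
	}
	/*
	 * CMT DAC: a non-mixed SACK whose DAC flag reports two packets
	 * received past the hole counts as an additional strike.
	 */
	if ((chk->sent < SCTP_DATAGRAM_RESEND) &&
	    (mixed_sack == 0) && below_lowest_newack) {
		chk->sent++;
	}
}
#endif				/* SCTP_STRIKE_EXAMPLE */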
3567 
3568 struct sctp_tmit_chunk *
3569 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3570     struct sctp_association *asoc)
3571 {
3572 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3573 	struct timeval now;
3574 	int now_filled = 0;
3575 
3576 	if (asoc->prsctp_supported == 0) {
3577 		return (NULL);
3578 	}
3579 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3580 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3581 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3582 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3583 			/* no chance to advance, out of here */
3584 			break;
3585 		}
3586 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3587 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3588 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3589 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3590 				    asoc->advanced_peer_ack_point,
3591 				    tp1->rec.data.TSN_seq, 0, 0);
3592 			}
3593 		}
3594 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3595 			/*
3596 			 * We can't fwd-tsn past any that are reliable, i.e.
3597 			 * ones retransmitted until the asoc fails.
3598 			 */
3599 			break;
3600 		}
3601 		if (!now_filled) {
3602 			(void)SCTP_GETTIME_TIMEVAL(&now);
3603 			now_filled = 1;
3604 		}
3605 		/*
3606 		 * Now we have a chunk which is marked for another
3607 		 * retransmission to a PR-stream but may have run out of its
3608 		 * chances already, OR has been marked to skip now. Can we
3609 		 * skip it if it's a resend?
3610 		 */
3611 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3612 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3613 			/*
3614 			 * Now, is this one marked for resend and is its time
3615 			 * now up?
3616 			 */
3617 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3618 				/* Yes so drop it */
3619 				if (tp1->data) {
3620 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3621 					    1, SCTP_SO_NOT_LOCKED);
3622 				}
3623 			} else {
3624 				/*
3625 				 * No, we are done when we hit one marked for
3626 				 * resend whose time has not expired.
3627 				 */
3628 				break;
3629 			}
3630 		}
3631 		/*
3632 		 * OK, now if this chunk is marked to be dropped we can clean
3633 		 * up the chunk, advance our peer ack point, and check
3634 		 * the next chunk.
3635 		 */
3636 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3637 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3638 			/* the advanced PeerAckPoint moves forward */
3639 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3640 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3641 				a_adv = tp1;
3642 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3643 				/* No update but we do save the chk */
3644 				a_adv = tp1;
3645 			}
3646 		} else {
3647 			/*
3648 			 * If it is still in RESEND we can advance no
3649 			 * further
3650 			 */
3651 			break;
3652 		}
3653 	}
3654 	return (a_adv);
3655 }
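
/*
 * Callers use the returned chunk and the (possibly moved)
 * advanced_peer_ack_point to decide whether a FORWARD-TSN must be sent, as
 * in the PR-SCTP handling further below.  A condensed sketch of that
 * caller-side pattern (the ex_ name and the SCTP_FWD_TSN_EXAMPLE guard are
 * hypothetical, for illustration only):
 */
#ifdef SCTP_FWD_TSN_EXAMPLE
static void
ex_maybe_send_fwd_tsn(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t cumack)
{
	uint32_t old_adv_peer_ack_point;
	struct sctp_tmit_chunk *lchk;

	old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
	lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
	/* Only send a FWD-TSN if the ack point is beyond the cumack and moved. */
	if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack) &&
	    SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
		send_forward_tsn(stcb, asoc);
	}
	if (lchk != NULL) {
		/* Ensure a timer is running for the last skipped chunk. */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND,
		    stcb->sctp_ep, stcb, lchk->whoTo);
	}
}
#endif				/* SCTP_FWD_TSN_EXAMPLE */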
3656 
3657 static int
3658 sctp_fs_audit(struct sctp_association *asoc)
3659 {
3660 	struct sctp_tmit_chunk *chk;
3661 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3662 	int ret;
3663 
3664 #ifndef INVARIANTS
3665 	int entry_flight, entry_cnt;
3666 
3667 #endif
3668 
3669 	ret = 0;
3670 #ifndef INVARIANTS
3671 	entry_flight = asoc->total_flight;
3672 	entry_cnt = asoc->total_flight_count;
3673 #endif
3674 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3675 		return (0);
3676 
3677 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3678 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3679 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3680 			    chk->rec.data.TSN_seq,
3681 			    chk->send_size,
3682 			    chk->snd_count);
3683 			inflight++;
3684 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3685 			resend++;
3686 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3687 			inbetween++;
3688 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3689 			above++;
3690 		} else {
3691 			acked++;
3692 		}
3693 	}
3694 
3695 	if ((inflight > 0) || (inbetween > 0)) {
3696 #ifdef INVARIANTS
3697 		panic("Flight size-express incorrect? \n");
3698 #else
3699 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3700 		    entry_flight, entry_cnt);
3701 
3702 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3703 		    inflight, inbetween, resend, above, acked);
3704 		ret = 1;
3705 #endif
3706 	}
3707 	return (ret);
3708 }
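
/*
 * The audit above checks the invariant that asoc->total_flight reflects
 * exactly the chunks still outstanding, i.e. those below
 * SCTP_DATAGRAM_RESEND on the sent queue.  A minimal sketch of the
 * recomputation (the ex_ name and the SCTP_FS_EXAMPLE guard are
 * hypothetical, for illustration only):
 */
#ifdef SCTP_FS_EXAMPLE
static uint32_t
ex_count_in_flight(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t bytes = 0;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND) {
			/* Still in flight: count its booked size. */
			bytes += chk->book_size;
		}
	}
	return (bytes);		/* should match asoc->total_flight */
}
#endif				/* SCTP_FS_EXAMPLE */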
3709 
3710 
3711 static void
3712 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3713     struct sctp_association *asoc,
3714     struct sctp_tmit_chunk *tp1)
3715 {
3716 	tp1->window_probe = 0;
3717 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3718 		/* TSNs skipped; we do NOT move back. */
3719 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3720 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3721 		    tp1->book_size,
3722 		    (uint32_t) (uintptr_t) tp1->whoTo,
3723 		    tp1->rec.data.TSN_seq);
3724 		return;
3725 	}
3726 	/* First set this up by shrinking the flight size */
3727 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3728 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3729 		    tp1);
3730 	}
3731 	sctp_flight_size_decrease(tp1);
3732 	sctp_total_flight_decrease(stcb, tp1);
3733 	/* Now mark for resend */
3734 	tp1->sent = SCTP_DATAGRAM_RESEND;
3735 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3736 
3737 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3738 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3739 		    tp1->whoTo->flight_size,
3740 		    tp1->book_size,
3741 		    (uint32_t) (uintptr_t) tp1->whoTo,
3742 		    tp1->rec.data.TSN_seq);
3743 	}
3744 }
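
/*
 * This is invoked from the SACK handlers below: once the peer's window
 * reopens (win_probe_recovery), the sent queue is scanned for the probe
 * chunk so it can be moved back to the resend state.  A condensed sketch of
 * that caller-side scan (the ex_ name and the SCTP_WP_EXAMPLE guard are
 * hypothetical, for illustration only):
 */
#ifdef SCTP_WP_EXAMPLE
static void
ex_recover_window_probe(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->window_probe) {
			/* Move it back to the data send queue. */
			sctp_window_probe_recovery(stcb, asoc, chk);
			break;
		}
	}
}
#endif				/* SCTP_WP_EXAMPLE */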
3745 
3746 void
3747 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3748     uint32_t rwnd, int *abort_now, int ecne_seen)
3749 {
3750 	struct sctp_nets *net;
3751 	struct sctp_association *asoc;
3752 	struct sctp_tmit_chunk *tp1, *tp2;
3753 	uint32_t old_rwnd;
3754 	int win_probe_recovery = 0;
3755 	int win_probe_recovered = 0;
3756 	int j, done_once = 0;
3757 	int rto_ok = 1;
3758 	uint32_t send_s;
3759 
3760 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3761 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3762 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3763 	}
3764 	SCTP_TCB_LOCK_ASSERT(stcb);
3765 #ifdef SCTP_ASOCLOG_OF_TSNS
3766 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3767 	stcb->asoc.cumack_log_at++;
3768 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3769 		stcb->asoc.cumack_log_at = 0;
3770 	}
3771 #endif
3772 	asoc = &stcb->asoc;
3773 	old_rwnd = asoc->peers_rwnd;
3774 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3775 		/* old ack */
3776 		return;
3777 	} else if (asoc->last_acked_seq == cumack) {
3778 		/* Window update sack */
3779 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3780 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3781 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3782 			/* SWS sender side engages */
3783 			asoc->peers_rwnd = 0;
3784 		}
3785 		if (asoc->peers_rwnd > old_rwnd) {
3786 			goto again;
3787 		}
3788 		return;
3789 	}
3790 	/* First setup for CC stuff */
3791 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3792 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3793 			/* Drag along the window_tsn for cwr's */
3794 			net->cwr_window_tsn = cumack;
3795 		}
3796 		net->prev_cwnd = net->cwnd;
3797 		net->net_ack = 0;
3798 		net->net_ack2 = 0;
3799 
3800 		/*
3801 		 * CMT: Reset CUC and Fast recovery algo variables before
3802 		 * SACK processing
3803 		 */
3804 		net->new_pseudo_cumack = 0;
3805 		net->will_exit_fast_recovery = 0;
3806 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3807 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3808 		}
3809 	}
3810 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3811 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3812 		    sctpchunk_listhead);
3813 		send_s = tp1->rec.data.TSN_seq + 1;
3814 	} else {
3815 		send_s = asoc->sending_seq;
3816 	}
3817 	if (SCTP_TSN_GE(cumack, send_s)) {
3818 		struct mbuf *op_err;
3819 		char msg[SCTP_DIAG_INFO_LEN];
3820 
3821 		*abort_now = 1;
3822 		/* XXX */
3823 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3824 		    cumack, send_s);
3825 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3826 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3827 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3828 		return;
3829 	}
3830 	asoc->this_sack_highest_gap = cumack;
3831 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3832 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3833 		    stcb->asoc.overall_error_count,
3834 		    0,
3835 		    SCTP_FROM_SCTP_INDATA,
3836 		    __LINE__);
3837 	}
3838 	stcb->asoc.overall_error_count = 0;
3839 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3840 		/* process the new consecutive TSN first */
3841 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3842 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3843 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3844 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3845 				}
3846 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3847 					/*
3848 					 * If it is less than ACKED, it is
3849 					 * now no longer in flight. Higher
3850 					 * values may occur during marking
3851 					 */
3852 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3853 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3854 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3855 							    tp1->whoTo->flight_size,
3856 							    tp1->book_size,
3857 							    (uint32_t) (uintptr_t) tp1->whoTo,
3858 							    tp1->rec.data.TSN_seq);
3859 						}
3860 						sctp_flight_size_decrease(tp1);
3861 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3862 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3863 							    tp1);
3864 						}
3865 						/* sa_ignore NO_NULL_CHK */
3866 						sctp_total_flight_decrease(stcb, tp1);
3867 					}
3868 					tp1->whoTo->net_ack += tp1->send_size;
3869 					if (tp1->snd_count < 2) {
3870 						/*
3871 						 * True non-retransmitted
3872 						 * chunk
3873 						 */
3874 						tp1->whoTo->net_ack2 +=
3875 						    tp1->send_size;
3876 
3877 						/* update RTO too? */
3878 						if (tp1->do_rtt) {
3879 							if (rto_ok) {
3880 								tp1->whoTo->RTO =
3881 								/* sa_ignore NO_NULL_CHK */
3886 								    sctp_calculate_rto(stcb,
3887 								    asoc, tp1->whoTo,
3888 								    &tp1->sent_rcv_time,
3889 								    sctp_align_safe_nocopy,
3890 								    SCTP_RTT_FROM_DATA);
3891 								rto_ok = 0;
3892 							}
3893 							if (tp1->whoTo->rto_needed == 0) {
3894 								tp1->whoTo->rto_needed = 1;
3895 							}
3896 							tp1->do_rtt = 0;
3897 						}
3898 					}
3899 					/*
3900 					 * CMT: CUCv2 algorithm. From the
3901 					 * cumack'd TSNs, for each TSN being
3902 					 * acked for the first time, set the
3903 					 * following variables for the
3904 					 * corresponding destination.
3905 					 * new_pseudo_cumack will trigger a
3906 					 * cwnd update.
3907 					 * find_(rtx_)pseudo_cumack will
3908 					 * trigger search for the next
3909 					 * expected (rtx-)pseudo-cumack.
3910 					 */
3911 					tp1->whoTo->new_pseudo_cumack = 1;
3912 					tp1->whoTo->find_pseudo_cumack = 1;
3913 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3914 
3915 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3916 						/* sa_ignore NO_NULL_CHK */
3917 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3918 					}
3919 				}
3920 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3921 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3922 				}
3923 				if (tp1->rec.data.chunk_was_revoked) {
3924 					/* deflate the cwnd */
3925 					tp1->whoTo->cwnd -= tp1->book_size;
3926 					tp1->rec.data.chunk_was_revoked = 0;
3927 				}
3928 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3929 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3930 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3931 #ifdef INVARIANTS
3932 					} else {
3933 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3934 #endif
3935 					}
3936 				}
3937 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3938 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3939 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3940 					asoc->trigger_reset = 1;
3941 				}
3942 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3943 				if (tp1->data) {
3944 					/* sa_ignore NO_NULL_CHK */
3945 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3946 					sctp_m_freem(tp1->data);
3947 					tp1->data = NULL;
3948 				}
3949 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3950 					sctp_log_sack(asoc->last_acked_seq,
3951 					    cumack,
3952 					    tp1->rec.data.TSN_seq,
3953 					    0,
3954 					    0,
3955 					    SCTP_LOG_FREE_SENT);
3956 				}
3957 				asoc->sent_queue_cnt--;
3958 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3959 			} else {
3960 				break;
3961 			}
3962 		}
3963 
3964 	}
3965 	/* sa_ignore NO_NULL_CHK */
3966 	if (stcb->sctp_socket) {
3967 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3968 		struct socket *so;
3969 
3970 #endif
3971 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3972 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3973 			/* sa_ignore NO_NULL_CHK */
3974 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3975 		}
3976 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3977 		so = SCTP_INP_SO(stcb->sctp_ep);
3978 		atomic_add_int(&stcb->asoc.refcnt, 1);
3979 		SCTP_TCB_UNLOCK(stcb);
3980 		SCTP_SOCKET_LOCK(so, 1);
3981 		SCTP_TCB_LOCK(stcb);
3982 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3983 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3984 			/* assoc was freed while we were unlocked */
3985 			SCTP_SOCKET_UNLOCK(so, 1);
3986 			return;
3987 		}
3988 #endif
3989 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3990 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 		SCTP_SOCKET_UNLOCK(so, 1);
3992 #endif
3993 	} else {
3994 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3995 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3996 		}
3997 	}
3998 
3999 	/* JRS - Use the congestion control given in the CC module */
4000 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4001 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4002 			if (net->net_ack2 > 0) {
4003 				/*
4004 				 * Karn's rule applies to clearing error
4005 				 * count; this is optional.
4006 				 */
4007 				net->error_count = 0;
4008 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4009 					/* addr came good */
4010 					net->dest_state |= SCTP_ADDR_REACHABLE;
4011 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4012 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4013 				}
4014 				if (net == stcb->asoc.primary_destination) {
4015 					if (stcb->asoc.alternate) {
4016 						/*
4017 						 * release the alternate,
4018 						 * primary is good
4019 						 */
4020 						sctp_free_remote_addr(stcb->asoc.alternate);
4021 						stcb->asoc.alternate = NULL;
4022 					}
4023 				}
4024 				if (net->dest_state & SCTP_ADDR_PF) {
4025 					net->dest_state &= ~SCTP_ADDR_PF;
4026 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4027 					    stcb->sctp_ep, stcb, net,
4028 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4029 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4030 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4031 					/* Done with this net */
4032 					net->net_ack = 0;
4033 				}
4034 				/* restore any doubled timers */
4035 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4036 				if (net->RTO < stcb->asoc.minrto) {
4037 					net->RTO = stcb->asoc.minrto;
4038 				}
4039 				if (net->RTO > stcb->asoc.maxrto) {
4040 					net->RTO = stcb->asoc.maxrto;
4041 				}
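				/*
				 * Worked example (assuming SCTP_RTT_SHIFT
				 * is 3): lastsa = 800 and lastsv = 50 give
				 * RTO = (800 >> 3) + 50 = 150 ms, which is
				 * then clamped into [minrto, maxrto].
				 */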
4042 			}
4043 		}
4044 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4045 	}
4046 	asoc->last_acked_seq = cumack;
4047 
4048 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4049 		/* nothing left in-flight */
4050 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4051 			net->flight_size = 0;
4052 			net->partial_bytes_acked = 0;
4053 		}
4054 		asoc->total_flight = 0;
4055 		asoc->total_flight_count = 0;
4056 	}
4057 	/* RWND update */
4058 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4059 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4060 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4061 		/* SWS sender side engages */
4062 		asoc->peers_rwnd = 0;
4063 	}
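	/*
	 * Worked example: with rwnd = 64000, total_flight = 12000, three
	 * chunks in flight, and sctp_peer_chunk_oh = 256, the usable peer
	 * window is 64000 - (12000 + 3 * 256) = 51232. sctp_sbspace_sub()
	 * clamps at zero rather than underflowing, and any result below
	 * sctp_sws_sender is forced to 0 so we do not make
	 * silly-window-syndrome sends.
	 */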
4064 	if (asoc->peers_rwnd > old_rwnd) {
4065 		win_probe_recovery = 1;
4066 	}
4067 	/* Now ensure a timer is running wherever data is queued */
4068 again:
4069 	j = 0;
4070 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4071 		int to_ticks;
4072 
4073 		if (win_probe_recovery && (net->window_probe)) {
4074 			win_probe_recovered = 1;
4075 			/*
4076 			 * Find the first chunk that was used for a window
4077 			 * probe and clear its sent state.
4078 			 */
4079 			/* sa_ignore FREED_MEMORY */
4080 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4081 				if (tp1->window_probe) {
4082 					/* move back to data send queue */
4083 					sctp_window_probe_recovery(stcb, asoc, tp1);
4084 					break;
4085 				}
4086 			}
4087 		}
4088 		if (net->RTO == 0) {
4089 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4090 		} else {
4091 			to_ticks = MSEC_TO_TICKS(net->RTO);
4092 		}
4093 		if (net->flight_size) {
4094 			j++;
4095 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4096 			    sctp_timeout_handler, &net->rxt_timer);
4097 			if (net->window_probe) {
4098 				net->window_probe = 0;
4099 			}
4100 		} else {
4101 			if (net->window_probe) {
4102 				/*
4103 				 * In window probes we must ensure a timer
4104 				 * is still running there
4105 				 */
4106 				net->window_probe = 0;
4107 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4108 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4109 					    sctp_timeout_handler, &net->rxt_timer);
4110 				}
4111 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4112 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4113 				    stcb, net,
4114 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4115 			}
4116 		}
4117 	}
4118 	if ((j == 0) &&
4119 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4120 	    (asoc->sent_queue_retran_cnt == 0) &&
4121 	    (win_probe_recovered == 0) &&
4122 	    (done_once == 0)) {
4123 		/*
4124 		 * huh, this should not happen unless all packets are
4125 		 * PR-SCTP and marked to skip of course.
4126 		 */
4127 		if (sctp_fs_audit(asoc)) {
4128 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4129 				net->flight_size = 0;
4130 			}
4131 			asoc->total_flight = 0;
4132 			asoc->total_flight_count = 0;
4133 			asoc->sent_queue_retran_cnt = 0;
4134 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4135 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4136 					sctp_flight_size_increase(tp1);
4137 					sctp_total_flight_increase(stcb, tp1);
4138 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4139 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4140 				}
4141 			}
4142 		}
4143 		done_once = 1;
4144 		goto again;
4145 	}
4146 	/**********************************/
4147 	/* Now what about shutdown issues */
4148 	/**********************************/
4149 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4150 		/* nothing left on sendqueue.. consider done */
4151 		/* clean up */
4152 		if ((asoc->stream_queue_cnt == 1) &&
4153 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4154 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4155 		    (asoc->locked_on_sending)
4156 		    ) {
4157 			struct sctp_stream_queue_pending *sp;
4158 
4159 			/*
4160 			 * We may be in a state where we got everything across
4161 			 * but cannot write more due to a shutdown... we abort
4162 			 * since the user did not indicate EOR in this case.
4163 			 * The sp will be cleaned during free of the asoc.
4164 			 */
4165 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4166 			    sctp_streamhead);
4167 			if ((sp) && (sp->length == 0)) {
4168 				/* Let cleanup code purge it */
4169 				if (sp->msg_is_complete) {
4170 					asoc->stream_queue_cnt--;
4171 				} else {
4172 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4173 					asoc->locked_on_sending = NULL;
4174 					asoc->stream_queue_cnt--;
4175 				}
4176 			}
4177 		}
4178 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4179 		    (asoc->stream_queue_cnt == 0)) {
4180 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4181 				/* Need to abort here */
4182 				struct mbuf *op_err;
4183 
4184 		abort_out_now:
4185 				*abort_now = 1;
4186 				/* XXX */
4187 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4188 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4189 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4190 				return;
4191 			} else {
4192 				struct sctp_nets *netp;
4193 
4194 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4195 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4196 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4197 				}
4198 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4199 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4200 				sctp_stop_timers_for_shutdown(stcb);
4201 				if (asoc->alternate) {
4202 					netp = asoc->alternate;
4203 				} else {
4204 					netp = asoc->primary_destination;
4205 				}
4206 				sctp_send_shutdown(stcb, netp);
4207 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4208 				    stcb->sctp_ep, stcb, netp);
4209 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4210 				    stcb->sctp_ep, stcb, netp);
4211 			}
4212 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4213 		    (asoc->stream_queue_cnt == 0)) {
4214 			struct sctp_nets *netp;
4215 
4216 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4217 				goto abort_out_now;
4218 			}
4219 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4220 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4221 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4222 			sctp_stop_timers_for_shutdown(stcb);
4223 			if (asoc->alternate) {
4224 				netp = asoc->alternate;
4225 			} else {
4226 				netp = asoc->primary_destination;
4227 			}
4228 			sctp_send_shutdown_ack(stcb, netp);
4229 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4230 			    stcb->sctp_ep, stcb, netp);
4231 		}
4232 	}
4233 	/*********************************************/
4234 	/* Here we perform PR-SCTP procedures        */
4235 	/* (section 4.2)                             */
4236 	/*********************************************/
4237 	/* C1. update advancedPeerAckPoint */
4238 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4239 		asoc->advanced_peer_ack_point = cumack;
4240 	}
4241 	/* PR-Sctp issues need to be addressed too */
4242 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4243 		struct sctp_tmit_chunk *lchk;
4244 		uint32_t old_adv_peer_ack_point;
4245 
4246 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4247 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4248 		/* C3. See if we need to send a Fwd-TSN */
4249 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4250 			/*
4251 			 * ISSUE with ECN, see FWD-TSN processing.
4252 			 */
4253 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4254 				send_forward_tsn(stcb, asoc);
4255 			} else if (lchk) {
4256 				/* try to FR fwd-tsn's that get lost too */
4257 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4258 					send_forward_tsn(stcb, asoc);
4259 				}
4260 			}
4261 		}
4262 		if (lchk) {
4263 			/* Assure a timer is up */
4264 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4265 			    stcb->sctp_ep, stcb, lchk->whoTo);
4266 		}
4267 	}
4268 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4269 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4270 		    rwnd,
4271 		    stcb->asoc.peers_rwnd,
4272 		    stcb->asoc.total_flight,
4273 		    stcb->asoc.total_output_queue_size);
4274 	}
4275 }
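
/*
 * A note on dispatch (an assumption about the callers, for orientation
 * only): the express path above is meant for plain SACKs that move only the
 * cumulative ack, while anything carrying gap-ack, nr-gap-ack, or
 * duplicate-TSN reports goes through the full sctp_handle_sack() below,
 * roughly:
 *
 *	if ((num_seg == 0) && (num_nr_seg == 0) && (num_dup == 0))
 *		sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
 *		    &abort_now, ecne_seen);
 *	else
 *		sctp_handle_sack(m, offset_seg, offset_dup, stcb, num_seg,
 *		    num_nr_seg, num_dup, &abort_now, flags,
 *		    cum_ack, a_rwnd, ecne_seen);
 */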
4276 
4277 void
4278 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4279     struct sctp_tcb *stcb,
4280     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4281     int *abort_now, uint8_t flags,
4282     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4283 {
4284 	struct sctp_association *asoc;
4285 	struct sctp_tmit_chunk *tp1, *tp2;
4286 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4287 	uint16_t wake_him = 0;
4288 	uint32_t send_s = 0;
4289 	long j;
4290 	int accum_moved = 0;
4291 	int will_exit_fast_recovery = 0;
4292 	uint32_t a_rwnd, old_rwnd;
4293 	int win_probe_recovery = 0;
4294 	int win_probe_recovered = 0;
4295 	struct sctp_nets *net = NULL;
4296 	int done_once;
4297 	int rto_ok = 1;
4298 	uint8_t reneged_all = 0;
4299 	uint8_t cmt_dac_flag;
4300 
4301 	/*
4302 	 * we take any chance we can to service our queues since we cannot
4303 	 * get awoken when the socket is read from :<
4304 	 */
4305 	/*
4306 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4307 	 * old sack, if so discard. 2) If there is nothing left in the send
4308 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4309 	 * too, update any rwnd change and verify no timers are running.
4310 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4311 	 * moved process these first and note that it moved. 4) Process any
4312 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4313 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4314 	 * sync up flightsizes and things, stop all timers and also check
4315 	 * for shutdown_pending state. If so then go ahead and send off the
4316 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4317 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4318 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4319 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4320 	 * if in shutdown_recv state.
4321 	 */
4322 	SCTP_TCB_LOCK_ASSERT(stcb);
4323 	/* CMT DAC algo */
4324 	this_sack_lowest_newack = 0;
4325 	SCTP_STAT_INCR(sctps_slowpath_sack);
4326 	last_tsn = cum_ack;
4327 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4328 #ifdef SCTP_ASOCLOG_OF_TSNS
4329 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4330 	stcb->asoc.cumack_log_at++;
4331 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4332 		stcb->asoc.cumack_log_at = 0;
4333 	}
4334 #endif
4335 	a_rwnd = rwnd;
4336 
4337 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4338 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4339 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4340 	}
4341 	old_rwnd = stcb->asoc.peers_rwnd;
4342 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4343 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4344 		    stcb->asoc.overall_error_count,
4345 		    0,
4346 		    SCTP_FROM_SCTP_INDATA,
4347 		    __LINE__);
4348 	}
4349 	stcb->asoc.overall_error_count = 0;
4350 	asoc = &stcb->asoc;
4351 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4352 		sctp_log_sack(asoc->last_acked_seq,
4353 		    cum_ack,
4354 		    0,
4355 		    num_seg,
4356 		    num_dup,
4357 		    SCTP_LOG_NEW_SACK);
4358 	}
4359 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4360 		uint16_t i;
4361 		uint32_t *dupdata, dblock;
4362 
4363 		for (i = 0; i < num_dup; i++) {
4364 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4365 			    sizeof(uint32_t), (uint8_t *) & dblock);
4366 			if (dupdata == NULL) {
4367 				break;
4368 			}
4369 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4370 		}
4371 	}
4372 	/* reality check */
4373 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4374 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4375 		    sctpchunk_listhead);
4376 		send_s = tp1->rec.data.TSN_seq + 1;
4377 	} else {
4378 		tp1 = NULL;
4379 		send_s = asoc->sending_seq;
4380 	}
4381 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4382 		struct mbuf *op_err;
4383 		char msg[SCTP_DIAG_INFO_LEN];
4384 
4385 		/*
4386 		 * no way, we have not even sent this TSN out yet. Peer is
4387 		 * hopelessly messed up with us.
4388 		 */
4389 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4390 		    cum_ack, send_s);
4391 		if (tp1) {
4392 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4393 			    tp1->rec.data.TSN_seq, (void *)tp1);
4394 		}
4395 hopeless_peer:
4396 		*abort_now = 1;
4397 		/* XXX */
4398 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4399 		    cum_ack, send_s);
4400 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4401 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4402 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4403 		return;
4404 	}
4405 	/**********************/
4406 	/* 1) check the range */
4407 	/**********************/
4408 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4409 		/* acking something behind */
4410 		return;
4411 	}
4412 	/* update the Rwnd of the peer */
4413 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4414 	    TAILQ_EMPTY(&asoc->send_queue) &&
4415 	    (asoc->stream_queue_cnt == 0)) {
4416 		/* nothing left on send/sent and strmq */
4417 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4418 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4419 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4420 		}
4421 		asoc->peers_rwnd = a_rwnd;
4422 		if (asoc->sent_queue_retran_cnt) {
4423 			asoc->sent_queue_retran_cnt = 0;
4424 		}
4425 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4426 			/* SWS sender side engages */
4427 			asoc->peers_rwnd = 0;
4428 		}
4429 		/* stop any timers */
4430 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4431 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4432 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4433 			net->partial_bytes_acked = 0;
4434 			net->flight_size = 0;
4435 		}
4436 		asoc->total_flight = 0;
4437 		asoc->total_flight_count = 0;
4438 		return;
4439 	}
4440 	/*
4441 	 * We init net_ack and net_ack2 to 0. These are used to track two
4442 	 * things. The total byte count acked is tracked in net_ack, while
4443 	 * net_ack2 tracks the total bytes acked that are unambiguous and
4444 	 * were never retransmitted. We track these on a per
4445 	 * destination address basis.
4446 	 */
4447 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4448 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4449 			/* Drag along the window_tsn for cwr's */
4450 			net->cwr_window_tsn = cum_ack;
4451 		}
4452 		net->prev_cwnd = net->cwnd;
4453 		net->net_ack = 0;
4454 		net->net_ack2 = 0;
4455 
4456 		/*
4457 		 * CMT: Reset CUC and Fast recovery algo variables before
4458 		 * SACK processing
4459 		 */
4460 		net->new_pseudo_cumack = 0;
4461 		net->will_exit_fast_recovery = 0;
4462 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4463 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4464 		}
4465 	}
4466 	/* process the new consecutive TSN first */
4467 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4468 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4469 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4470 				accum_moved = 1;
4471 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4472 					/*
4473 					 * If it is less than ACKED, it is
4474 					 * now no longer in flight. Higher
4475 					 * values may occur during marking
4476 					 */
4477 					if ((tp1->whoTo->dest_state &
4478 					    SCTP_ADDR_UNCONFIRMED) &&
4479 					    (tp1->snd_count < 2)) {
4480 						/*
4481 						 * If there was no retran
4482 						 * and the address is
4483 						 * un-confirmed and we sent
4484 						 * there and it is now
4485 						 * sacked, it's confirmed;
4486 						 * mark it so.
4487 						 */
4488 						tp1->whoTo->dest_state &=
4489 						    ~SCTP_ADDR_UNCONFIRMED;
4490 					}
4491 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4492 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4493 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4494 							    tp1->whoTo->flight_size,
4495 							    tp1->book_size,
4496 							    (uint32_t) (uintptr_t) tp1->whoTo,
4497 							    tp1->rec.data.TSN_seq);
4498 						}
4499 						sctp_flight_size_decrease(tp1);
4500 						sctp_total_flight_decrease(stcb, tp1);
4501 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4502 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4503 							    tp1);
4504 						}
4505 					}
4506 					tp1->whoTo->net_ack += tp1->send_size;
4507 
4508 					/* CMT SFR and DAC algos */
4509 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4510 					tp1->whoTo->saw_newack = 1;
4511 
4512 					if (tp1->snd_count < 2) {
4513 						/*
4514 						 * True non-retransmitted
4515 						 * chunk
4516 						 */
4517 						tp1->whoTo->net_ack2 +=
4518 						    tp1->send_size;
4519 
4520 						/* update RTO too? */
4521 						if (tp1->do_rtt) {
4522 							if (rto_ok) {
4523 								tp1->whoTo->RTO =
4524 								    sctp_calculate_rto(stcb,
4525 								    asoc, tp1->whoTo,
4526 								    &tp1->sent_rcv_time,
4527 								    sctp_align_safe_nocopy,
4528 								    SCTP_RTT_FROM_DATA);
4529 								rto_ok = 0;
4530 							}
4531 							if (tp1->whoTo->rto_needed == 0) {
4532 								tp1->whoTo->rto_needed = 1;
4533 							}
4534 							tp1->do_rtt = 0;
4535 						}
4536 					}
4537 					/*
4538 					 * CMT: CUCv2 algorithm. From the
4539 					 * cumack'd TSNs, for each TSN being
4540 					 * acked for the first time, set the
4541 					 * following variables for the
4542 					 * corresponding destination.
4543 					 * new_pseudo_cumack will trigger a
4544 					 * cwnd update.
4545 					 * find_(rtx_)pseudo_cumack will
4546 					 * trigger search for the next
4547 					 * expected (rtx-)pseudo-cumack.
4548 					 */
4549 					tp1->whoTo->new_pseudo_cumack = 1;
4550 					tp1->whoTo->find_pseudo_cumack = 1;
4551 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4552 
4553 
4554 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4555 						sctp_log_sack(asoc->last_acked_seq,
4556 						    cum_ack,
4557 						    tp1->rec.data.TSN_seq,
4558 						    0,
4559 						    0,
4560 						    SCTP_LOG_TSN_ACKED);
4561 					}
4562 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4563 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4564 					}
4565 				}
4566 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4567 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4568 #ifdef SCTP_AUDITING_ENABLED
4569 					sctp_audit_log(0xB3,
4570 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4571 #endif
4572 				}
4573 				if (tp1->rec.data.chunk_was_revoked) {
4574 					/* deflate the cwnd */
4575 					tp1->whoTo->cwnd -= tp1->book_size;
4576 					tp1->rec.data.chunk_was_revoked = 0;
4577 				}
4578 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4579 					tp1->sent = SCTP_DATAGRAM_ACKED;
4580 				}
4581 			}
4582 		} else {
4583 			break;
4584 		}
4585 	}
4586 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4587 	/* always set this up to cum-ack */
4588 	asoc->this_sack_highest_gap = last_tsn;
4589 
4590 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4591 
4592 		/*
4593 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4594 		 * to be greater than the cumack. Also reset saw_newack to 0
4595 		 * for all dests.
4596 		 */
4597 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4598 			net->saw_newack = 0;
4599 			net->this_sack_highest_newack = last_tsn;
4600 		}
4601 
4602 		/*
4603 		 * this_sack_highest_gap will increase while handling NEW
4604 		 * segments; this_sack_highest_newack will increase while
4605 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4606 		 * used for CMT DAC algo. saw_newack will also change.
4607 		 */
4608 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4609 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4610 		    num_seg, num_nr_seg, &rto_ok)) {
4611 			wake_him++;
4612 		}
4613 		/*
4614 		 * validate the biggest_tsn_acked in the gap acks if strict
4615 		 * adherence is wanted.
4616 		 */
4617 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4618 			/*
4619 			 * peer is either confused or we are under attack.
4620 			 * We must abort.
4621 			 */
4622 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4623 			    biggest_tsn_acked, send_s);
4624 			goto hopeless_peer;
4625 		}
4626 	}
4627 	/*******************************************/
4628 	/* cancel ALL T3-send timer if accum moved */
4629 	/*******************************************/
4630 	if (asoc->sctp_cmt_on_off > 0) {
4631 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4632 			if (net->new_pseudo_cumack)
4633 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4634 				    stcb, net,
4635 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4636 
4637 		}
4638 	} else {
4639 		if (accum_moved) {
4640 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4641 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4642 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4643 			}
4644 		}
4645 	}
4646 	/********************************************/
4647 	/* drop the acked chunks from the sentqueue */
4648 	/********************************************/
4649 	asoc->last_acked_seq = cum_ack;
4650 
4651 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4652 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4653 			break;
4654 		}
4655 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4656 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4657 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4658 #ifdef INVARIANTS
4659 			} else {
4660 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4661 #endif
4662 			}
4663 		}
4664 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4665 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4666 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4667 			asoc->trigger_reset = 1;
4668 		}
4669 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4670 		if (PR_SCTP_ENABLED(tp1->flags)) {
4671 			if (asoc->pr_sctp_cnt != 0)
4672 				asoc->pr_sctp_cnt--;
4673 		}
4674 		asoc->sent_queue_cnt--;
4675 		if (tp1->data) {
4676 			/* sa_ignore NO_NULL_CHK */
4677 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4678 			sctp_m_freem(tp1->data);
4679 			tp1->data = NULL;
4680 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4681 				asoc->sent_queue_cnt_removeable--;
4682 			}
4683 		}
4684 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4685 			sctp_log_sack(asoc->last_acked_seq,
4686 			    cum_ack,
4687 			    tp1->rec.data.TSN_seq,
4688 			    0,
4689 			    0,
4690 			    SCTP_LOG_FREE_SENT);
4691 		}
4692 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4693 		wake_him++;
4694 	}
4695 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4696 #ifdef INVARIANTS
4697 		panic("Warning flight size is positive and should be 0");
4698 #else
4699 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4700 		    asoc->total_flight);
4701 #endif
4702 		asoc->total_flight = 0;
4703 	}
4704 	/* sa_ignore NO_NULL_CHK */
4705 	if ((wake_him) && (stcb->sctp_socket)) {
4706 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4707 		struct socket *so;
4708 
4709 #endif
4710 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4711 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4712 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4713 		}
4714 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4715 		so = SCTP_INP_SO(stcb->sctp_ep);
4716 		atomic_add_int(&stcb->asoc.refcnt, 1);
4717 		SCTP_TCB_UNLOCK(stcb);
4718 		SCTP_SOCKET_LOCK(so, 1);
4719 		SCTP_TCB_LOCK(stcb);
4720 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4721 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4722 			/* assoc was freed while we were unlocked */
4723 			SCTP_SOCKET_UNLOCK(so, 1);
4724 			return;
4725 		}
4726 #endif
4727 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4729 		SCTP_SOCKET_UNLOCK(so, 1);
4730 #endif
4731 	} else {
4732 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4733 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4734 		}
4735 	}
4736 
4737 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4738 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4739 			/* Setup so we will exit RFC2582 fast recovery */
4740 			will_exit_fast_recovery = 1;
4741 		}
4742 	}
4743 	/*
4744 	 * Check for revoked fragments:
4745 	 *
4746 	 * If the previous sack had no frags, then we can't have any revoked.
4747 	 * If the previous sack had frags: if we now have frags (num_seg > 0),
4748 	 *   call sctp_check_for_revoked() to tell if the peer revoked some
4749 	 *   of them; else the peer revoked all ACKED fragments, since we had
4750 	 *   some before and now we have NONE.
4751 	 */
4752 
4753 	if (num_seg) {
4754 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4755 		asoc->saw_sack_with_frags = 1;
4756 	} else if (asoc->saw_sack_with_frags) {
4757 		int cnt_revoked = 0;
4758 
4759 		/* Peer revoked all dg's marked or acked */
4760 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4761 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4762 				tp1->sent = SCTP_DATAGRAM_SENT;
4763 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4764 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4765 					    tp1->whoTo->flight_size,
4766 					    tp1->book_size,
4767 					    (uint32_t) (uintptr_t) tp1->whoTo,
4768 					    tp1->rec.data.TSN_seq);
4769 				}
4770 				sctp_flight_size_increase(tp1);
4771 				sctp_total_flight_increase(stcb, tp1);
4772 				tp1->rec.data.chunk_was_revoked = 1;
4773 				/*
4774 				 * To ensure that this increase in
4775 				 * flightsize, which is artificial, does not
4776 				 * throttle the sender, we also increase the
4777 				 * cwnd artificially.
4778 				 */
4779 				tp1->whoTo->cwnd += tp1->book_size;
4780 				cnt_revoked++;
4781 			}
4782 		}
4783 		if (cnt_revoked) {
4784 			reneged_all = 1;
4785 		}
4786 		asoc->saw_sack_with_frags = 0;
4787 	}
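	/*
	 * A note on the revoke handling above: for a revoked chunk of
	 * book_size B we re-add B to the flight and also add B to cwnd,
	 * so the usable window (cwnd - flight_size) is unchanged and the
	 * artificial flight increase cannot stall the sender.  E.g.:
	 *
	 *   cwnd = 8000, flight = 6000, B = 1000
	 *   -> cwnd = 9000, flight = 7000, window still 2000
	 */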
4788 	if (num_nr_seg > 0)
4789 		asoc->saw_sack_with_nr_frags = 1;
4790 	else
4791 		asoc->saw_sack_with_nr_frags = 0;
4792 
4793 	/* JRS - Use the congestion control given in the CC module */
4794 	if (ecne_seen == 0) {
4795 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 			if (net->net_ack2 > 0) {
4797 				/*
4798 				 * Karn's rule applies to clearing error
4799 				 * count, this is optional.
4800 				 */
4801 				net->error_count = 0;
4802 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4803 					/* addr came good */
4804 					net->dest_state |= SCTP_ADDR_REACHABLE;
4805 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4806 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4807 				}
4808 				if (net == stcb->asoc.primary_destination) {
4809 					if (stcb->asoc.alternate) {
4810 						/*
4811 						 * release the alternate,
4812 						 * primary is good
4813 						 */
4814 						sctp_free_remote_addr(stcb->asoc.alternate);
4815 						stcb->asoc.alternate = NULL;
4816 					}
4817 				}
4818 				if (net->dest_state & SCTP_ADDR_PF) {
4819 					net->dest_state &= ~SCTP_ADDR_PF;
4820 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4821 					    stcb->sctp_ep, stcb, net,
4822 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4823 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4824 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4825 					/* Done with this net */
4826 					net->net_ack = 0;
4827 				}
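				/*
				 * The restore below is, presumably, the
				 * usual SRTT/RTTVAR form:
				 *
				 *   RTO = (lastsa >> SCTP_RTT_SHIFT) + lastsv
				 *
				 * i.e. smoothed RTT plus a variance term,
				 * clamped to [minrto, maxrto], which undoes
				 * any exponential backoff from timeouts.
				 */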
4828 				/* restore any doubled timers */
4829 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4830 				if (net->RTO < stcb->asoc.minrto) {
4831 					net->RTO = stcb->asoc.minrto;
4832 				}
4833 				if (net->RTO > stcb->asoc.maxrto) {
4834 					net->RTO = stcb->asoc.maxrto;
4835 				}
4836 			}
4837 		}
4838 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4839 	}
4840 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4841 		/* nothing left in-flight */
4842 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4843 			/* stop all timers */
4844 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4845 			    stcb, net,
4846 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4847 			net->flight_size = 0;
4848 			net->partial_bytes_acked = 0;
4849 		}
4850 		asoc->total_flight = 0;
4851 		asoc->total_flight_count = 0;
4852 	}
4853 	/**********************************/
4854 	/* Now what about shutdown issues */
4855 	/**********************************/
4856 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4857 		/* nothing left on the send queue, consider it done */
4858 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4859 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4860 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4861 		}
4862 		asoc->peers_rwnd = a_rwnd;
4863 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4864 			/* SWS sender side engages */
4865 			asoc->peers_rwnd = 0;
4866 		}
4867 		/* clean up */
4868 		if ((asoc->stream_queue_cnt == 1) &&
4869 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4870 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4871 		    (asoc->locked_on_sending)
4872 		    ) {
4873 			struct sctp_stream_queue_pending *sp;
4874 
4875 			/*
4876 			 * We may be in a state where everything got across
4877 			 * but nothing more can be written due to a shutdown.
4878 			 * We abort, since the user did not indicate EOR.
4879 			 */
4880 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4881 			    sctp_streamhead);
4882 			if ((sp) && (sp->length == 0)) {
4883 				asoc->locked_on_sending = NULL;
4884 				if (sp->msg_is_complete) {
4885 					asoc->stream_queue_cnt--;
4886 				} else {
4887 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4888 					asoc->stream_queue_cnt--;
4889 				}
4890 			}
4891 		}
4892 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4893 		    (asoc->stream_queue_cnt == 0)) {
4894 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4895 				/* Need to abort here */
4896 				struct mbuf *op_err;
4897 
4898 		abort_out_now:
4899 				*abort_now = 1;
4900 				/* XXX */
4901 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4902 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4903 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4904 				return;
4905 			} else {
4906 				struct sctp_nets *netp;
4907 
4908 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4909 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4910 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4911 				}
4912 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4913 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4914 				sctp_stop_timers_for_shutdown(stcb);
4915 				if (asoc->alternate) {
4916 					netp = asoc->alternate;
4917 				} else {
4918 					netp = asoc->primary_destination;
4919 				}
4920 				sctp_send_shutdown(stcb, netp);
4921 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4922 				    stcb->sctp_ep, stcb, netp);
4923 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4924 				    stcb->sctp_ep, stcb, netp);
4925 			}
4926 			return;
4927 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4928 		    (asoc->stream_queue_cnt == 0)) {
4929 			struct sctp_nets *netp;
4930 
4931 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4932 				goto abort_out_now;
4933 			}
4934 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4935 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4936 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4937 			sctp_stop_timers_for_shutdown(stcb);
4938 			if (asoc->alternate) {
4939 				netp = asoc->alternate;
4940 			} else {
4941 				netp = asoc->primary_destination;
4942 			}
4943 			sctp_send_shutdown_ack(stcb, netp);
4944 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4945 			    stcb->sctp_ep, stcb, netp);
4946 			return;
4947 		}
4948 	}
4949 	/*
4950 	 * Now here we are going to recycle net_ack for a different use...
4951 	 * HEADS UP.
4952 	 */
4953 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4954 		net->net_ack = 0;
4955 	}
4956 
4957 	/*
4958 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4959 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4960 	 * automatically ensure that.
4961 	 */
4962 	if ((asoc->sctp_cmt_on_off > 0) &&
4963 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4964 	    (cmt_dac_flag == 0)) {
4965 		this_sack_lowest_newack = cum_ack;
4966 	}
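	/*
	 * Background on the DAC check above, as I understand it: with
	 * delayed acks the peer echoes in the SACK whether it acked one
	 * packet (flag 0) or two (flag 1).  A flag of 0 permits no extra
	 * miss-indication marking, and pinning this_sack_lowest_newack to
	 * cum_ack is what disables that extra marking downstream.
	 */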
4967 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4968 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4969 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4970 	}
4971 	/* JRS - Use the congestion control given in the CC module */
4972 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4973 
4974 	/* Now are we exiting loss recovery ? */
4975 	if (will_exit_fast_recovery) {
4976 		/* Ok, we must exit fast recovery */
4977 		asoc->fast_retran_loss_recovery = 0;
4978 	}
4979 	if ((asoc->sat_t3_loss_recovery) &&
4980 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4981 		/* end satellite t3 loss recovery */
4982 		asoc->sat_t3_loss_recovery = 0;
4983 	}
4984 	/*
4985 	 * CMT Fast recovery
4986 	 */
4987 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4988 		if (net->will_exit_fast_recovery) {
4989 			/* Ok, we must exit fast recovery */
4990 			net->fast_retran_loss_recovery = 0;
4991 		}
4992 	}
4993 
4994 	/* Adjust and set the new rwnd value */
4995 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4996 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4997 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4998 	}
4999 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5000 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5001 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5002 		/* SWS sender side engages */
5003 		asoc->peers_rwnd = 0;
5004 	}
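	/*
	 * Rough sketch of the rwnd bookkeeping above: a_rwnd is the peer's
	 * window as of the cum-ack point, so everything still in flight
	 * (plus a per-chunk overhead estimate) is subtracted:
	 *
	 *   peers_rwnd = a_rwnd - (total_flight +
	 *       total_flight_count * sctp_peer_chunk_oh)
	 *
	 * with sctp_sbspace_sub() flooring at 0, and anything below the
	 * SWS threshold treated as a closed window.
	 */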
5005 	if (asoc->peers_rwnd > old_rwnd) {
5006 		win_probe_recovery = 1;
5007 	}
5008 	/*
5009 	 * Now we must setup so we have a timer up for anyone with
5010 	 * outstanding data.
5011 	 */
5012 	done_once = 0;
5013 again:
5014 	j = 0;
5015 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5016 		if (win_probe_recovery && (net->window_probe)) {
5017 			win_probe_recovered = 1;
5018 			/*-
5019 			 * Find the first chunk that was used for a
5020 			 * window probe and clear the event. Put it
5021 			 * back into the send queue as if it had
5022 			 * not been sent.
5023 			 */
5024 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5025 				if (tp1->window_probe) {
5026 					sctp_window_probe_recovery(stcb, asoc, tp1);
5027 					break;
5028 				}
5029 			}
5030 		}
5031 		if (net->flight_size) {
5032 			j++;
5033 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5034 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5035 				    stcb->sctp_ep, stcb, net);
5036 			}
5037 			if (net->window_probe) {
5038 				net->window_probe = 0;
5039 			}
5040 		} else {
5041 			if (net->window_probe) {
5042 				/*
5043 				 * For window probes we must ensure that a
5044 				 * timer is still running there.
5045 				 */
5046 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5047 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5048 					    stcb->sctp_ep, stcb, net);
5049 
5050 				}
5051 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5052 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5053 				    stcb, net,
5054 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5055 			}
5056 		}
5057 	}
5058 	if ((j == 0) &&
5059 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5060 	    (asoc->sent_queue_retran_cnt == 0) &&
5061 	    (win_probe_recovered == 0) &&
5062 	    (done_once == 0)) {
5063 		/*
5064 		 * Huh, this should not happen unless all packets are
5065 		 * PR-SCTP and marked to be skipped, of course.
5066 		 */
5067 		if (sctp_fs_audit(asoc)) {
5068 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5069 				net->flight_size = 0;
5070 			}
5071 			asoc->total_flight = 0;
5072 			asoc->total_flight_count = 0;
5073 			asoc->sent_queue_retran_cnt = 0;
5074 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5075 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5076 					sctp_flight_size_increase(tp1);
5077 					sctp_total_flight_increase(stcb, tp1);
5078 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5079 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5080 				}
5081 			}
5082 		}
5083 		done_once = 1;
5084 		goto again;
5085 	}
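	/*
	 * The flight-size audit above, in rough terms: zero every counter,
	 * then walk sent_queue re-adding each chunk still below
	 * SCTP_DATAGRAM_RESEND to the flight and re-counting the chunks
	 * marked for retransmission.  The done_once flag limits this
	 * repair-and-retry to a single pass.
	 */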
5086 	/*********************************************/
5087 	/* Here we perform PR-SCTP procedures        */
5088 	/* (section 4.2)                             */
5089 	/*********************************************/
5090 	/* C1. update advancedPeerAckPoint */
5091 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5092 		asoc->advanced_peer_ack_point = cum_ack;
5093 	}
5094 	/* C2. try to further move advancedPeerAckPoint ahead */
5095 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5096 		struct sctp_tmit_chunk *lchk;
5097 		uint32_t old_adv_peer_ack_point;
5098 
5099 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5100 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5101 		/* C3. See if we need to send a Fwd-TSN */
5102 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5103 			/*
5104 			 * ISSUE with ECN, see FWD-TSN processing.
5105 			 */
5106 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5107 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5108 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5109 				    old_adv_peer_ack_point);
5110 			}
5111 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5112 				send_forward_tsn(stcb, asoc);
5113 			} else if (lchk) {
5114 				/* try to fast-retransmit FWD-TSNs that get lost too */
5115 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5116 					send_forward_tsn(stcb, asoc);
5117 				}
5118 			}
5119 		}
5120 		if (lchk) {
5121 			/* Assure a timer is up */
5122 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5123 			    stcb->sctp_ep, stcb, lchk->whoTo);
5124 		}
5125 	}
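	/*
	 * A worked example for C1-C3 above: say cum_ack is 100 and TSNs
	 * 101-103 were abandoned (e.g. their lifetime expired).  C1 lifts
	 * advancedPeerAckPoint to 100, C2 advances it across the abandoned
	 * chunks to 103, and since 103 > 100, C3 sends a FORWARD-TSN so
	 * the peer moves its cumulative ack to 103.
	 */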
5126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5127 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5128 		    a_rwnd,
5129 		    stcb->asoc.peers_rwnd,
5130 		    stcb->asoc.total_flight,
5131 		    stcb->asoc.total_output_queue_size);
5132 	}
5133 }
5134 
5135 void
5136 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5137 {
5138 	/* Copy cum-ack */
5139 	uint32_t cum_ack, a_rwnd;
5140 
5141 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5142 	/* Arrange so a_rwnd does NOT change */
5143 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
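	/*
	 * Rough sketch of why this works: the express sack handler will
	 * recompute peers_rwnd as (a_rwnd - what is still in flight), so
	 * passing peers_rwnd + total_flight leaves the peer's rwnd
	 * essentially unchanged, which is what a SHUTDOWN (carrying no
	 * rwnd of its own) requires.
	 */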
5144 
5145 	/* Now call the express sack handling */
5146 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5147 }
5148 
5149 static void
5150 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5151     struct sctp_stream_in *strmin)
5152 {
5153 	struct sctp_queued_to_read *ctl, *nctl;
5154 	struct sctp_association *asoc;
5155 	uint32_t tt;
5156 	int need_reasm_check = 0, old;
5157 
5158 	asoc = &stcb->asoc;
5159 	tt = strmin->last_sequence_delivered;
5160 	if (asoc->idata_supported) {
5161 		old = 0;
5162 	} else {
5163 		old = 1;
5164 	}
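	/*
	 * SCTP_MSGID_GE/GT below compare in serial-number arithmetic whose
	 * width follows "old": 16-bit SSNs for classic DATA, 32-bit message
	 * ids for I-DATA.  E.g. with 16-bit wrap-around, 0x0001 counts as
	 * greater than 0xffff.
	 */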
5165 	/*
5166 	 * First deliver anything prior to and including the stream
5167 	 * sequence number that came in.
5168 	 */
5169 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5170 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5171 			/* this is deliverable now */
5172 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5173 				if (ctl->on_strm_q) {
5174 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5175 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5176 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5177 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5178 #ifdef INVARIANTS
5179 					} else {
5180 						panic("strmin: %p ctl: %p unknown %d",
5181 						    strmin, ctl, ctl->on_strm_q);
5182 #endif
5183 					}
5184 					ctl->on_strm_q = 0;
5185 				}
5186 				/* subtract pending on streams */
5187 				asoc->size_on_all_streams -= ctl->length;
5188 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5189 				/* deliver it to at least the delivery-q */
5190 				if (stcb->sctp_socket) {
5191 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5192 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5193 					    ctl,
5194 					    &stcb->sctp_socket->so_rcv,
5195 					    1, SCTP_READ_LOCK_HELD,
5196 					    SCTP_SO_NOT_LOCKED);
5197 				}
5198 			} else {
5199 				/* It's a fragmented message */
5200 				if (ctl->first_frag_seen) {
5201 					/*
5202 					 * Make it so this is next to
5203 					 * deliver, we restore later
5204 					 */
5205 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5206 					need_reasm_check = 1;
5207 					break;
5208 				}
5209 			}
5210 		} else {
5211 			/* no more delivery now. */
5212 			break;
5213 		}
5214 	}
5215 	if (need_reasm_check) {
5216 		int ret;
5217 
5218 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5219 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5220 			/* Restore the next to deliver unless we are ahead */
5221 			strmin->last_sequence_delivered = tt;
5222 		}
5223 		if (ret == 0) {
5224 			/* Left the front Partial one on */
5225 			return;
5226 		}
5227 		need_reasm_check = 0;
5228 	}
5229 	/*
5230 	 * Now we must deliver things in the queue the normal way, if any
5231 	 * are now ready.
5232 	 */
5233 	tt = strmin->last_sequence_delivered + 1;
5234 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5235 		if (tt == ctl->sinfo_ssn) {
5236 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5237 				/* this is deliverable now */
5238 				if (ctl->on_strm_q) {
5239 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5240 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5241 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5242 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5243 #ifdef INVARIANTS
5244 					} else {
5245 						panic("strmin: %p ctl: %p unknown %d",
5246 						    strmin, ctl, ctl->on_strm_q);
5247 #endif
5248 					}
5249 					ctl->on_strm_q = 0;
5250 				}
5251 				/* subtract pending on streams */
5252 				asoc->size_on_all_streams -= ctl->length;
5253 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5254 				/* deliver it to at least the delivery-q */
5255 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5256 				if (stcb->sctp_socket) {
5257 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5258 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5259 					    ctl,
5260 					    &stcb->sctp_socket->so_rcv, 1,
5261 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5262 
5263 				}
5264 				tt = strmin->last_sequence_delivered + 1;
5265 			} else {
5266 				/* It's a fragmented message */
5267 				if (ctl->first_frag_seen) {
5268 					/*
5269 					 * Make it so this is next to
5270 					 * deliver
5271 					 */
5272 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5273 					need_reasm_check = 1;
5274 					break;
5275 				}
5276 			}
5277 		} else {
5278 			break;
5279 		}
5280 	}
5281 	if (need_reasm_check) {
5282 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5283 	}
5284 }
5285 
5286 
5287 static void
5288 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5289     struct sctp_association *asoc,
5290     uint16_t stream, uint32_t seq, int ordered, int old)
5291 {
5292 	struct sctp_queued_to_read *control;
5293 	struct sctp_stream_in *strm;
5294 	struct sctp_tmit_chunk *chk, *nchk;
5295 
5296 	/*
5297 	 * For now, large messages held on the stream reassembly queue that
5298 	 * are complete will be tossed too. In theory we could do more work:
5299 	 * spin through, stop after dumping one message (i.e., on seeing the
5300 	 * start of a new message at the head), and call the delivery
5301 	 * function to see if it can be delivered. But for now we just dump
5302 	 * everything on the queue.
5303 	 */
5304 	strm = &asoc->strmin[stream];
5305 	control = find_reasm_entry(strm, (uint32_t) seq, ordered, old);
5306 	if (control == NULL) {
5307 		/* Not found */
5308 		return;
5309 	}
5310 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5311 		/* Purge hanging chunks */
5312 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5313 		asoc->size_on_reasm_queue -= chk->send_size;
5314 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5315 		if (chk->data) {
5316 			sctp_m_freem(chk->data);
5317 			chk->data = NULL;
5318 		}
5319 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5320 	}
5321 	TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5322 	if (control->on_read_q == 0) {
5323 		sctp_free_remote_addr(control->whoFrom);
5324 		if (control->data) {
5325 			sctp_m_freem(control->data);
5326 			control->data = NULL;
5327 		}
5328 		sctp_free_a_readq(stcb, control);
5329 	}
5330 }
5331 
5332 
5333 void
5334 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5335     struct sctp_forward_tsn_chunk *fwd,
5336     int *abort_flag, struct mbuf *m, int offset)
5337 {
5338 	/* The pr-sctp fwd tsn */
5339 	/*
5340 	 * Here we perform all the data receiver side steps for
5341 	 * processing FwdTSN, as required by the PR-SCTP draft.
5342 	 *
5343 	 * Assume we get FwdTSN(x):
5344 	 * 1) update local cumTSN to x
5345 	 * 2) try to further advance cumTSN to x + others we have
5346 	 * 3) examine and update the re-ordering queue on pr-in-streams
5347 	 * 4) clean up the re-assembly queue
5348 	 * 5) send a SACK to report where we are
5349 	 */
5350 	struct sctp_association *asoc;
5351 	uint32_t new_cum_tsn, gap;
5352 	unsigned int i, fwd_sz, m_size;
5353 	uint32_t str_seq;
5354 	struct sctp_stream_in *strm;
5355 	struct sctp_queued_to_read *ctl, *sv;
5356 
5357 	asoc = &stcb->asoc;
5358 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5359 		SCTPDBG(SCTP_DEBUG_INDATA1,
5360 		    "Bad size: fwd-tsn chunk too small\n");
5361 		return;
5362 	}
5363 	m_size = (stcb->asoc.mapping_array_size << 3);
5364 	/*************************************************************/
5365 	/* 1. Here we update local cumTSN and shift the bitmap array */
5366 	/*************************************************************/
5367 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5368 
5369 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5370 		/* Already got there ... */
5371 		return;
5372 	}
5373 	/*
5374 	 * now we know the new TSN is more advanced, let's find the actual
5375 	 * gap
5376 	 */
5377 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5378 	asoc->cumulative_tsn = new_cum_tsn;
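	/*
	 * At this point gap is the serial distance from the base of the
	 * mapping array to the new cum-ack, and m_size is the array size
	 * in bits (bytes << 3).  A gap inside the array just marks TSNs
	 * below it as received; a gap beyond it makes the whole map stale,
	 * so it is reset, unless the jump even exceeds what our advertised
	 * rwnd allows, which we treat as an attack and abort.
	 */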
5379 	if (gap >= m_size) {
5380 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5381 			struct mbuf *op_err;
5382 			char msg[SCTP_DIAG_INFO_LEN];
5383 
5384 			/*
5385 			 * Out of range (even as single-byte chunks this
5386 			 * exceeds the rwnd we give out); must be an attacker.
5387 			 */
5388 			*abort_flag = 1;
5389 			snprintf(msg, sizeof(msg),
5390 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5391 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5392 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5393 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5394 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5395 			return;
5396 		}
5397 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5398 
5399 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5400 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5401 		asoc->highest_tsn_inside_map = new_cum_tsn;
5402 
5403 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5404 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5405 
5406 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5407 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5408 		}
5409 	} else {
5410 		SCTP_TCB_LOCK_ASSERT(stcb);
5411 		for (i = 0; i <= gap; i++) {
5412 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5413 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5414 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5415 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5416 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5417 				}
5418 			}
5419 		}
5420 	}
5421 	/*************************************************************/
5422 	/* 2. Clear up re-assembly queue                             */
5423 	/*************************************************************/
5424 
5425 	/* This is now done as part of clearing up the stream/seq */
5426 
5427 	/*******************************************************/
5428 	/* 3. Update the PR-stream re-ordering queues and fix  */
5429 	/*    delivery issues as needed.                        */
5430 	/*******************************************************/
5431 	fwd_sz -= sizeof(*fwd);
5432 	if (m && fwd_sz) {
5433 		/* New method. */
5434 		unsigned int num_str;
5435 		uint32_t sequence;
5436 		uint16_t stream;
5437 		uint16_t ordered, flags;
5438 		int old;
5439 		struct sctp_strseq *stseq, strseqbuf;
5440 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5441 
5442 		offset += sizeof(*fwd);
5443 
5444 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5445 		if (asoc->idata_supported) {
5446 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5447 			old = 0;
5448 		} else {
5449 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5450 			old = 1;
5451 		}
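		/*
		 * Each entry that follows is a stream/sequence report.
		 * Judging from the accesses below, the wire formats are
		 * roughly:
		 *
		 *   struct sctp_strseq     { uint16_t stream;
		 *                            uint16_t sequence; };
		 *   struct sctp_strseq_mid { uint16_t stream;
		 *                            uint16_t flags;
		 *                            uint32_t msg_id; };
		 *
		 * the _mid form being the I-DATA variant, whose flags may
		 * carry PR_SCTP_UNORDERED_FLAG.
		 */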
5452 		for (i = 0; i < num_str; i++) {
5453 			if (asoc->idata_supported) {
5454 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5455 				    sizeof(struct sctp_strseq_mid),
5456 				    (uint8_t *) & strseqbuf_m);
5457 				offset += sizeof(struct sctp_strseq_mid);
5458 				if (stseq_m == NULL) {
5459 					break;
5460 				}
5461 				stream = ntohs(stseq_m->stream);
5462 				sequence = ntohl(stseq_m->msg_id);
5463 				flags = ntohs(stseq_m->flags);
5464 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5465 					ordered = 0;
5466 				} else {
5467 					ordered = 1;
5468 				}
5469 			} else {
5470 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5471 				    sizeof(struct sctp_strseq),
5472 				    (uint8_t *) & strseqbuf);
5473 				offset += sizeof(struct sctp_strseq);
5474 				if (stseq == NULL) {
5475 					break;
5476 				}
5477 				stream = ntohs(stseq->stream);
5478 				sequence = (uint32_t) ntohs(stseq->sequence);
5479 				ordered = 1;
5480 			}
5481 			/* Convert */
5482 
5483 			/* now process */
5484 
5485 			/*
5486 			 * Ok, we now look for the stream/seq on the read
5487 			 * queue where it's not all delivered. If we find it,
5488 			 * we transmute the read entry into a PDI_ABORTED.
5489 			 */
5490 			if (stream >= asoc->streamincnt) {
5491 				/* screwed up streams, stop!  */
5492 				break;
5493 			}
5494 			if ((asoc->str_of_pdapi == stream) &&
5495 			    (asoc->ssn_of_pdapi == sequence)) {
5496 				/*
5497 				 * If this is the one we were partially
5498 				 * delivering now then we no longer are.
5499 				 * Note this will change with the reassembly
5500 				 * re-write.
5501 				 */
5502 				asoc->fragmented_delivery_inprogress = 0;
5503 			}
5504 			strm = &asoc->strmin[stream];
5505 			sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence, ordered, old);
5506 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5507 				if ((ctl->sinfo_stream == stream) &&
5508 				    (ctl->sinfo_ssn == sequence)) {
5509 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5510 					ctl->pdapi_aborted = 1;
5511 					sv = stcb->asoc.control_pdapi;
5512 					ctl->end_added = 1;
5513 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5514 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5515 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5516 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5517 #ifdef INVARIANTS
5518 					} else if (ctl->on_strm_q) {
5519 						panic("strm: %p ctl: %p unknown %d",
5520 						    strm, ctl, ctl->on_strm_q);
5521 #endif
5522 					}
5523 					ctl->on_strm_q = 0;
5524 					stcb->asoc.control_pdapi = ctl;
5525 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5526 					    stcb,
5527 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5528 					    (void *)&str_seq,
5529 					    SCTP_SO_NOT_LOCKED);
5530 					stcb->asoc.control_pdapi = sv;
5531 					break;
5532 				} else if ((ctl->sinfo_stream == stream) &&
5533 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5534 					/* We are past our victim SSN */
5535 					break;
5536 				}
5537 			}
5538 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5539 				/* Update the sequence number */
5540 				strm->last_sequence_delivered = sequence;
5541 			}
5542 			/* now kick the stream the new way */
5543 			/* sa_ignore NO_NULL_CHK */
5544 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5545 		}
5546 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5547 	}
5548 	/*
5549 	 * Now slide things forward.
5550 	 */
5551 	sctp_slide_mapping_arrays(stcb);
5552 }
5553