xref: /freebsd/sys/netinet/sctp_indata.c (revision 7f9dff23d3092aa33ad45b2b63e52469b3c13a6e)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many (1-to-M)
	 * socket, since sb_cc is the count that everyone has put up. When
	 * we rewrite sctp_soreceive() we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT yet been put on the socket queue but which
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it would otherwise be 0, to avoid silly window
	 * syndrome (SWS).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
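
/*
 * Worked example of the computation above (illustrative numbers only,
 * not part of the build): with 8000 bytes free in the socket buffer,
 * 2000 bytes / 2 chunks held on the reassembly queue and 1000 bytes /
 * 1 control on the stream queues, the advertised window shrinks well
 * below the raw free space. MSIZE is assumed to be 256 for the sketch.
 */
#if 0
static uint32_t
example_calc_rwnd(void)
{
	uint32_t calc = 8000;	/* free socket-buffer space */

	calc -= 2000 + 2 * 256;	/* reasm queue bytes + per-chunk mbuf overhead */
	calc -= 1000 + 1 * 256;	/* stream queue bytes + overhead; calc == 4232 */
	/*
	 * sctp_calc_rwnd() additionally subtracts my_rwnd_control_len and
	 * clamps the result to 1 when it gets that small, to avoid SWS.
	 */
	return (calc);
}
#endif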



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}
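
/*
 * Note (illustrative sketch, not part of the build): sinfo_flags carries
 * the DATA chunk flags in its upper byte, which is why the code above
 * stores (flags << 8) and why tests elsewhere in this file recover them
 * with (sinfo_flags >> 8).
 */
#if 0
static int
example_is_unordered(const struct sctp_queued_to_read *control)
{
	return (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) != 0);
}
#endif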

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
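
/*
 * Userland counterpart (illustrative sketch, not part of this file or
 * of the kernel build): how a receiver would enable and then parse the
 * SCTP_RCVINFO ancillary data that sctp_build_ctl_nchunk() lays out
 * above, per the RFC 6458 socket API. The descriptor fd is assumed to
 * be a connected one-to-one style SCTP socket.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/sctp.h>
#include <string.h>

static void
example_recv_rcvinfo(int fd)
{
	struct sctp_rcvinfo rcv;
	struct cmsghdr *cmsg;
	struct msghdr msg;
	struct iovec iov;
	char data[2048];
	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
	int on = 1;

	/* Ask the stack to attach SCTP_RCVINFO to every message. */
	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));
	iov.iov_base = data;
	iov.iov_len = sizeof(data);
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(fd, &msg, 0) < 0) {
		return;
	}
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if ((cmsg->cmsg_level == IPPROTO_SCTP) &&
		    (cmsg->cmsg_type == SCTP_RCVINFO)) {
			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
			/* rcv.rcv_sid, rcv.rcv_ssn, rcv.rcv_tsn, ... */
		}
	}
}
#endif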


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
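
/*
 * Illustrative sketch (an assumption about intent, not the macros'
 * actual definitions): SCTP_TSN_GT()/SCTP_TSN_GE() used above compare
 * TSNs in serial-number arithmetic (RFC 1982) so that comparisons stay
 * correct across the 32-bit wrap, where a plain "a > b" would fail.
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
	return (((a < b) && ((b - a) > (1U << 31))) ||
	    ((a > b) && ((a - b) < (1U << 31))));
}
/* example_tsn_gt(0x00000001, 0xffffffff) is true: 1 is "after" the wrap. */
#endif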

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t bits, unordered;

	bits = (control->sinfo_flags >> 8);
	unordered = bits & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one message can be here in old
				 * style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate MID?? Return
				 * -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q,
					    at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
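
/*
 * Illustrative sketch (hedged; not the real macro definition):
 * SCTP_MID_GT() used in the sort above is expected to compare full
 * 32-bit MIDs when I-DATA is supported and to fall back to a 16-bit
 * SSN comparison otherwise, both in wrap-safe serial arithmetic.
 */
#if 0
static int
example_mid_gt(int idata_supported, uint32_t a, uint32_t b)
{
	if (idata_supported) {
		return (((a < b) && ((b - a) > (1U << 31))) ||
		    ((a > b) && ((a - b) < (1U << 31))));
	} else {
		uint16_t d = (uint16_t)(a - b);

		return ((d != 0) && (d < (1U << 15)));
	}
}
#endif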

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t) chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_rcv buffer, keep doing so until we hit an out-of-order
 * message, as long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    control->mid, strm->last_mid_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t) strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t) control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
		goto protocol_error;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
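
/*
 * Sketch of the in-order delivery rule applied above (illustrative
 * only): a queued message becomes deliverable exactly when its MID is
 * one past the last MID delivered on that stream; wrap handling is
 * left to SCTP_MID_EQ() in the real code.
 */
#if 0
static int
example_can_deliver(uint32_t last_mid_delivered, uint32_t mid)
{
	return (mid == last_mid_delivered + 1);
}
#endif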


static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any needed
			 * socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller holds any needed
			 * socket-buffer locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the peer is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok, let's add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok, we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						asoc->size_on_reasm_queue -= tchk->send_size;
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered mode we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of a new guy beyond
				 * the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok, this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->top_fsn = control->fsn_included = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top, which was being partially
		 * delivered, has completed, so we remove it. Note the
		 * pd_api flag was taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered guy
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok, we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}
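
/*
 * Worked example for the pd_point computation above (illustrative
 * numbers; the real inputs are the receive buffer limit, the partial
 * delivery shift and the endpoint's partial_delivery_point): assuming
 * SCTP_SB_LIMIT_RCV() of 65536, a shift of 4 and a configured
 * partial_delivery_point of 3000, pd_point = min(65536 >> 4, 3000) =
 * min(4096, 3000) = 3000. A message still being reassembled is pushed
 * to the read queue once at least pd_point bytes are in place.
 */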


void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	int do_wakeup, unordered;

	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
			/*
			 * Ok, we created this control; now let's validate
			 * that it's legal, i.e., there is a B bit set. If
			 * not, and we have everything up to the cum-ack,
			 * then it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok, we must queue the chunk into the reassembly portion: o if
	 * it's the first it goes to the control mbuf. o if it's not first
	 * but the next in sequence it goes to the control, and each
	 * succeeding one in order also goes. o if it's not in order we
	 * place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on sender's part: they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it's a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it's a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen the
			 * last one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, he sent me a duplicate stream seq
				 * number
				 */
				/*
				 * foo bar, I guess I will just free this
				 * new guy; should we abort too? FIX ME
				 * MAYBE? Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok, let's see if we can suck any up into the control structure
	 * that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
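
/*
 * Worked example of the in-sequence merge loop above (illustrative
 * FSNs): with control->fsn_included == 4 and fragments with FSNs
 * {5, 6, 9} on the reasm list, 5 and 6 are pulled into the control
 * (fsn_included becomes 6) while 9 stays queued until 7 and 8 arrive.
 */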

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
1558 
1559 static int
1560 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1561     struct mbuf **m, int offset, int chk_length,
1562     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1563     int *break_flag, int last_chunk, uint8_t chtype)
1564 {
1565 	/* Process a data chunk */
1566 	/* struct sctp_tmit_chunk *chk; */
1567 	struct sctp_data_chunk *ch;
1568 	struct sctp_idata_chunk *nch, chunk_buf;
1569 	struct sctp_tmit_chunk *chk;
1570 	uint32_t tsn, fsn, gap, mid;
1571 	struct mbuf *dmbuf;
1572 	int the_len;
1573 	int need_reasm_check = 0;
1574 	uint16_t sid;
1575 	struct mbuf *op_err;
1576 	char msg[SCTP_DIAG_INFO_LEN];
1577 	struct sctp_queued_to_read *control = NULL;
1578 	uint32_t ppid;
1579 	uint8_t chunk_flags;
1580 	struct sctp_stream_reset_list *liste;
1581 	struct sctp_stream_in *strm;
1582 	int ordered;
1583 	size_t clen;
1584 	int created_control = 0;
1585 
1586 	chk = NULL;
1587 	if (chtype == SCTP_IDATA) {
1588 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1589 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1590 		ch = (struct sctp_data_chunk *)nch;
1591 		clen = sizeof(struct sctp_idata_chunk);
1592 		tsn = ntohl(ch->dp.tsn);
1593 		mid = ntohl(nch->dp.mid);
1594 		ppid = nch->dp.ppid_fsn.ppid;
1595 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1596 			fsn = 0;
1597 		else
1598 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1599 	} else {
1600 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1601 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1602 		tsn = ntohl(ch->dp.tsn);
1603 		ppid = ch->dp.ppid;
1604 		clen = sizeof(struct sctp_data_chunk);
1605 		fsn = tsn;
1606 		mid = (uint32_t) (ntohs(ch->dp.ssn));
1607 		nch = NULL;
1608 	}
1609 	chunk_flags = ch->ch.chunk_flags;
1610 	if ((size_t)chk_length == clen) {
1611 		/*
1612 		 * Need to send an abort since we had an empty data chunk.
1613 		 */
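		/*
		 * E.g. a plain DATA chunk whose length equals the header
		 * size carries no user data; RFC 4960 requires at least one
		 * byte of payload.
		 */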
1614 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1615 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1616 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1617 		*abort_flag = 1;
1618 		return (0);
1619 	}
1620 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1621 		asoc->send_sack = 1;
1622 	}
1623 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1624 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1625 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1626 	}
1627 	if (stcb == NULL) {
1628 		return (0);
1629 	}
1630 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1631 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1632 		/* It is a duplicate */
1633 		SCTP_STAT_INCR(sctps_recvdupdata);
1634 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1635 			/* Record a dup for the next outbound sack */
1636 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1637 			asoc->numduptsns++;
1638 		}
1639 		asoc->send_sack = 1;
1640 		return (0);
1641 	}
1642 	/* Calculate the number of TSNs between the base and this TSN */
1643 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1644 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1645 		/* Can't hold the bit in the mapping at max array, toss it */
1646 		return (0);
1647 	}
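	/*
	 * Illustrative: the gap is serial arithmetic mod 2^32, e.g. base =
	 * 0xfffffffe and tsn = 0x00000001 yields gap = 3.  An N-byte
	 * mapping array can track at most N * 8 TSNs above the base.
	 */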
1648 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1649 		SCTP_TCB_LOCK_ASSERT(stcb);
1650 		if (sctp_expand_mapping_array(asoc, gap)) {
1651 			/* Can't expand, drop it */
1652 			return (0);
1653 		}
1654 	}
1655 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1656 		*high_tsn = tsn;
1657 	}
1658 	/* See if we have received this one already */
1659 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1660 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1661 		SCTP_STAT_INCR(sctps_recvdupdata);
1662 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1663 			/* Record a dup for the next outbound sack */
1664 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1665 			asoc->numduptsns++;
1666 		}
1667 		asoc->send_sack = 1;
1668 		return (0);
1669 	}
1670 	/*
1671 	 * Check the GONE flag; duplicates would already have caused a sack
1672 	 * to be sent up above.
1673 	 */
1674 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1675 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1676 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1677 		/*
1678 		 * wait a minute, this guy is gone, there is no longer a
1679 		 * receiver. Send peer an ABORT!
1680 		 */
1681 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1682 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1683 		*abort_flag = 1;
1684 		return (0);
1685 	}
1686 	/*
1687 	 * Now before going further we see if there is room. If NOT then we
1688 	 * MAY let one through only IF this TSN is the one we are waiting
1689 	 * for on a partial delivery API.
1690 	 */
1691 
1692 	/* Is the stream valid? */
1693 	sid = ntohs(ch->dp.sid);
1694 
1695 	if (sid >= asoc->streamincnt) {
1696 		struct sctp_error_invalid_stream *cause;
1697 
1698 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1699 		    0, M_NOWAIT, 1, MT_DATA);
1700 		if (op_err != NULL) {
1701 			/* add some space up front so prepend will work well */
1702 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1703 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1704 			/*
1705 			 * Error causes are just parameters and this one has
1706 			 * two back-to-back parameter headers: one with the error
1707 			 * type and size, the other with the stream id and a reserved field.
1708 			 */
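			/*
			 * Wire layout sketch (per RFC 4960, for reference):
			 * cause code (2 bytes) | cause length (2) |
			 * stream id (2) | reserved (2), 8 bytes total.
			 */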
1709 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1710 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1711 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1712 			cause->stream_id = ch->dp.sid;
1713 			cause->reserved = htons(0);
1714 			sctp_queue_op_err(stcb, op_err);
1715 		}
1716 		SCTP_STAT_INCR(sctps_badsid);
1717 		SCTP_TCB_LOCK_ASSERT(stcb);
1718 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1719 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1720 			asoc->highest_tsn_inside_nr_map = tsn;
1721 		}
1722 		if (tsn == (asoc->cumulative_tsn + 1)) {
1723 			/* Update cum-ack */
1724 			asoc->cumulative_tsn = tsn;
1725 		}
1726 		return (0);
1727 	}
1728 	strm = &asoc->strmin[sid];
1729 	/*
1730 	 * If it's a fragmented message, let's see if we can find the control
1731 	 * on the reassembly queues.
1732 	 */
1733 	if ((chtype == SCTP_IDATA) &&
1734 	    ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1735 	    (fsn == 0)) {
1736 		/*
1737 		 * The first *must* be fsn 0, and other (middle/end) pieces
1738 		 * can *not* be fsn 0. XXX: This can happen in case of a
1739 		 * wrap around. Ignore it for now.
1740 		 */
1741 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1742 		    mid, chunk_flags);
1743 		goto err_out;
1744 	}
1745 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
1746 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1747 	    chunk_flags, control);
1748 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1749 		/* See if we can find the re-assembly entity */
1750 		if (control != NULL) {
1751 			/* We found something, does it belong? */
1752 			if (ordered && (mid != control->mid)) {
1753 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1754 		err_out:
1755 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1756 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1757 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1758 				*abort_flag = 1;
1759 				return (0);
1760 			}
1761 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1762 				/* We can't have a switched order with an
1763 				 * unordered chunk */
1764 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1765 				    tsn);
1766 				goto err_out;
1767 			}
1768 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1769 				/* We can't have a switched unordered with an
1770 				 * ordered chunk */
1771 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1772 				    tsn);
1773 				goto err_out;
1774 			}
1775 		}
1776 	} else {
1777 		/*
1778 		 * It's a complete segment. Let's validate we don't have a
1779 		 * re-assembly going on with the same Stream/Seq (for
1780 		 * ordered) or in the same Stream for unordered.
1781 		 */
1782 		if (control != NULL) {
1783 			if (ordered || asoc->idata_supported) {
1784 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1785 				    chunk_flags, mid);
1786 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1787 				goto err_out;
1788 			} else {
1789 				if ((tsn == control->fsn_included + 1) &&
1790 				    (control->end_added == 0)) {
1791 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1792 					goto err_out;
1793 				} else {
1794 					control = NULL;
1795 				}
1796 			}
1797 		}
1798 	}
1799 	/* now do the tests */
1800 	if (((asoc->cnt_on_all_streams +
1801 	    asoc->cnt_on_reasm_queue +
1802 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1803 	    (((int)asoc->my_rwnd) <= 0)) {
1804 		/*
1805 		 * When we have NO room in the rwnd we check to make sure
1806 		 * the reader is doing its job...
1807 		 */
1808 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1809 			/* some to read, wake-up */
1810 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1811 			struct socket *so;
1812 
1813 			so = SCTP_INP_SO(stcb->sctp_ep);
1814 			atomic_add_int(&stcb->asoc.refcnt, 1);
1815 			SCTP_TCB_UNLOCK(stcb);
1816 			SCTP_SOCKET_LOCK(so, 1);
1817 			SCTP_TCB_LOCK(stcb);
1818 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1819 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1820 				/* assoc was freed while we were unlocked */
1821 				SCTP_SOCKET_UNLOCK(so, 1);
1822 				return (0);
1823 			}
1824 #endif
1825 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1826 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1827 			SCTP_SOCKET_UNLOCK(so, 1);
1828 #endif
1829 		}
1830 		/* now is it in the mapping array of what we have accepted? */
1831 		if (nch == NULL) {
1832 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1833 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1834 				/* Nope, not in the valid range, dump it */
1835 		dump_packet:
1836 				sctp_set_rwnd(stcb, asoc);
1837 				if ((asoc->cnt_on_all_streams +
1838 				    asoc->cnt_on_reasm_queue +
1839 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1840 					SCTP_STAT_INCR(sctps_datadropchklmt);
1841 				} else {
1842 					SCTP_STAT_INCR(sctps_datadroprwnd);
1843 				}
1844 				*break_flag = 1;
1845 				return (0);
1846 			}
1847 		} else {
1848 			if (control == NULL) {
1849 				goto dump_packet;
1850 			}
1851 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1852 				goto dump_packet;
1853 			}
1854 		}
1855 	}
1856 #ifdef SCTP_ASOCLOG_OF_TSNS
1857 	SCTP_TCB_LOCK_ASSERT(stcb);
1858 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1859 		asoc->tsn_in_at = 0;
1860 		asoc->tsn_in_wrapped = 1;
1861 	}
1862 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1863 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1864 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1865 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1866 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1867 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1868 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1869 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1870 	asoc->tsn_in_at++;
1871 #endif
1872 	/*
1873 	 * Before we continue let's validate that we are not being fooled by
1874 	 * an evil attacker. We can only have N * 8 chunks outstanding, the
1875 	 * TSN spread allowed by an N-byte mapping array, so there is no
1876 	 * way our stream sequence numbers could have wrapped. We of course
1877 	 * only validate the FIRST fragment, so the bit must be set.
1878 	 */
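	/*
	 * E.g. (a sketch): an N-byte mapping array bounds the TSN spread to
	 * N * 8, far below the 2^16 SSN (or 2^32 MID) space, so a
	 * legitimate peer cannot wrap the sequence numbers inside the
	 * window we track.
	 */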
1879 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1880 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1881 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1882 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1883 		/* The incoming sseq is behind where we last delivered? */
1884 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1885 		    mid, asoc->strmin[sid].last_mid_delivered);
1886 
1887 		if (asoc->idata_supported) {
1888 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1889 			    asoc->strmin[sid].last_mid_delivered,
1890 			    tsn,
1891 			    sid,
1892 			    mid);
1893 		} else {
1894 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1895 			    (uint16_t) asoc->strmin[sid].last_mid_delivered,
1896 			    tsn,
1897 			    sid,
1898 			    (uint16_t) mid);
1899 		}
1900 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1901 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1902 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1903 		*abort_flag = 1;
1904 		return (0);
1905 	}
1906 	/************************************
1907 	 * From here down we may find ch-> invalid
1908 	 * so it's a good idea NOT to use it.
1909 	 *************************************/
1910 	if (nch) {
1911 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1912 	} else {
1913 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1914 	}
1915 	if (last_chunk == 0) {
1916 		if (nch) {
1917 			dmbuf = SCTP_M_COPYM(*m,
1918 			    (offset + sizeof(struct sctp_idata_chunk)),
1919 			    the_len, M_NOWAIT);
1920 		} else {
1921 			dmbuf = SCTP_M_COPYM(*m,
1922 			    (offset + sizeof(struct sctp_data_chunk)),
1923 			    the_len, M_NOWAIT);
1924 		}
1925 #ifdef SCTP_MBUF_LOGGING
1926 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1927 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1928 		}
1929 #endif
1930 	} else {
1931 		/* We can steal the last chunk */
1932 		int l_len;
1933 
1934 		dmbuf = *m;
1935 		/* lop off the top part */
1936 		if (nch) {
1937 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1938 		} else {
1939 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1940 		}
1941 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1942 			l_len = SCTP_BUF_LEN(dmbuf);
1943 		} else {
1944 			/*
1945 			 * need to count up the size; hopefully we do not hit
1946 			 * this too often :-0
1947 			 */
1948 			struct mbuf *lat;
1949 
1950 			l_len = 0;
1951 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1952 				l_len += SCTP_BUF_LEN(lat);
1953 			}
1954 		}
1955 		if (l_len > the_len) {
1956 			/* Trim the extra bytes off the end too */
1957 			m_adj(dmbuf, -(l_len - the_len));
1958 		}
1959 	}
1960 	if (dmbuf == NULL) {
1961 		SCTP_STAT_INCR(sctps_nomem);
1962 		return (0);
1963 	}
1964 	/*
1965 	 * Now no matter what we need a control, get one if we don't have
1966 	 * one (we may have gotten it above when we found the message was
1967 	 * fragmented).
1968 	 */
1969 	if (control == NULL) {
1970 		sctp_alloc_a_readq(stcb, control);
1971 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1972 		    ppid,
1973 		    sid,
1974 		    chunk_flags,
1975 		    NULL, fsn, mid);
1976 		if (control == NULL) {
1977 			SCTP_STAT_INCR(sctps_nomem);
1978 			return (0);
1979 		}
1980 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1981 			control->data = dmbuf;
1982 			control->tail_mbuf = NULL;
1983 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1984 			control->top_fsn = control->fsn_included = fsn;
1985 		}
1986 		created_control = 1;
1987 	}
1988 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
1989 	    chunk_flags, ordered, mid, control);
1990 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1991 	    TAILQ_EMPTY(&asoc->resetHead) &&
1992 	    ((ordered == 0) ||
1993 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
1994 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
1995 		/* Candidate for express delivery */
1996 		/*
1997 		 * It's not fragmented, no PD-API is up, nothing in the
1998 		 * delivery queue, it's un-ordered OR ordered and the next to
1999 		 * deliver AND nothing else is stuck on the stream queue,
2000 		 * and there is room for it in the socket buffer. Let's just
2001 		 * stuff it up the buffer....
2002 		 */
2003 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2004 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2005 			asoc->highest_tsn_inside_nr_map = tsn;
2006 		}
2007 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2008 		    control, mid);
2009 
2010 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2011 		    control, &stcb->sctp_socket->so_rcv,
2012 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2013 
2014 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2015 			/* for ordered, bump what we delivered */
2016 			strm->last_mid_delivered++;
2017 		}
2018 		SCTP_STAT_INCR(sctps_recvexpress);
2019 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2020 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2021 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2022 		}
2023 		control = NULL;
2024 		goto finish_express_del;
2025 	}
2026 	/* Now will we need a chunk too? */
2027 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2028 		sctp_alloc_a_chunk(stcb, chk);
2029 		if (chk == NULL) {
2030 			/* No memory so we drop the chunk */
2031 			SCTP_STAT_INCR(sctps_nomem);
2032 			if (last_chunk == 0) {
2033 				/* we copied it, free the copy */
2034 				sctp_m_freem(dmbuf);
2035 			}
2036 			return (0);
2037 		}
2038 		chk->rec.data.tsn = tsn;
2039 		chk->no_fr_allowed = 0;
2040 		chk->rec.data.fsn = fsn;
2041 		chk->rec.data.mid = mid;
2042 		chk->rec.data.sid = sid;
2043 		chk->rec.data.ppid = ppid;
2044 		chk->rec.data.context = stcb->asoc.context;
2045 		chk->rec.data.doing_fast_retransmit = 0;
2046 		chk->rec.data.rcv_flags = chunk_flags;
2047 		chk->asoc = asoc;
2048 		chk->send_size = the_len;
2049 		chk->whoTo = net;
2050 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2051 		    chk,
2052 		    control, mid);
2053 		atomic_add_int(&net->ref_count, 1);
2054 		chk->data = dmbuf;
2055 	}
2056 	/* Set the appropriate TSN mark */
2057 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2058 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2059 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2060 			asoc->highest_tsn_inside_nr_map = tsn;
2061 		}
2062 	} else {
2063 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2064 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2065 			asoc->highest_tsn_inside_map = tsn;
2066 		}
2067 	}
2068 	/* Now is it complete (i.e. not fragmented)? */
2069 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2070 		/*
2071 		 * Special check for when streams are resetting. We could be
2072 		 * smarter about this and check the actual stream to see
2073 		 * whether it is being reset; that way we would not create
2074 		 * head-of-line blocking (HOLB) between streams being reset
2075 		 * and those not being reset.
2076 		 *
2077 		 */
2078 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2079 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2080 			/*
2081 			 * yep, it's past where we need to reset... go ahead
2082 			 * and queue it.
2083 			 */
2084 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2085 				/* first one on */
2086 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2087 			} else {
2088 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2089 				unsigned char inserted = 0;
2090 
2091 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2092 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2094 						continue;
2095 					} else {
2096 						/* found it */
2097 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2098 						inserted = 1;
2099 						break;
2100 					}
2101 				}
2102 				if (inserted == 0) {
2103 					/*
2104 					 * not inserted before any entry in
2105 					 * the loop, so it must be put at
2106 					 * the end of the queue.
2107 					 */
2108 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2109 				}
2110 			}
2111 			goto finish_express_del;
2112 		}
2113 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2114 			/* queue directly into socket buffer */
2115 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2116 			    control, mid);
2117 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2118 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2119 			    control,
2120 			    &stcb->sctp_socket->so_rcv, 1,
2121 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2122 
2123 		} else {
2124 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2125 			    mid);
2126 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2127 			if (*abort_flag) {
2128 				if (last_chunk) {
2129 					*m = NULL;
2130 				}
2131 				return (0);
2132 			}
2133 		}
2134 		goto finish_express_del;
2135 	}
2136 	/* If we reach here its a reassembly */
2137 	need_reasm_check = 1;
2138 	SCTPDBG(SCTP_DEBUG_XXX,
2139 	    "Queue data to stream for reasm control: %p MID: %u\n",
2140 	    control, mid);
2141 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2142 	if (*abort_flag) {
2143 		/*
2144 		 * the assoc is now gone and chk was put onto the reasm
2145 		 * queue, which has all been freed.
2146 		 */
2147 		if (last_chunk) {
2148 			*m = NULL;
2149 		}
2150 		return (0);
2151 	}
2152 finish_express_del:
2153 	/* Here we tidy up things */
2154 	if (tsn == (asoc->cumulative_tsn + 1)) {
2155 		/* Update cum-ack */
2156 		asoc->cumulative_tsn = tsn;
2157 	}
2158 	if (last_chunk) {
2159 		*m = NULL;
2160 	}
2161 	if (ordered) {
2162 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2163 	} else {
2164 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2165 	}
2166 	SCTP_STAT_INCR(sctps_recvdata);
2167 	/* Set it present please */
2168 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2169 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2170 	}
2171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2172 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2173 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2174 	}
2175 	/* check the special flag for stream resets */
2176 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2177 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2178 		/*
2179 		 * we have finished working through the backlogged TSNs, now
2180 		 * time to reset streams. 1: call reset function. 2: free
2181 		 * pending_reply space. 3: distribute any chunks in
2182 		 * pending_reply_queue.
2183 		 */
2184 		struct sctp_queued_to_read *ctl, *nctl;
2185 
2186 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2187 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2188 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2189 		SCTP_FREE(liste, SCTP_M_STRESET);
2190 		/* sa_ignore FREED_MEMORY */
2191 		liste = TAILQ_FIRST(&asoc->resetHead);
2192 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2193 			/* All can be removed */
2194 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2195 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2196 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2197 				if (*abort_flag) {
2198 					return (0);
2199 				}
2200 			}
2201 		} else {
2202 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2203 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2204 					break;
2205 				}
2206 				/*
2207 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2208 				 * process it which is the NOT of
2209 				 * ctl->sinfo_tsn > liste->tsn
2210 				 */
2211 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2212 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2213 				if (*abort_flag) {
2214 					return (0);
2215 				}
2216 			}
2217 		}
2218 		/*
2219 		 * Now service re-assembly to pick up anything that has been
2220 		 * held on the reassembly queue.
2221 		 */
2222 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2223 		need_reasm_check = 0;
2224 	}
2225 	if (need_reasm_check) {
2226 		/* Another one waits? */
2227 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2228 	}
2229 	return (1);
2230 }
2231 
2232 static const int8_t sctp_map_lookup_tab[256] = {
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 4,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 5,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 4,
2239 	0, 1, 0, 2, 0, 1, 0, 3,
2240 	0, 1, 0, 2, 0, 1, 0, 6,
2241 	0, 1, 0, 2, 0, 1, 0, 3,
2242 	0, 1, 0, 2, 0, 1, 0, 4,
2243 	0, 1, 0, 2, 0, 1, 0, 3,
2244 	0, 1, 0, 2, 0, 1, 0, 5,
2245 	0, 1, 0, 2, 0, 1, 0, 3,
2246 	0, 1, 0, 2, 0, 1, 0, 4,
2247 	0, 1, 0, 2, 0, 1, 0, 3,
2248 	0, 1, 0, 2, 0, 1, 0, 7,
2249 	0, 1, 0, 2, 0, 1, 0, 3,
2250 	0, 1, 0, 2, 0, 1, 0, 4,
2251 	0, 1, 0, 2, 0, 1, 0, 3,
2252 	0, 1, 0, 2, 0, 1, 0, 5,
2253 	0, 1, 0, 2, 0, 1, 0, 3,
2254 	0, 1, 0, 2, 0, 1, 0, 4,
2255 	0, 1, 0, 2, 0, 1, 0, 3,
2256 	0, 1, 0, 2, 0, 1, 0, 6,
2257 	0, 1, 0, 2, 0, 1, 0, 3,
2258 	0, 1, 0, 2, 0, 1, 0, 4,
2259 	0, 1, 0, 2, 0, 1, 0, 3,
2260 	0, 1, 0, 2, 0, 1, 0, 5,
2261 	0, 1, 0, 2, 0, 1, 0, 3,
2262 	0, 1, 0, 2, 0, 1, 0, 4,
2263 	0, 1, 0, 2, 0, 1, 0, 3,
2264 	0, 1, 0, 2, 0, 1, 0, 8
2265 };
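
/*
 * Reader's note (illustrative, not normative): sctp_map_lookup_tab[val]
 * is the number of consecutive 1-bits starting at the least significant
 * bit of val, i.e. how many TSNs in this byte are present before the
 * first hole.  E.g. val = 0x0b (binary 00001011) -> 2, val = 0x07 -> 3,
 * val = 0xff -> 8.
 */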
2266 
2267 
2268 void
2269 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2270 {
2271 	/*
2272 	 * Now we also need to check the mapping array in a couple of ways.
2273 	 * 1) Did we move the cum-ack point?
2274 	 *
2275 	 * When you first glance at this you might think that all entries
2276 	 * that make up the position of the cum-ack would be in the
2277 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2278 	 * deliverable. That's true with one exception: when it's a fragmented
2279 	 * message we may not deliver the data until some threshold (or all
2280 	 * of it) is in place. So we must OR the nr_mapping_array and
2281 	 * mapping_array to get a true picture of the cum-ack.
2282 	 */
2283 	struct sctp_association *asoc;
2284 	int at;
2285 	uint8_t val;
2286 	int slide_from, slide_end, lgap, distance;
2287 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2288 
2289 	asoc = &stcb->asoc;
2290 
2291 	old_cumack = asoc->cumulative_tsn;
2292 	old_base = asoc->mapping_array_base_tsn;
2293 	old_highest = asoc->highest_tsn_inside_map;
2294 	/*
2295 	 * We could probably improve this a small bit by calculating the
2296 	 * offset of the current cum-ack as the starting point.
2297 	 */
2298 	at = 0;
2299 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2300 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2301 		if (val == 0xff) {
2302 			at += 8;
2303 		} else {
2304 			/* there is a 0 bit */
2305 			at += sctp_map_lookup_tab[val];
2306 			break;
2307 		}
2308 	}
2309 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
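	/*
	 * Worked example (illustrative): if the OR'ed bytes are 0xff, 0xff,
	 * 0x0b, ... then at = 8 + 8 + 2 = 18, so the new cumulative_tsn is
	 * mapping_array_base_tsn + 17.
	 */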
2310 
2311 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2312 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2313 #ifdef INVARIANTS
2314 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2315 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2316 #else
2317 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2318 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2319 		sctp_print_mapping_array(asoc);
2320 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2321 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2322 		}
2323 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2324 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2325 #endif
2326 	}
2327 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2328 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2329 	} else {
2330 		highest_tsn = asoc->highest_tsn_inside_map;
2331 	}
2332 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2333 		/* The complete array was completed by a single FR */
2334 		/* highest becomes the cum-ack */
2335 		int clr;
2336 #ifdef INVARIANTS
2337 		unsigned int i;
2338 #endif
2339 
2340 		/* clear the array */
2341 		clr = ((at + 7) >> 3);
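		/* e.g. at = 18 -> clr = 3: round up to whole bytes */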
2342 		if (clr > asoc->mapping_array_size) {
2343 			clr = asoc->mapping_array_size;
2344 		}
2345 		memset(asoc->mapping_array, 0, clr);
2346 		memset(asoc->nr_mapping_array, 0, clr);
2347 #ifdef INVARIANTS
2348 		for (i = 0; i < asoc->mapping_array_size; i++) {
2349 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2350 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2351 				sctp_print_mapping_array(asoc);
2352 			}
2353 		}
2354 #endif
2355 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2356 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2357 	} else if (at >= 8) {
2358 		/* we can slide the mapping array down */
2359 		/* slide_from holds where we hit the first NON 0xff byte */
2360 
2361 		/*
2362 		 * now calculate the ceiling of the move using our highest
2363 		 * TSN value
2364 		 */
2365 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2366 		slide_end = (lgap >> 3);
2367 		if (slide_end < slide_from) {
2368 			sctp_print_mapping_array(asoc);
2369 #ifdef INVARIANTS
2370 			panic("impossible slide");
2371 #else
2372 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2373 			    lgap, slide_end, slide_from, at);
2374 			return;
2375 #endif
2376 		}
2377 		if (slide_end > asoc->mapping_array_size) {
2378 #ifdef INVARIANTS
2379 			panic("would overrun buffer");
2380 #else
2381 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2382 			    asoc->mapping_array_size, slide_end);
2383 			slide_end = asoc->mapping_array_size;
2384 #endif
2385 		}
2386 		distance = (slide_end - slide_from) + 1;
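		/*
		 * E.g. (illustrative): slide_from = 2 and a highest_tsn 30
		 * TSNs above the base gives lgap = 30, slide_end = 3 and
		 * distance = 2 bytes to copy down to the front.
		 */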
2387 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 			sctp_log_map(old_base, old_cumack, old_highest,
2389 			    SCTP_MAP_PREPARE_SLIDE);
2390 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2391 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2392 		}
2393 		if (distance + slide_from > asoc->mapping_array_size ||
2394 		    distance < 0) {
2395 			/*
2396 			 * Here we do NOT slide forward the array so that
2397 			 * hopefully when more data comes in to fill it up
2398 			 * we will be able to slide it forward. Really I
2399 			 * don't think this should happen :-0
2400 			 */
2401 
2402 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2403 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2404 				    (uint32_t) asoc->mapping_array_size,
2405 				    SCTP_MAP_SLIDE_NONE);
2406 			}
2407 		} else {
2408 			int ii;
2409 
2410 			for (ii = 0; ii < distance; ii++) {
2411 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2412 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2413 
2414 			}
2415 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2416 				asoc->mapping_array[ii] = 0;
2417 				asoc->nr_mapping_array[ii] = 0;
2418 			}
2419 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2420 				asoc->highest_tsn_inside_map += (slide_from << 3);
2421 			}
2422 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2423 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2424 			}
2425 			asoc->mapping_array_base_tsn += (slide_from << 3);
2426 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2427 				sctp_log_map(asoc->mapping_array_base_tsn,
2428 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2429 				    SCTP_MAP_SLIDE_RESULT);
2430 			}
2431 		}
2432 	}
2433 }
2434 
2435 void
2436 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2437 {
2438 	struct sctp_association *asoc;
2439 	uint32_t highest_tsn;
2440 	int is_a_gap;
2441 
2442 	sctp_slide_mapping_arrays(stcb);
2443 	asoc = &stcb->asoc;
2444 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2445 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2446 	} else {
2447 		highest_tsn = asoc->highest_tsn_inside_map;
2448 	}
2449 	/* Is there a gap now? */
2450 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2451 
2452 	/*
2453 	 * Now we need to see if we need to queue a sack or just start the
2454 	 * timer (if allowed).
2455 	 */
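	/*
	 * Roughly (a summary, not the authoritative rules): in
	 * SHUTDOWN-SENT we always emit a SHUTDOWN, plus a SACK if gapped.
	 * Otherwise we SACK immediately on demand, on dup TSNs, on gaps,
	 * on a gap that just closed, with delayed ack disabled, or at the
	 * packet frequency limit; in all other cases we start the
	 * delayed-ack timer (DAC may extend the delay under CMT).
	 */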
2456 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2457 		/*
2458 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2459 		 * sure the SACK timer is off and instead send a SHUTDOWN
2460 		 * and a SACK.
2461 		 */
2462 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2463 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2464 			    stcb->sctp_ep, stcb, NULL,
2465 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2466 		}
2467 		sctp_send_shutdown(stcb,
2468 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2469 		if (is_a_gap) {
2470 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2471 		}
2472 	} else {
2473 		/*
2474 		 * CMT DAC algorithm: increase number of packets received
2475 		 * since last ack
2476 		 */
2477 		stcb->asoc.cmt_dac_pkts_rcvd++;
2478 
2479 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2480 							 * SACK */
2481 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2482 							 * longer is one */
2483 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2484 		    (is_a_gap) ||	/* is still a gap */
2485 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2486 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
2487 
2488 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2489 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2490 			    (stcb->asoc.send_sack == 0) &&
2491 			    (stcb->asoc.numduptsns == 0) &&
2492 			    (stcb->asoc.delayed_ack) &&
2493 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2494 
2495 				/*
2496 				 * CMT DAC algorithm: with CMT, delay acks
2497 				 * even in the face of reordering.
2498 				 * Therefore, acks that do not have to be
2499 				 * sent because of the above reasons will
2500 				 * be delayed. That is, acks that would
2501 				 * have been sent due to gap reports will
2502 				 * be delayed with DAC. Start the delayed
2503 				 * ack timer.
2505 				 */
2506 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2507 				    stcb->sctp_ep, stcb, NULL);
2508 			} else {
2509 				/*
2510 				 * Ok, we must build a SACK since the timer
2511 				 * is pending, we got our first packet, OR
2512 				 * there are gaps or duplicates.
2513 				 */
2514 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2515 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2516 			}
2517 		} else {
2518 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2519 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2520 				    stcb->sctp_ep, stcb, NULL);
2521 			}
2522 		}
2523 	}
2524 }
2525 
2526 int
2527 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2528     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2529     struct sctp_nets *net, uint32_t * high_tsn)
2530 {
2531 	struct sctp_chunkhdr *ch, chunk_buf;
2532 	struct sctp_association *asoc;
2533 	int num_chunks = 0;	/* number of control chunks processed */
2534 	int stop_proc = 0;
2535 	int chk_length, break_flag, last_chunk;
2536 	int abort_flag = 0, was_a_gap;
2537 	struct mbuf *m;
2538 	uint32_t highest_tsn;
2539 
2540 	/* set the rwnd */
2541 	sctp_set_rwnd(stcb, &stcb->asoc);
2542 
2543 	m = *mm;
2544 	SCTP_TCB_LOCK_ASSERT(stcb);
2545 	asoc = &stcb->asoc;
2546 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2547 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2548 	} else {
2549 		highest_tsn = asoc->highest_tsn_inside_map;
2550 	}
2551 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2552 	/*
2553 	 * setup where we got the last DATA packet from for any SACK that
2554 	 * may need to go out. Don't bump the net. This is done ONLY when a
2555 	 * chunk is assigned.
2556 	 */
2557 	asoc->last_data_chunk_from = net;
2558 
2559 	/*-
2560 	 * Now before we proceed we must figure out if this is a wasted
2561 	 * cluster... i.e. it is a small packet sent in and yet the driver
2562 	 * underneath allocated a full cluster for it. If so we must copy it
2563 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2564 	 * with cluster starvation. Note for __Panda__ we don't do this
2565 	 * since it has clusters all the way down to 64 bytes.
2566 	 */
2567 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2568 		/* we only handle mbufs that are singletons.. not chains */
2569 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2570 		if (m) {
2571 			/* ok lets see if we can copy the data up */
2572 			caddr_t *from, *to;
2573 
2574 			/* get the pointers and copy */
2575 			to = mtod(m, caddr_t *);
2576 			from = mtod((*mm), caddr_t *);
2577 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2578 			/* copy the length and free up the old */
2579 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2580 			sctp_m_freem(*mm);
2581 			/* success, back copy */
2582 			*mm = m;
2583 		} else {
2584 			/* We are in trouble in the mbuf world .. yikes */
2585 			m = *mm;
2586 		}
2587 	}
2588 	/* get pointer to the first chunk header */
2589 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2590 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2591 	if (ch == NULL) {
2592 		return (1);
2593 	}
2594 	/*
2595 	 * process all DATA chunks...
2596 	 */
2597 	*high_tsn = asoc->cumulative_tsn;
2598 	break_flag = 0;
2599 	asoc->data_pkts_seen++;
2600 	while (stop_proc == 0) {
2601 		/* validate chunk length */
2602 		chk_length = ntohs(ch->chunk_length);
2603 		if (length - *offset < chk_length) {
2604 			/* all done, mutilated chunk */
2605 			stop_proc = 1;
2606 			continue;
2607 		}
2608 		if ((asoc->idata_supported == 1) &&
2609 		    (ch->chunk_type == SCTP_DATA)) {
2610 			struct mbuf *op_err;
2611 			char msg[SCTP_DIAG_INFO_LEN];
2612 
2613 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2614 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2615 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2616 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2617 			return (2);
2618 		}
2619 		if ((asoc->idata_supported == 0) &&
2620 		    (ch->chunk_type == SCTP_IDATA)) {
2621 			struct mbuf *op_err;
2622 			char msg[SCTP_DIAG_INFO_LEN];
2623 
2624 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2625 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2626 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2627 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2628 			return (2);
2629 		}
2630 		if ((ch->chunk_type == SCTP_DATA) ||
2631 		    (ch->chunk_type == SCTP_IDATA)) {
2632 			int clen;
2633 
2634 			if (ch->chunk_type == SCTP_DATA) {
2635 				clen = sizeof(struct sctp_data_chunk);
2636 			} else {
2637 				clen = sizeof(struct sctp_idata_chunk);
2638 			}
2639 			if (chk_length < clen) {
2640 				/*
2641 				 * Need to send an abort since we had an
2642 				 * invalid data chunk.
2643 				 */
2644 				struct mbuf *op_err;
2645 				char msg[SCTP_DIAG_INFO_LEN];
2646 
2647 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2648 				    chk_length);
2649 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2650 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2651 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2652 				return (2);
2653 			}
2654 #ifdef SCTP_AUDITING_ENABLED
2655 			sctp_audit_log(0xB1, 0);
2656 #endif
2657 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2658 				last_chunk = 1;
2659 			} else {
2660 				last_chunk = 0;
2661 			}
2662 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2663 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2664 			    last_chunk, ch->chunk_type)) {
2665 				num_chunks++;
2666 			}
2667 			if (abort_flag)
2668 				return (2);
2669 
2670 			if (break_flag) {
2671 				/*
2672 				 * Set because of out of rwnd space and no
2673 				 * drop rep space left.
2674 				 */
2675 				stop_proc = 1;
2676 				continue;
2677 			}
2678 		} else {
2679 			/* not a data chunk in the data region */
2680 			switch (ch->chunk_type) {
2681 			case SCTP_INITIATION:
2682 			case SCTP_INITIATION_ACK:
2683 			case SCTP_SELECTIVE_ACK:
2684 			case SCTP_NR_SELECTIVE_ACK:
2685 			case SCTP_HEARTBEAT_REQUEST:
2686 			case SCTP_HEARTBEAT_ACK:
2687 			case SCTP_ABORT_ASSOCIATION:
2688 			case SCTP_SHUTDOWN:
2689 			case SCTP_SHUTDOWN_ACK:
2690 			case SCTP_OPERATION_ERROR:
2691 			case SCTP_COOKIE_ECHO:
2692 			case SCTP_COOKIE_ACK:
2693 			case SCTP_ECN_ECHO:
2694 			case SCTP_ECN_CWR:
2695 			case SCTP_SHUTDOWN_COMPLETE:
2696 			case SCTP_AUTHENTICATION:
2697 			case SCTP_ASCONF_ACK:
2698 			case SCTP_PACKET_DROPPED:
2699 			case SCTP_STREAM_RESET:
2700 			case SCTP_FORWARD_CUM_TSN:
2701 			case SCTP_ASCONF:
2702 				{
2703 					/*
2704 					 * Now, what do we do with KNOWN
2705 					 * chunks that are NOT in the right
2706 					 * place?
2707 					 *
2708 					 * For now, we treat these as a
2709 					 * protocol violation and abort the
2710 					 * association. We may later want to
2711 					 * add sysctl stuff to switch out and
2712 					 * possibly process them instead.
2713 					 */
2714 					struct mbuf *op_err;
2715 					char msg[SCTP_DIAG_INFO_LEN];
2716 
2717 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2718 					    ch->chunk_type);
2719 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2720 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2721 					return (2);
2722 				}
2723 			default:
2724 				/* unknown chunk type, use bit rules */
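				/*
				 * The two high bits of the chunk type (RFC
				 * 4960, section 3.2): 0x40 set -> report the
				 * unrecognized chunk in an ERROR; 0x80 set ->
				 * skip it and keep processing, clear -> stop
				 * processing this packet.
				 */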
2725 				if (ch->chunk_type & 0x40) {
2726 					/* Add a error report to the queue */
2727 					struct mbuf *op_err;
2728 					struct sctp_gen_error_cause *cause;
2729 
2730 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2731 					    0, M_NOWAIT, 1, MT_DATA);
2732 					if (op_err != NULL) {
2733 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2734 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2735 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2736 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2737 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2738 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2739 							sctp_queue_op_err(stcb, op_err);
2740 						} else {
2741 							sctp_m_freem(op_err);
2742 						}
2743 					}
2744 				}
2745 				if ((ch->chunk_type & 0x80) == 0) {
2746 					/* discard the rest of this packet */
2747 					stop_proc = 1;
2748 				}	/* else skip this bad chunk and continue... */
2749 				break;
2750 			}	/* switch of chunk type */
2751 		}
2752 		*offset += SCTP_SIZE32(chk_length);
2753 		if ((*offset >= length) || stop_proc) {
2754 			/* no more data left in the mbuf chain */
2755 			stop_proc = 1;
2756 			continue;
2757 		}
2758 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2759 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2760 		if (ch == NULL) {
2761 			*offset = length;
2762 			stop_proc = 1;
2763 			continue;
2764 		}
2765 	}
2766 	if (break_flag) {
2767 		/*
2768 		 * we need to report rwnd overrun drops.
2769 		 */
2770 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2771 	}
2772 	if (num_chunks) {
2773 		/*
2774 		 * We got data; update the time for auto-close and
2775 		 * give peer credit for being alive.
2776 		 */
2777 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2778 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2779 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2780 			    stcb->asoc.overall_error_count,
2781 			    0,
2782 			    SCTP_FROM_SCTP_INDATA,
2783 			    __LINE__);
2784 		}
2785 		stcb->asoc.overall_error_count = 0;
2786 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2787 	}
2788 	/* now service all of the reassm queue if needed */
2789 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2790 		/* Assure that we ack right away */
2791 		stcb->asoc.send_sack = 1;
2792 	}
2793 	/* Start a sack timer or QUEUE a SACK for sending */
2794 	sctp_sack_check(stcb, was_a_gap);
2795 	return (0);
2796 }
2797 
2798 static int
2799 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2800     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2801     int *num_frs,
2802     uint32_t * biggest_newly_acked_tsn,
2803     uint32_t * this_sack_lowest_newack,
2804     int *rto_ok)
2805 {
2806 	struct sctp_tmit_chunk *tp1;
2807 	unsigned int theTSN;
2808 	int j, wake_him = 0, circled = 0;
2809 
2810 	/* Recover the tp1 we last saw */
2811 	tp1 = *p_tp1;
2812 	if (tp1 == NULL) {
2813 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2814 	}
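	/*
	 * Gap ack blocks are offsets from the cumulative TSN ack point.
	 * For example (illustrative): last_tsn = 1000 with a block of
	 * frag_strt = 2, frag_end = 4 acks TSNs 1002, 1003 and 1004.
	 */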
2815 	for (j = frag_strt; j <= frag_end; j++) {
2816 		theTSN = j + last_tsn;
2817 		while (tp1) {
2818 			if (tp1->rec.data.doing_fast_retransmit)
2819 				(*num_frs) += 1;
2820 
2821 			/*-
2822 			 * CMT: CUCv2 algorithm. For each TSN being
2823 			 * processed from the sent queue, track the
2824 			 * next expected pseudo-cumack, or
2825 			 * rtx_pseudo_cumack, if required. Separate
2826 			 * cumack trackers for first transmissions,
2827 			 * and retransmissions.
2828 			 */
2829 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2830 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2831 			    (tp1->snd_count == 1)) {
2832 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2833 				tp1->whoTo->find_pseudo_cumack = 0;
2834 			}
2835 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2836 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2837 			    (tp1->snd_count > 1)) {
2838 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2839 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2840 			}
2841 			if (tp1->rec.data.tsn == theTSN) {
2842 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2843 					/*-
2844 					 * must be held until
2845 					 * cum-ack passes
2846 					 */
2847 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2848 						/*-
2849 						 * If it is less than RESEND, it is
2850 						 * now no-longer in flight.
2851 						 * Higher values may already be set
2852 						 * via previous Gap Ack Blocks...
2853 						 * i.e. ACKED or RESEND.
2854 						 */
2855 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2856 						    *biggest_newly_acked_tsn)) {
2857 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2858 						}
2859 						/*-
2860 						 * CMT: SFR algo (and HTNA) - set
2861 						 * saw_newack to 1 for dest being
2862 						 * newly acked. update
2863 						 * this_sack_highest_newack if
2864 						 * appropriate.
2865 						 */
2866 						if (tp1->rec.data.chunk_was_revoked == 0)
2867 							tp1->whoTo->saw_newack = 1;
2868 
2869 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2870 						    tp1->whoTo->this_sack_highest_newack)) {
2871 							tp1->whoTo->this_sack_highest_newack =
2872 							    tp1->rec.data.tsn;
2873 						}
2874 						/*-
2875 						 * CMT DAC algo: also update
2876 						 * this_sack_lowest_newack
2877 						 */
2878 						if (*this_sack_lowest_newack == 0) {
2879 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2880 								sctp_log_sack(*this_sack_lowest_newack,
2881 								    last_tsn,
2882 								    tp1->rec.data.tsn,
2883 								    0,
2884 								    0,
2885 								    SCTP_LOG_TSN_ACKED);
2886 							}
2887 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2888 						}
2889 						/*-
2890 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2891 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2892 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2893 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2894 						 * Separate pseudo_cumack trackers for first transmissions and
2895 						 * retransmissions.
2896 						 */
2897 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2898 							if (tp1->rec.data.chunk_was_revoked == 0) {
2899 								tp1->whoTo->new_pseudo_cumack = 1;
2900 							}
2901 							tp1->whoTo->find_pseudo_cumack = 1;
2902 						}
2903 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2904 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2905 						}
2906 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2907 							if (tp1->rec.data.chunk_was_revoked == 0) {
2908 								tp1->whoTo->new_pseudo_cumack = 1;
2909 							}
2910 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2911 						}
2912 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2913 							sctp_log_sack(*biggest_newly_acked_tsn,
2914 							    last_tsn,
2915 							    tp1->rec.data.tsn,
2916 							    frag_strt,
2917 							    frag_end,
2918 							    SCTP_LOG_TSN_ACKED);
2919 						}
2920 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2921 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2922 							    tp1->whoTo->flight_size,
2923 							    tp1->book_size,
2924 							    (uint32_t) (uintptr_t) tp1->whoTo,
2925 							    tp1->rec.data.tsn);
2926 						}
2927 						sctp_flight_size_decrease(tp1);
2928 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2929 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2930 							    tp1);
2931 						}
2932 						sctp_total_flight_decrease(stcb, tp1);
2933 
2934 						tp1->whoTo->net_ack += tp1->send_size;
2935 						if (tp1->snd_count < 2) {
2936 							/*-
2937 							 * True non-retransmitted chunk
2938 							 */
2939 							tp1->whoTo->net_ack2 += tp1->send_size;
2940 
2941 							/*-
2942 							 * update RTO too?
2943 							 */
2944 							if (tp1->do_rtt) {
2945 								if (*rto_ok) {
2946 									tp1->whoTo->RTO =
2947 									    sctp_calculate_rto(stcb,
2948 									    &stcb->asoc,
2949 									    tp1->whoTo,
2950 									    &tp1->sent_rcv_time,
2951 									    sctp_align_safe_nocopy,
2952 									    SCTP_RTT_FROM_DATA);
2953 									*rto_ok = 0;
2954 								}
2955 								if (tp1->whoTo->rto_needed == 0) {
2956 									tp1->whoTo->rto_needed = 1;
2957 								}
2958 								tp1->do_rtt = 0;
2959 							}
2960 						}
2961 					}
2962 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2963 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2964 						    stcb->asoc.this_sack_highest_gap)) {
2965 							stcb->asoc.this_sack_highest_gap =
2966 							    tp1->rec.data.tsn;
2967 						}
2968 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2969 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2970 #ifdef SCTP_AUDITING_ENABLED
2971 							sctp_audit_log(0xB2,
2972 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2973 #endif
2974 						}
2975 					}
2976 					/*-
2977 					 * All chunks NOT UNSENT fall through here and are marked
2978 					 * (leave PR-SCTP ones that are to skip alone though)
2979 					 */
2980 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2981 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2982 						tp1->sent = SCTP_DATAGRAM_MARKED;
2983 					}
2984 					if (tp1->rec.data.chunk_was_revoked) {
2985 						/* deflate the cwnd */
2986 						tp1->whoTo->cwnd -= tp1->book_size;
2987 						tp1->rec.data.chunk_was_revoked = 0;
2988 					}
2989 					/* NR Sack code here */
2990 					if (nr_sacking &&
2991 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2992 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
2993 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
2994 #ifdef INVARIANTS
2995 						} else {
2996 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
2997 #endif
2998 						}
2999 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3000 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3001 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3002 							stcb->asoc.trigger_reset = 1;
3003 						}
3004 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3005 						if (tp1->data) {
3006 							/* sa_ignore
3007 							 * NO_NULL_CHK */
3008 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3009 							sctp_m_freem(tp1->data);
3010 							tp1->data = NULL;
3011 						}
3012 						wake_him++;
3013 					}
3014 				}
3015 				break;
3016 			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3017 				break;
3018 			}
3019 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3020 			if ((tp1 == NULL) && (circled == 0)) {
3021 				circled++;
3022 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3023 			}
3024 		}		/* end while (tp1) */
3025 		if (tp1 == NULL) {
3026 			circled = 0;
3027 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3028 		}
3029 		/* In case the fragments were not in order we must reset */
3030 	}			/* end for (j = fragStart */
3031 	*p_tp1 = tp1;
3032 	return (wake_him);	/* Return value only used for nr-sack */
3033 }
3034 
3035 
3036 static int
3037 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3038     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3039     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3040     int num_seg, int num_nr_seg, int *rto_ok)
3041 {
3042 	struct sctp_gap_ack_block *frag, block;
3043 	struct sctp_tmit_chunk *tp1;
3044 	int i;
3045 	int num_frs = 0;
3046 	int chunk_freed;
3047 	int non_revocable;
3048 	uint16_t frag_strt, frag_end, prev_frag_end;
3049 
3050 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3051 	prev_frag_end = 0;
3052 	chunk_freed = 0;
3053 
3054 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3055 		if (i == num_seg) {
3056 			prev_frag_end = 0;
3057 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3058 		}
3059 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3060 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3061 		*offset += sizeof(block);
3062 		if (frag == NULL) {
3063 			return (chunk_freed);
3064 		}
3065 		frag_strt = ntohs(frag->start);
3066 		frag_end = ntohs(frag->end);
3067 
3068 		if (frag_strt > frag_end) {
3069 			/* This gap report is malformed, skip it. */
3070 			continue;
3071 		}
3072 		if (frag_strt <= prev_frag_end) {
3073 			/* This gap report is not in order, so restart. */
3074 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3075 		}
3076 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3077 			*biggest_tsn_acked = last_tsn + frag_end;
3078 		}
3079 		if (i < num_seg) {
3080 			non_revocable = 0;
3081 		} else {
3082 			non_revocable = 1;
3083 		}
3084 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3085 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3086 		    this_sack_lowest_newack, rto_ok)) {
3087 			chunk_freed = 1;
3088 		}
3089 		prev_frag_end = frag_end;
3090 	}
3091 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3092 		if (num_frs)
3093 			sctp_log_fr(*biggest_tsn_acked,
3094 			    *biggest_newly_acked_tsn,
3095 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3096 	}
3097 	return (chunk_freed);
3098 }
3099 
3100 static void
3101 sctp_check_for_revoked(struct sctp_tcb *stcb,
3102     struct sctp_association *asoc, uint32_t cumack,
3103     uint32_t biggest_tsn_acked)
3104 {
3105 	struct sctp_tmit_chunk *tp1;
3106 
3107 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3108 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3109 			/*
3110 			 * ok this guy is either ACKED or MARKED. If it is
3111 			 * ACKED it has been previously acked but not this
3112 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3113 			 * again.
3114 			 */
3115 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3116 				break;
3117 			}
3118 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3119 				/* it has been revoked */
3120 				tp1->sent = SCTP_DATAGRAM_SENT;
3121 				tp1->rec.data.chunk_was_revoked = 1;
3122 				/*
3123 				 * We must add this stuff back in to assure
3124 				 * timers and such get started.
3125 				 */
3126 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3127 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3128 					    tp1->whoTo->flight_size,
3129 					    tp1->book_size,
3130 					    (uint32_t) (uintptr_t) tp1->whoTo,
3131 					    tp1->rec.data.tsn);
3132 				}
3133 				sctp_flight_size_increase(tp1);
3134 				sctp_total_flight_increase(stcb, tp1);
3135 				/*
3136 				 * We inflate the cwnd to compensate for our
3137 				 * artificial inflation of the flight_size.
3138 				 */
3139 				tp1->whoTo->cwnd += tp1->book_size;
3140 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3141 					sctp_log_sack(asoc->last_acked_seq,
3142 					    cumack,
3143 					    tp1->rec.data.tsn,
3144 					    0,
3145 					    0,
3146 					    SCTP_LOG_TSN_REVOKED);
3147 				}
3148 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3149 				/* it has been re-acked in this SACK */
3150 				tp1->sent = SCTP_DATAGRAM_ACKED;
3151 			}
3152 		}
3153 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3154 			break;
3155 	}
3156 }
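/*
 * Note on "revoked" (an illustrative example, not normative): a TSN that an
 * earlier SACK covered with a gap-ack block but that the current SACK no
 * longer covers has been revoked by the peer. E.g. if a prior SACK acked
 * TSN 105 via a gap block and this SACK's blocks stop at 104, TSN 105 goes
 * back to SCTP_DATAGRAM_SENT above and the flight size and cwnd are
 * re-inflated so the retransmission machinery covers it again.
 */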
3157 
3158 
3159 static void
3160 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3161     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3162 {
3163 	struct sctp_tmit_chunk *tp1;
3164 	int strike_flag = 0;
3165 	struct timeval now;
3166 	int tot_retrans = 0;
3167 	uint32_t sending_seq;
3168 	struct sctp_nets *net;
3169 	int num_dests_sacked = 0;
3170 
3171 	/*
3172 	 * select the sending_seq: this is either the next thing ready to be
3173 	 * sent but not yet transmitted, OR the next seq we will assign.
3174 	 */
3175 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3176 	if (tp1 == NULL) {
3177 		sending_seq = asoc->sending_seq;
3178 	} else {
3179 		sending_seq = tp1->rec.data.tsn;
3180 	}
3181 
3182 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3183 	if ((asoc->sctp_cmt_on_off > 0) &&
3184 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3185 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3186 			if (net->saw_newack)
3187 				num_dests_sacked++;
3188 		}
3189 	}
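	/*
	 * Illustrative note: a SACK is "mixed" when it newly acks data sent
	 * to more than one destination. The DAC rule below is only applied
	 * when num_dests_sacked == 1, i.e. every newly acked TSN travelled
	 * to a single destination.
	 */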
3190 	if (stcb->asoc.prsctp_supported) {
3191 		(void)SCTP_GETTIME_TIMEVAL(&now);
3192 	}
3193 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3194 		strike_flag = 0;
3195 		if (tp1->no_fr_allowed) {
3196 			/* this one had a timeout or something */
3197 			continue;
3198 		}
3199 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3200 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3201 				sctp_log_fr(biggest_tsn_newly_acked,
3202 				    tp1->rec.data.tsn,
3203 				    tp1->sent,
3204 				    SCTP_FR_LOG_CHECK_STRIKE);
3205 		}
3206 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3207 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3208 			/* done */
3209 			break;
3210 		}
3211 		if (stcb->asoc.prsctp_supported) {
3212 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3213 				/* Is it expired? */
3214 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3215 					/* Yes so drop it */
3216 					if (tp1->data != NULL) {
3217 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3218 						    SCTP_SO_NOT_LOCKED);
3219 					}
3220 					continue;
3221 				}
3222 			}
3223 		}
3224 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3225 			/* we are beyond the tsn in the sack  */
3226 			break;
3227 		}
3228 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3229 			/* either a RESEND, ACKED, or MARKED */
3230 			/* skip */
3231 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3232 				/* Continue striking FWD-TSN chunks */
3233 				tp1->rec.data.fwd_tsn_cnt++;
3234 			}
3235 			continue;
3236 		}
3237 		/*
3238 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3239 		 */
3240 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3241 			/*
3242 			 * No new acks were received for data sent to this
3243 			 * dest. Therefore, according to the SFR algo for
3244 			 * CMT, no data sent to this dest can be marked for
3245 			 * FR using this SACK.
3246 			 */
3247 			continue;
3248 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3249 		    tp1->whoTo->this_sack_highest_newack)) {
3250 			/*
3251 			 * CMT: New acks were received for data sent to
3252 			 * this dest. But no new acks were seen for data
3253 			 * sent after tp1. Therefore, according to the SFR
3254 			 * algo for CMT, tp1 cannot be marked for FR using
3255 			 * this SACK. This step covers part of the DAC algo
3256 			 * and the HTNA algo as well.
3257 			 */
3258 			continue;
3259 		}
3260 		/*
3261 		 * Here we check to see if we have already done a FR
3262 		 * and if so we see if the biggest TSN we saw in the sack is
3263 		 * smaller than the recovery point. If so we don't strike
3264 		 * the tsn... otherwise we CAN strike the TSN.
3265 		 */
3266 		/*
3267 		 * @@@ JRI: Check for CMT if (accum_moved &&
3268 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3269 		 * 0)) {
3270 		 */
3271 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3272 			/*
3273 			 * Strike the TSN if in fast-recovery and cum-ack
3274 			 * moved.
3275 			 */
3276 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3277 				sctp_log_fr(biggest_tsn_newly_acked,
3278 				    tp1->rec.data.tsn,
3279 				    tp1->sent,
3280 				    SCTP_FR_LOG_STRIKE_CHUNK);
3281 			}
3282 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3283 				tp1->sent++;
3284 			}
3285 			if ((asoc->sctp_cmt_on_off > 0) &&
3286 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3287 				/*
3288 				 * CMT DAC algorithm: If the SACK's DAC flag
3289 				 * is set to 0, the lowest_newack test will
3290 				 * not pass because it would have been set to
3291 				 * the cumack earlier. If the chunk is not
3292 				 * already marked for rtx, the SACK is not
3293 				 * mixed, and tp1 is not between two sacked
3294 				 * TSNs, then mark it one more time. NOTE we
3295 				 * mark one additional time since the SACK
3296 				 * DAC flag indicates that two packets have
3297 				 * been received after this missing TSN.
3298 				 */
3299 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3300 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3301 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3302 						sctp_log_fr(16 + num_dests_sacked,
3303 						    tp1->rec.data.tsn,
3304 						    tp1->sent,
3305 						    SCTP_FR_LOG_STRIKE_CHUNK);
3306 					}
3307 					tp1->sent++;
3308 				}
3309 			}
3310 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3311 		    (asoc->sctp_cmt_on_off == 0)) {
3312 			/*
3313 			 * For those that have done a FR we must take
3314 			 * special consideration if we strike. I.e the
3315 			 * biggest_newly_acked must be higher than the
3316 			 * sending_seq at the time we did the FR.
3317 			 */
3318 			if (
3319 #ifdef SCTP_FR_TO_ALTERNATE
3320 			/*
3321 			 * If FR's go to new networks, then we must only do
3322 			 * this for singly homed asoc's. However if the FR's
3323 			 * go to the same network (Armando's work) then it's
3324 			 * OK to FR multiple times.
3325 			 */
3326 			    (asoc->numnets < 2)
3327 #else
3328 			    (1)
3329 #endif
3330 			    ) {
3331 
3332 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3333 				    tp1->rec.data.fast_retran_tsn)) {
3334 					/*
3335 					 * Strike the TSN, since this ack is
3336 					 * beyond where things were when we
3337 					 * did a FR.
3338 					 */
3339 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3340 						sctp_log_fr(biggest_tsn_newly_acked,
3341 						    tp1->rec.data.tsn,
3342 						    tp1->sent,
3343 						    SCTP_FR_LOG_STRIKE_CHUNK);
3344 					}
3345 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3346 						tp1->sent++;
3347 					}
3348 					strike_flag = 1;
3349 					if ((asoc->sctp_cmt_on_off > 0) &&
3350 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3351 						/*
3352 						 * CMT DAC algorithm: If
3353 						 * the SACK's DAC flag is
3354 						 * set to 0, the
3355 						 * lowest_newack test will
3356 						 * not pass, as it would
3357 						 * have been set to the
3358 						 * cumack earlier. If the
3359 						 * chunk is not already
3360 						 * marked for rtx, the
3361 						 * SACK is not mixed, and
3362 						 * tp1 is not between two
3363 						 * sacked TSNs, mark it
3364 						 * one more time. NOTE we
3365 						 * mark once more since the
3366 						 * DAC flag indicates two
3367 						 * packets were received
3368 						 * after this missing TSN.
3369 						 */
3370 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3371 						    (num_dests_sacked == 1) &&
3372 						    SCTP_TSN_GT(this_sack_lowest_newack,
3373 						    tp1->rec.data.tsn)) {
3374 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3375 								sctp_log_fr(32 + num_dests_sacked,
3376 								    tp1->rec.data.tsn,
3377 								    tp1->sent,
3378 								    SCTP_FR_LOG_STRIKE_CHUNK);
3379 							}
3380 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3381 								tp1->sent++;
3382 							}
3383 						}
3384 					}
3385 				}
3386 			}
3387 			/*
3388 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3389 			 * algo covers HTNA.
3390 			 */
3391 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3392 		    biggest_tsn_newly_acked)) {
3393 			/*
3394 			 * We don't strike these: this is the HTNA
3395 			 * algorithm, i.e. we don't strike if our TSN is
3396 			 * larger than the Highest TSN Newly Acked.
3397 			 */
3398 			;
3399 		} else {
3400 			/* Strike the TSN */
3401 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3402 				sctp_log_fr(biggest_tsn_newly_acked,
3403 				    tp1->rec.data.tsn,
3404 				    tp1->sent,
3405 				    SCTP_FR_LOG_STRIKE_CHUNK);
3406 			}
3407 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3408 				tp1->sent++;
3409 			}
3410 			if ((asoc->sctp_cmt_on_off > 0) &&
3411 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3412 				/*
3413 				 * CMT DAC algorithm: If the SACK's DAC flag
3414 				 * is set to 0, the lowest_newack test will
3415 				 * not pass because it would have been set to
3416 				 * the cumack earlier. If the chunk is not
3417 				 * already marked for rtx, the SACK is not
3418 				 * mixed, and tp1 is not between two sacked
3419 				 * TSNs, then mark it one more time. NOTE we
3420 				 * mark one additional time since the SACK
3421 				 * DAC flag indicates that two packets have
3422 				 * been received after this missing TSN.
3423 				 */
3424 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3425 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3426 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3427 						sctp_log_fr(48 + num_dests_sacked,
3428 						    tp1->rec.data.tsn,
3429 						    tp1->sent,
3430 						    SCTP_FR_LOG_STRIKE_CHUNK);
3431 					}
3432 					tp1->sent++;
3433 				}
3434 			}
3435 		}
3436 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3437 			struct sctp_nets *alt;
3438 
3439 			/* fix counts and things */
3440 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3441 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3442 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3443 				    tp1->book_size,
3444 				    (uint32_t) (uintptr_t) tp1->whoTo,
3445 				    tp1->rec.data.tsn);
3446 			}
3447 			if (tp1->whoTo) {
3448 				tp1->whoTo->net_ack++;
3449 				sctp_flight_size_decrease(tp1);
3450 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3451 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3452 					    tp1);
3453 				}
3454 			}
3455 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3456 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3457 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3458 			}
3459 			/* add back to the rwnd */
3460 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3461 
3462 			/* remove from the total flight */
3463 			sctp_total_flight_decrease(stcb, tp1);
3464 
3465 			if ((stcb->asoc.prsctp_supported) &&
3466 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3467 				/* Has it been retransmitted tv_sec times? -
3468 				 * we store the retran count there. */
3469 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3470 					/* Yes, so drop it */
3471 					if (tp1->data != NULL) {
3472 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3473 						    SCTP_SO_NOT_LOCKED);
3474 					}
3475 					/* Make sure to flag we had a FR */
3476 					tp1->whoTo->net_ack++;
3477 					continue;
3478 				}
3479 			}
3480 			/* SCTP_PRINTF("OK, we are now ready to FR this
3481 			 * guy\n"); */
3482 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3483 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3484 				    0, SCTP_FR_MARKED);
3485 			}
3486 			if (strike_flag) {
3487 				/* This is a subsequent FR */
3488 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3489 			}
3490 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3491 			if (asoc->sctp_cmt_on_off > 0) {
3492 				/*
3493 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3494 				 * If CMT is being used, then pick dest with
3495 				 * largest ssthresh for any retransmission.
3496 				 */
3497 				tp1->no_fr_allowed = 1;
3498 				alt = tp1->whoTo;
3499 				/* sa_ignore NO_NULL_CHK */
3500 				if (asoc->sctp_cmt_pf > 0) {
3501 					/* JRS 5/18/07 - If CMT PF is on,
3502 					 * use the PF version of
3503 					 * find_alt_net() */
3504 					alt = sctp_find_alternate_net(stcb, alt, 2);
3505 				} else {
3506 					/* JRS 5/18/07 - If only CMT is on,
3507 					 * use the CMT version of
3508 					 * find_alt_net() */
3509 					/* sa_ignore NO_NULL_CHK */
3510 					alt = sctp_find_alternate_net(stcb, alt, 1);
3511 				}
3512 				if (alt == NULL) {
3513 					alt = tp1->whoTo;
3514 				}
3515 				/*
3516 				 * CUCv2: If a different dest is picked for
3517 				 * the retransmission, then new
3518 				 * (rtx-)pseudo_cumack needs to be tracked
3519 				 * for orig dest. Let CUCv2 track new (rtx-)
3520 				 * pseudo-cumack always.
3521 				 */
3522 				if (tp1->whoTo) {
3523 					tp1->whoTo->find_pseudo_cumack = 1;
3524 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3525 				}
3526 			} else {/* CMT is OFF */
3527 
3528 #ifdef SCTP_FR_TO_ALTERNATE
3529 				/* Can we find an alternate? */
3530 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3531 #else
3532 				/*
3533 				 * default behavior is to NOT retransmit
3534 				 * FR's to an alternate. Armando Caro's
3535 				 * paper details why.
3536 				 */
3537 				alt = tp1->whoTo;
3538 #endif
3539 			}
3540 
3541 			tp1->rec.data.doing_fast_retransmit = 1;
3542 			tot_retrans++;
3543 			/* mark the sending seq for possible subsequent FR's */
3544 			/*
3545 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3546 			 * (uint32_t)tpi->rec.data.tsn);
3547 			 */
3548 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3549 				/*
3550 				 * If the send queue is empty then
3551 				 * sending_seq is the next sequence number
3552 				 * that will be assigned, i.e. one past the
3553 				 * one we last sent.
3554 				 */
3555 				tp1->rec.data.fast_retran_tsn = sending_seq;
3556 			} else {
3557 				/*
3558 				 * If there are chunks on the send queue
3559 				 * (unsent data that has made it from the
3560 				 * stream queues but not out the door), we
3561 				 * take the first one, which will have the
3562 				 * lowest TSN, i.e. one past the one we
3563 				 * last sent.
3564 				 */
3565 				struct sctp_tmit_chunk *ttt;
3566 
3567 				ttt = TAILQ_FIRST(&asoc->send_queue);
3568 				tp1->rec.data.fast_retran_tsn =
3569 				    ttt->rec.data.tsn;
3570 			}
3571 
3572 			if (tp1->do_rtt) {
3573 				/*
3574 				 * this guy had an RTO calculation pending on
3575 				 * it, cancel it
3576 				 */
3577 				if ((tp1->whoTo != NULL) &&
3578 				    (tp1->whoTo->rto_needed == 0)) {
3579 					tp1->whoTo->rto_needed = 1;
3580 				}
3581 				tp1->do_rtt = 0;
3582 			}
3583 			if (alt != tp1->whoTo) {
3584 				/* yes, there is an alternate. */
3585 				sctp_free_remote_addr(tp1->whoTo);
3586 				/* sa_ignore FREED_MEMORY */
3587 				tp1->whoTo = alt;
3588 				atomic_add_int(&alt->ref_count, 1);
3589 			}
3590 		}
3591 	}
3592 }
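/*
 * Sketch of the strike accounting above (illustrative; the exact state
 * values are defined in sctp_constants.h): each SACK that implies a chunk
 * was passed over bumps tp1->sent, and once it reaches
 * SCTP_DATAGRAM_RESEND the chunk is taken out of flight, an alternate
 * destination may be picked, and it is queued for fast retransmit:
 *
 *	SENT --strike--> SENT+1 --strike--> ... --> RESEND (FR issued)
 *
 * roughly the SCTP analogue of TCP's three-duplicate-ACK rule.
 */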
3593 
3594 struct sctp_tmit_chunk *
3595 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3596     struct sctp_association *asoc)
3597 {
3598 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3599 	struct timeval now;
3600 	int now_filled = 0;
3601 
3602 	if (asoc->prsctp_supported == 0) {
3603 		return (NULL);
3604 	}
3605 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3606 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3607 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3608 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3609 			/* no chance to advance, out of here */
3610 			break;
3611 		}
3612 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3613 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3614 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3615 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3616 				    asoc->advanced_peer_ack_point,
3617 				    tp1->rec.data.tsn, 0, 0);
3618 			}
3619 		}
3620 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3621 			/*
3622 			 * We can't fwd-tsn past any that are reliable,
3623 			 * i.e. retransmitted until the asoc fails.
3624 			 */
3625 			break;
3626 		}
3627 		if (!now_filled) {
3628 			(void)SCTP_GETTIME_TIMEVAL(&now);
3629 			now_filled = 1;
3630 		}
3631 		/*
3632 		 * now we have a chunk which is marked for another
3633 		 * retransmission to a PR-stream but may have run out of its
3634 		 * chances already OR has been marked to skip now. Can we skip
3635 		 * it if it's a resend?
3636 		 */
3637 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3638 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3639 			/*
3640 			 * Now is this one marked for resend and its time is
3641 			 * now up?
3642 			 */
3643 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3644 				/* Yes so drop it */
3645 				if (tp1->data) {
3646 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3647 					    1, SCTP_SO_NOT_LOCKED);
3648 				}
3649 			} else {
3650 				/*
3651 				 * No, we are done when we hit one marked
3652 				 * for resend whose time has not expired.
3653 				 */
3654 				break;
3655 			}
3656 		}
3657 		/*
3658 		 * Ok now if this chunk is marked to drop, we can clean up
3659 		 * the chunk, advance our peer ack point and we can check
3660 		 * the next chunk.
3661 		 */
3662 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3663 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3664 			/* the advanced PeerAckPoint moves forward */
3665 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3666 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3667 				a_adv = tp1;
3668 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3669 				/* No update but we do save the chk */
3670 				a_adv = tp1;
3671 			}
3672 		} else {
3673 			/*
3674 			 * If it is still in RESEND we can advance no
3675 			 * further
3676 			 */
3677 			break;
3678 		}
3679 	}
3680 	return (a_adv);
3681 }
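/*
 * Worked example for the advance above (illustrative, per the PR-SCTP
 * rules of RFC 3758): with cum-ack 10 and a sent queue holding TSN 11
 * (abandoned), 12 (abandoned) and 13 (reliable), the loop moves
 * advanced_peer_ack_point to 12 and returns the chunk for TSN 12; the
 * FORWARD-TSN built from it tells the peer to skip 11 and 12, while the
 * reliable TSN 13 blocks any further advance until it is acked.
 */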
3682 
3683 static int
3684 sctp_fs_audit(struct sctp_association *asoc)
3685 {
3686 	struct sctp_tmit_chunk *chk;
3687 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3688 	int ret;
3689 #ifndef INVARIANTS
3690 	int entry_flight, entry_cnt;
3691 #endif
3692 
3693 	ret = 0;
3694 #ifndef INVARIANTS
3695 	entry_flight = asoc->total_flight;
3696 	entry_cnt = asoc->total_flight_count;
3697 #endif
3698 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3699 		return (0);
3700 
3701 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3702 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3703 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3704 			    chk->rec.data.tsn,
3705 			    chk->send_size,
3706 			    chk->snd_count);
3707 			inflight++;
3708 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3709 			resend++;
3710 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3711 			inbetween++;
3712 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3713 			above++;
3714 		} else {
3715 			acked++;
3716 		}
3717 	}
3718 
3719 	if ((inflight > 0) || (inbetween > 0)) {
3720 #ifdef INVARIANTS
3721 		panic("Flight size-express incorrect? \n");
3722 #else
3723 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3724 		    entry_flight, entry_cnt);
3725 
3726 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3727 		    inflight, inbetween, resend, above, acked);
3728 		ret = 1;
3729 #endif
3730 	}
3731 	return (ret);
3732 }
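/*
 * Note: sctp_fs_audit() is called from the SACK handlers below when no T3
 * timer was started (j == 0) even though the sent queue is non-empty; at
 * that point nothing should still be counted in flight, so any chunk found
 * below RESEND (inflight) or between RESEND and ACKED (inbetween)
 * indicates the flight-size accounting has drifted.
 */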
3733 
3734 
3735 static void
3736 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3737     struct sctp_association *asoc,
3738     struct sctp_tmit_chunk *tp1)
3739 {
3740 	tp1->window_probe = 0;
3741 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3742 		/* TSNs skipped; we do NOT move back. */
3743 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3744 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3745 		    tp1->book_size,
3746 		    (uint32_t) (uintptr_t) tp1->whoTo,
3747 		    tp1->rec.data.tsn);
3748 		return;
3749 	}
3750 	/* First setup this by shrinking flight */
3751 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3752 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3753 		    tp1);
3754 	}
3755 	sctp_flight_size_decrease(tp1);
3756 	sctp_total_flight_decrease(stcb, tp1);
3757 	/* Now mark for resend */
3758 	tp1->sent = SCTP_DATAGRAM_RESEND;
3759 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3760 
3761 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3762 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3763 		    tp1->whoTo->flight_size,
3764 		    tp1->book_size,
3765 		    (uint32_t) (uintptr_t) tp1->whoTo,
3766 		    tp1->rec.data.tsn);
3767 	}
3768 }
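/*
 * Note: this path is taken from the SACK handlers below when the peer's
 * rwnd reopens after a zero-window phase. The chunk that served as the
 * window probe is removed from the flight counts and re-marked
 * SCTP_DATAGRAM_RESEND, so it goes out again as ordinary data instead of
 * waiting for a T3 timeout.
 */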
3769 
3770 void
3771 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3772     uint32_t rwnd, int *abort_now, int ecne_seen)
3773 {
3774 	struct sctp_nets *net;
3775 	struct sctp_association *asoc;
3776 	struct sctp_tmit_chunk *tp1, *tp2;
3777 	uint32_t old_rwnd;
3778 	int win_probe_recovery = 0;
3779 	int win_probe_recovered = 0;
3780 	int j, done_once = 0;
3781 	int rto_ok = 1;
3782 	uint32_t send_s;
3783 
3784 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3785 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3786 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3787 	}
3788 	SCTP_TCB_LOCK_ASSERT(stcb);
3789 #ifdef SCTP_ASOCLOG_OF_TSNS
3790 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3791 	stcb->asoc.cumack_log_at++;
3792 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3793 		stcb->asoc.cumack_log_at = 0;
3794 	}
3795 #endif
3796 	asoc = &stcb->asoc;
3797 	old_rwnd = asoc->peers_rwnd;
3798 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3799 		/* old ack */
3800 		return;
3801 	} else if (asoc->last_acked_seq == cumack) {
3802 		/* Window update sack */
3803 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3804 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3805 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3806 			/* SWS sender side engages */
3807 			asoc->peers_rwnd = 0;
3808 		}
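		/*
		 * Worked example (illustrative numbers only): if the SACK
		 * advertises rwnd = 8000 while 6000 bytes are in flight in
		 * 4 chunks and sctp_peer_chunk_oh is 256, then peers_rwnd =
		 * 8000 - (6000 + 4 * 256) = 976; a result below the
		 * sctp_sws_sender threshold is clamped to 0 above to avoid
		 * silly-window-syndrome sized sends.
		 */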
3809 		if (asoc->peers_rwnd > old_rwnd) {
3810 			goto again;
3811 		}
3812 		return;
3813 	}
3814 	/* First setup for CC stuff */
3815 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3816 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3817 			/* Drag along the window_tsn for cwr's */
3818 			net->cwr_window_tsn = cumack;
3819 		}
3820 		net->prev_cwnd = net->cwnd;
3821 		net->net_ack = 0;
3822 		net->net_ack2 = 0;
3823 
3824 		/*
3825 		 * CMT: Reset CUC and Fast recovery algo variables before
3826 		 * SACK processing
3827 		 */
3828 		net->new_pseudo_cumack = 0;
3829 		net->will_exit_fast_recovery = 0;
3830 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3831 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3832 		}
3833 	}
3834 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3835 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3836 		    sctpchunk_listhead);
3837 		send_s = tp1->rec.data.tsn + 1;
3838 	} else {
3839 		send_s = asoc->sending_seq;
3840 	}
3841 	if (SCTP_TSN_GE(cumack, send_s)) {
3842 		struct mbuf *op_err;
3843 		char msg[SCTP_DIAG_INFO_LEN];
3844 
3845 		*abort_now = 1;
3846 		/* XXX */
3847 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3848 		    cumack, send_s);
3849 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3850 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3851 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3852 		return;
3853 	}
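	/*
	 * Note: a legal cumack must fall in the window last_acked_seq <=
	 * cumack < send_s, using serial-number arithmetic (RFC 1982);
	 * send_s is one past the highest TSN handed to the wire, so
	 * anything at or beyond it acks data we never sent and the
	 * association is aborted above as a protocol violation.
	 */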
3854 	asoc->this_sack_highest_gap = cumack;
3855 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3856 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3857 		    stcb->asoc.overall_error_count,
3858 		    0,
3859 		    SCTP_FROM_SCTP_INDATA,
3860 		    __LINE__);
3861 	}
3862 	stcb->asoc.overall_error_count = 0;
3863 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3864 		/* process the new consecutive TSN first */
3865 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3866 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3867 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3868 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3869 				}
3870 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3871 					/*
3872 					 * If it is less than ACKED, it is
3873 					 * now no-longer in flight. Higher
3874 					 * values may occur during marking
3875 					 */
3876 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3877 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3878 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3879 							    tp1->whoTo->flight_size,
3880 							    tp1->book_size,
3881 							    (uint32_t) (uintptr_t) tp1->whoTo,
3882 							    tp1->rec.data.tsn);
3883 						}
3884 						sctp_flight_size_decrease(tp1);
3885 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3886 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3887 							    tp1);
3888 						}
3889 						/* sa_ignore NO_NULL_CHK */
3890 						sctp_total_flight_decrease(stcb, tp1);
3891 					}
3892 					tp1->whoTo->net_ack += tp1->send_size;
3893 					if (tp1->snd_count < 2) {
3894 						/*
3895 						 * True non-retransmitted
3896 						 * chunk
3897 						 */
3898 						tp1->whoTo->net_ack2 +=
3899 						    tp1->send_size;
3900 
3901 						/* update RTO too? */
3902 						if (tp1->do_rtt) {
3903 							if (rto_ok) {
3904 								tp1->whoTo->RTO =
3905 								/*
3906 								 * sa_ignore
3907 								 * NO_NULL_CHK
3908 								 */
3909 								    sctp_calculate_rto(stcb,
3910 								    asoc, tp1->whoTo,
3911 								    &tp1->sent_rcv_time,
3912 								    sctp_align_safe_nocopy,
3913 								    SCTP_RTT_FROM_DATA);
3914 								rto_ok = 0;
3915 							}
3916 							if (tp1->whoTo->rto_needed == 0) {
3917 								tp1->whoTo->rto_needed = 1;
3918 							}
3919 							tp1->do_rtt = 0;
3920 						}
3921 					}
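					/*
					 * Note: this is Karn's algorithm.
					 * Only chunks with snd_count < 2
					 * (never retransmitted) yield
					 * unambiguous RTT samples, and
					 * rto_ok limits the update to one
					 * sample per SACK.
					 */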
3922 					/*
3923 					 * CMT: CUCv2 algorithm. From the
3924 					 * cumack'd TSNs, for each TSN being
3925 					 * acked for the first time, set the
3926 					 * following variables for the
3927 					 * corresp destination.
3928 					 * new_pseudo_cumack will trigger a
3929 					 * cwnd update.
3930 					 * find_(rtx_)pseudo_cumack will
3931 					 * trigger search for the next
3932 					 * expected (rtx-)pseudo-cumack.
3933 					 */
3934 					tp1->whoTo->new_pseudo_cumack = 1;
3935 					tp1->whoTo->find_pseudo_cumack = 1;
3936 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3937 
3938 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3939 						/* sa_ignore NO_NULL_CHK */
3940 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3941 					}
3942 				}
3943 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3944 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3945 				}
3946 				if (tp1->rec.data.chunk_was_revoked) {
3947 					/* deflate the cwnd */
3948 					tp1->whoTo->cwnd -= tp1->book_size;
3949 					tp1->rec.data.chunk_was_revoked = 0;
3950 				}
3951 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3952 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3953 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
3954 #ifdef INVARIANTS
3955 					} else {
3956 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3957 #endif
3958 					}
3959 				}
3960 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3961 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3962 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
3963 					asoc->trigger_reset = 1;
3964 				}
3965 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3966 				if (tp1->data) {
3967 					/* sa_ignore NO_NULL_CHK */
3968 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3969 					sctp_m_freem(tp1->data);
3970 					tp1->data = NULL;
3971 				}
3972 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3973 					sctp_log_sack(asoc->last_acked_seq,
3974 					    cumack,
3975 					    tp1->rec.data.tsn,
3976 					    0,
3977 					    0,
3978 					    SCTP_LOG_FREE_SENT);
3979 				}
3980 				asoc->sent_queue_cnt--;
3981 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3982 			} else {
3983 				break;
3984 			}
3985 		}
3986 
3987 	}
3988 	/* sa_ignore NO_NULL_CHK */
3989 	if (stcb->sctp_socket) {
3990 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 		struct socket *so;
3992 
3993 #endif
3994 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3995 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3996 			/* sa_ignore NO_NULL_CHK */
3997 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3998 		}
3999 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4000 		so = SCTP_INP_SO(stcb->sctp_ep);
4001 		atomic_add_int(&stcb->asoc.refcnt, 1);
4002 		SCTP_TCB_UNLOCK(stcb);
4003 		SCTP_SOCKET_LOCK(so, 1);
4004 		SCTP_TCB_LOCK(stcb);
4005 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4006 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4007 			/* assoc was freed while we were unlocked */
4008 			SCTP_SOCKET_UNLOCK(so, 1);
4009 			return;
4010 		}
4011 #endif
4012 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4013 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 		SCTP_SOCKET_UNLOCK(so, 1);
4015 #endif
4016 	} else {
4017 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4018 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4019 		}
4020 	}
4021 
4022 	/* JRS - Use the congestion control given in the CC module */
4023 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4024 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4025 			if (net->net_ack2 > 0) {
4026 				/*
4027 				 * Karn's rule applies to clearing error
4028 				 * count; this is optional.
4029 				 */
4030 				net->error_count = 0;
4031 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4032 					/* addr came good */
4033 					net->dest_state |= SCTP_ADDR_REACHABLE;
4034 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4035 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4036 				}
4037 				if (net == stcb->asoc.primary_destination) {
4038 					if (stcb->asoc.alternate) {
4039 						/* release the alternate,
4040 						 * primary is good */
4041 						sctp_free_remote_addr(stcb->asoc.alternate);
4042 						stcb->asoc.alternate = NULL;
4043 					}
4044 				}
4045 				if (net->dest_state & SCTP_ADDR_PF) {
4046 					net->dest_state &= ~SCTP_ADDR_PF;
4047 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4048 					    stcb->sctp_ep, stcb, net,
4049 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4050 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4051 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4052 					/* Done with this net */
4053 					net->net_ack = 0;
4054 				}
4055 				/* restore any doubled timers */
4056 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4057 				if (net->RTO < stcb->asoc.minrto) {
4058 					net->RTO = stcb->asoc.minrto;
4059 				}
4060 				if (net->RTO > stcb->asoc.maxrto) {
4061 					net->RTO = stcb->asoc.maxrto;
4062 				}
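				/*
				 * Note: this recomputes RTO from the
				 * smoothed RTT (lastsa, kept scaled by
				 * SCTP_RTT_SHIFT) plus the variance term
				 * (lastsv), in the style of RFC 6298,
				 * discarding any exponential backoff from
				 * earlier timeouts before clamping to
				 * [minrto, maxrto].
				 */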
4063 			}
4064 		}
4065 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4066 	}
4067 	asoc->last_acked_seq = cumack;
4068 
4069 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4070 		/* nothing left in-flight */
4071 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4072 			net->flight_size = 0;
4073 			net->partial_bytes_acked = 0;
4074 		}
4075 		asoc->total_flight = 0;
4076 		asoc->total_flight_count = 0;
4077 	}
4078 	/* RWND update */
4079 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4080 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4081 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4082 		/* SWS sender side engages */
4083 		asoc->peers_rwnd = 0;
4084 	}
4085 	if (asoc->peers_rwnd > old_rwnd) {
4086 		win_probe_recovery = 1;
4087 	}
4088 	/* Now assure a timer where data is queued at */
4089 again:
4090 	j = 0;
4091 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4092 		int to_ticks;
4093 
4094 		if (win_probe_recovery && (net->window_probe)) {
4095 			win_probe_recovered = 1;
4096 			/*
4097 			 * Find the first chunk that was used with a window
4098 			 * probe and clear its sent state
4099 			 */
4100 			/* sa_ignore FREED_MEMORY */
4101 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4102 				if (tp1->window_probe) {
4103 					/* move back to data send queue */
4104 					sctp_window_probe_recovery(stcb, asoc, tp1);
4105 					break;
4106 				}
4107 			}
4108 		}
4109 		if (net->RTO == 0) {
4110 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4111 		} else {
4112 			to_ticks = MSEC_TO_TICKS(net->RTO);
4113 		}
4114 		if (net->flight_size) {
4115 			j++;
4116 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4117 			    sctp_timeout_handler, &net->rxt_timer);
4118 			if (net->window_probe) {
4119 				net->window_probe = 0;
4120 			}
4121 		} else {
4122 			if (net->window_probe) {
4123 				/* In window probes we must assure a timer
4124 				 * is still running there */
4125 				net->window_probe = 0;
4126 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4127 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4128 					    sctp_timeout_handler, &net->rxt_timer);
4129 				}
4130 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4131 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4132 				    stcb, net,
4133 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4134 			}
4135 		}
4136 	}
4137 	if ((j == 0) &&
4138 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4139 	    (asoc->sent_queue_retran_cnt == 0) &&
4140 	    (win_probe_recovered == 0) &&
4141 	    (done_once == 0)) {
4142 		/*
4143 		 * huh, this should not happen unless all packets are
4144 		 * PR-SCTP and marked to skip of course.
4145 		 */
4146 		if (sctp_fs_audit(asoc)) {
4147 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4148 				net->flight_size = 0;
4149 			}
4150 			asoc->total_flight = 0;
4151 			asoc->total_flight_count = 0;
4152 			asoc->sent_queue_retran_cnt = 0;
4153 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4154 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4155 					sctp_flight_size_increase(tp1);
4156 					sctp_total_flight_increase(stcb, tp1);
4157 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4158 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4159 				}
4160 			}
4161 		}
4162 		done_once = 1;
4163 		goto again;
4164 	}
4165 	/**********************************/
4166 	/* Now what about shutdown issues */
4167 	/**********************************/
4168 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4169 		/* nothing left on sendqueue.. consider done */
4170 		/* clean up */
4171 		if ((asoc->stream_queue_cnt == 1) &&
4172 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4173 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4174 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4175 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4176 		}
4177 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4178 		    (asoc->stream_queue_cnt == 0)) {
4179 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4180 				/* Need to abort here */
4181 				struct mbuf *op_err;
4182 
4183 		abort_out_now:
4184 				*abort_now = 1;
4185 				/* XXX */
4186 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4187 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4188 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4189 				return;
4190 			} else {
4191 				struct sctp_nets *netp;
4192 
4193 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4194 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4195 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4196 				}
4197 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4198 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4199 				sctp_stop_timers_for_shutdown(stcb);
4200 				if (asoc->alternate) {
4201 					netp = asoc->alternate;
4202 				} else {
4203 					netp = asoc->primary_destination;
4204 				}
4205 				sctp_send_shutdown(stcb, netp);
4206 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4207 				    stcb->sctp_ep, stcb, netp);
4208 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4209 				    stcb->sctp_ep, stcb, netp);
4210 			}
4211 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4212 		    (asoc->stream_queue_cnt == 0)) {
4213 			struct sctp_nets *netp;
4214 
4215 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4216 				goto abort_out_now;
4217 			}
4218 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4219 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4220 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4221 			sctp_stop_timers_for_shutdown(stcb);
4222 			if (asoc->alternate) {
4223 				netp = asoc->alternate;
4224 			} else {
4225 				netp = asoc->primary_destination;
4226 			}
4227 			sctp_send_shutdown_ack(stcb, netp);
4228 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4229 			    stcb->sctp_ep, stcb, netp);
4230 		}
4231 	}
4232 	/*********************************************/
4233 	/* Here we perform PR-SCTP procedures        */
4234 	/* (section 4.2)                             */
4235 	/*********************************************/
4236 	/* C1. update advancedPeerAckPoint */
4237 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4238 		asoc->advanced_peer_ack_point = cumack;
4239 	}
4240 	/* PR-Sctp issues need to be addressed too */
4241 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4242 		struct sctp_tmit_chunk *lchk;
4243 		uint32_t old_adv_peer_ack_point;
4244 
4245 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4246 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4247 		/* C3. See if we need to send a Fwd-TSN */
4248 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4249 			/*
4250 			 * ISSUE with ECN, see FWD-TSN processing.
4251 			 */
4252 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4253 				send_forward_tsn(stcb, asoc);
4254 			} else if (lchk) {
4255 				/* try to FR fwd-tsn's that get lost too */
4256 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4257 					send_forward_tsn(stcb, asoc);
4258 				}
4259 			}
4260 		}
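		/*
		 * Note: fwd_tsn_cnt is bumped each time a SACK strikes past
		 * an already-skipped chunk (see sctp_strike_gap_ack_chunks
		 * above), so three such misses re-send the FORWARD-TSN,
		 * fast-retransmit style, in case the earlier one was lost.
		 */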
4261 		if (lchk) {
4262 			/* Assure a timer is up */
4263 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4264 			    stcb->sctp_ep, stcb, lchk->whoTo);
4265 		}
4266 	}
4267 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4268 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4269 		    rwnd,
4270 		    stcb->asoc.peers_rwnd,
4271 		    stcb->asoc.total_flight,
4272 		    stcb->asoc.total_output_queue_size);
4273 	}
4274 }
4275 
4276 void
4277 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4278     struct sctp_tcb *stcb,
4279     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4280     int *abort_now, uint8_t flags,
4281     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4282 {
4283 	struct sctp_association *asoc;
4284 	struct sctp_tmit_chunk *tp1, *tp2;
4285 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4286 	uint16_t wake_him = 0;
4287 	uint32_t send_s = 0;
4288 	long j;
4289 	int accum_moved = 0;
4290 	int will_exit_fast_recovery = 0;
4291 	uint32_t a_rwnd, old_rwnd;
4292 	int win_probe_recovery = 0;
4293 	int win_probe_recovered = 0;
4294 	struct sctp_nets *net = NULL;
4295 	int done_once;
4296 	int rto_ok = 1;
4297 	uint8_t reneged_all = 0;
4298 	uint8_t cmt_dac_flag;
4299 
4300 	/*
4301 	 * we take any chance we can to service our queues since we cannot
4302 	 * get awoken when the socket is read from :<
4303 	 */
4304 	/*
4305 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4306 	 * old sack, if so discard. 2) If there is nothing left in the send
4307 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4308 	 * too, update any rwnd change and verify no timers are running,
4309 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4310 	 * moved; process these first and note that it moved. 4) Process any
4311 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4312 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4313 	 * sync up flightsizes and things, stop all timers and also check
4314 	 * for shutdown_pending state. If so then go ahead and send off the
4315 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4316 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4317 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4318 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4319 	 * if in shutdown_recv state.
4320 	 */
4321 	SCTP_TCB_LOCK_ASSERT(stcb);
4322 	/* CMT DAC algo */
4323 	this_sack_lowest_newack = 0;
4324 	SCTP_STAT_INCR(sctps_slowpath_sack);
4325 	last_tsn = cum_ack;
4326 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4327 #ifdef SCTP_ASOCLOG_OF_TSNS
4328 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4329 	stcb->asoc.cumack_log_at++;
4330 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4331 		stcb->asoc.cumack_log_at = 0;
4332 	}
4333 #endif
4334 	a_rwnd = rwnd;
4335 
4336 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4337 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4338 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4339 	}
4340 	old_rwnd = stcb->asoc.peers_rwnd;
4341 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4342 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4343 		    stcb->asoc.overall_error_count,
4344 		    0,
4345 		    SCTP_FROM_SCTP_INDATA,
4346 		    __LINE__);
4347 	}
4348 	stcb->asoc.overall_error_count = 0;
4349 	asoc = &stcb->asoc;
4350 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4351 		sctp_log_sack(asoc->last_acked_seq,
4352 		    cum_ack,
4353 		    0,
4354 		    num_seg,
4355 		    num_dup,
4356 		    SCTP_LOG_NEW_SACK);
4357 	}
4358 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4359 		uint16_t i;
4360 		uint32_t *dupdata, dblock;
4361 
4362 		for (i = 0; i < num_dup; i++) {
4363 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4364 			    sizeof(uint32_t), (uint8_t *) & dblock);
4365 			if (dupdata == NULL) {
4366 				break;
4367 			}
4368 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4369 		}
4370 	}
4371 	/* reality check */
4372 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4373 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4374 		    sctpchunk_listhead);
4375 		send_s = tp1->rec.data.tsn + 1;
4376 	} else {
4377 		tp1 = NULL;
4378 		send_s = asoc->sending_seq;
4379 	}
4380 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4381 		struct mbuf *op_err;
4382 		char msg[SCTP_DIAG_INFO_LEN];
4383 
4384 		/*
4385 		 * no way, we have not even sent this TSN out yet. Peer is
4386 		 * hopelessly messed up with us.
4387 		 */
4388 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4389 		    cum_ack, send_s);
4390 		if (tp1) {
4391 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4392 			    tp1->rec.data.tsn, (void *)tp1);
4393 		}
4394 hopeless_peer:
4395 		*abort_now = 1;
4396 		/* XXX */
4397 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4398 		    cum_ack, send_s);
4399 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4400 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4401 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4402 		return;
4403 	}
4404 	/**********************/
4405 	/* 1) check the range */
4406 	/**********************/
4407 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4408 		/* acking something behind */
4409 		return;
4410 	}
4411 	/* update the Rwnd of the peer */
4412 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4413 	    TAILQ_EMPTY(&asoc->send_queue) &&
4414 	    (asoc->stream_queue_cnt == 0)) {
4415 		/* nothing left on send/sent and strmq */
4416 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4417 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4418 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4419 		}
4420 		asoc->peers_rwnd = a_rwnd;
4421 		if (asoc->sent_queue_retran_cnt) {
4422 			asoc->sent_queue_retran_cnt = 0;
4423 		}
4424 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4425 			/* SWS sender side engages */
4426 			asoc->peers_rwnd = 0;
4427 		}
4428 		/* stop any timers */
4429 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4430 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4431 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4432 			net->partial_bytes_acked = 0;
4433 			net->flight_size = 0;
4434 		}
4435 		asoc->total_flight = 0;
4436 		asoc->total_flight_count = 0;
4437 		return;
4438 	}
4439 	/*
4440 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4441 	 * things. The total byte count acked is tracked in net_ack AND
4442 	 * net_ack2 is used to track the total bytes acked that are
4443 	 * unambiguous and were never retransmitted. We track these on a
4444 	 * per destination address basis.
4445 	 */
4446 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4447 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4448 			/* Drag along the window_tsn for cwr's */
4449 			net->cwr_window_tsn = cum_ack;
4450 		}
4451 		net->prev_cwnd = net->cwnd;
4452 		net->net_ack = 0;
4453 		net->net_ack2 = 0;
4454 
4455 		/*
4456 		 * CMT: Reset CUC and Fast recovery algo variables before
4457 		 * SACK processing
4458 		 */
4459 		net->new_pseudo_cumack = 0;
4460 		net->will_exit_fast_recovery = 0;
4461 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4462 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4463 		}
4464 	}
4465 	/* process the new consecutive TSN first */
4466 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4467 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4468 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4469 				accum_moved = 1;
4470 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4471 					/*
4472 					 * If it is less than ACKED, it is
4473 					 * now no-longer in flight. Higher
4474 					 * values may occur during marking
4475 					 */
4476 					if ((tp1->whoTo->dest_state &
4477 					    SCTP_ADDR_UNCONFIRMED) &&
4478 					    (tp1->snd_count < 2)) {
4479 						/*
4480 						 * If there was no retran
4481 						 * and the address is
4482 						 * un-confirmed and we sent
4483 						 * sacked... it's confirmed,
4484 						 * sacked.. its confirmed,
4485 						 * mark it so.
4486 						 */
4487 						tp1->whoTo->dest_state &=
4488 						    ~SCTP_ADDR_UNCONFIRMED;
4489 					}
4490 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4491 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4492 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4493 							    tp1->whoTo->flight_size,
4494 							    tp1->book_size,
4495 							    (uint32_t) (uintptr_t) tp1->whoTo,
4496 							    tp1->rec.data.tsn);
4497 						}
4498 						sctp_flight_size_decrease(tp1);
4499 						sctp_total_flight_decrease(stcb, tp1);
4500 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4501 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4502 							    tp1);
4503 						}
4504 					}
4505 					tp1->whoTo->net_ack += tp1->send_size;
4506 
4507 					/* CMT SFR and DAC algos */
4508 					this_sack_lowest_newack = tp1->rec.data.tsn;
4509 					tp1->whoTo->saw_newack = 1;
4510 
4511 					if (tp1->snd_count < 2) {
4512 						/*
4513 						 * True non-retransmitted
4514 						 * chunk
4515 						 */
4516 						tp1->whoTo->net_ack2 +=
4517 						    tp1->send_size;
4518 
4519 						/* update RTO too? */
4520 						if (tp1->do_rtt) {
4521 							if (rto_ok) {
4522 								tp1->whoTo->RTO =
4523 								    sctp_calculate_rto(stcb,
4524 								    asoc, tp1->whoTo,
4525 								    &tp1->sent_rcv_time,
4526 								    sctp_align_safe_nocopy,
4527 								    SCTP_RTT_FROM_DATA);
4528 								rto_ok = 0;
4529 							}
4530 							if (tp1->whoTo->rto_needed == 0) {
4531 								tp1->whoTo->rto_needed = 1;
4532 							}
4533 							tp1->do_rtt = 0;
4534 						}
4535 					}
4536 					/*
4537 					 * CMT: CUCv2 algorithm. From the
4538 					 * cumack'd TSNs, for each TSN being
4539 					 * acked for the first time, set the
4540 					 * following variables for the
4541 					 * corresp destination.
4542 					 * new_pseudo_cumack will trigger a
4543 					 * cwnd update.
4544 					 * find_(rtx_)pseudo_cumack will
4545 					 * trigger search for the next
4546 					 * expected (rtx-)pseudo-cumack.
4547 					 */
4548 					tp1->whoTo->new_pseudo_cumack = 1;
4549 					tp1->whoTo->find_pseudo_cumack = 1;
4550 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4551 
4552 
4553 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4554 						sctp_log_sack(asoc->last_acked_seq,
4555 						    cum_ack,
4556 						    tp1->rec.data.tsn,
4557 						    0,
4558 						    0,
4559 						    SCTP_LOG_TSN_ACKED);
4560 					}
4561 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4562 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4563 					}
4564 				}
4565 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4566 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4567 #ifdef SCTP_AUDITING_ENABLED
4568 					sctp_audit_log(0xB3,
4569 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4570 #endif
4571 				}
4572 				if (tp1->rec.data.chunk_was_revoked) {
4573 					/* deflate the cwnd */
4574 					tp1->whoTo->cwnd -= tp1->book_size;
4575 					tp1->rec.data.chunk_was_revoked = 0;
4576 				}
4577 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4578 					tp1->sent = SCTP_DATAGRAM_ACKED;
4579 				}
4580 			}
4581 		} else {
4582 			break;
4583 		}
4584 	}
4585 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4586 	/* always set this up to cum-ack */
4587 	asoc->this_sack_highest_gap = last_tsn;
4588 
4589 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4590 
4591 		/*
4592 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4593 		 * to be greater than the cumack. Also reset saw_newack to 0
4594 		 * for all dests.
4595 		 */
4596 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4597 			net->saw_newack = 0;
4598 			net->this_sack_highest_newack = last_tsn;
4599 		}
4600 
4601 		/*
4602 		 * this_sack_highest_gap will increase while handling NEW
4603 		 * segments; this_sack_highest_newack will increase while
4604 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4605 		 * used for the CMT DAC algo. saw_newack will also change.
4606 		 */
4607 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4608 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4609 		    num_seg, num_nr_seg, &rto_ok)) {
4610 			wake_him++;
4611 		}
4612 		/*
4613 		 * validate the biggest_tsn_acked in the gap acks if strict
4614 		 * adherence is wanted.
4615 		 */
4616 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4617 			/*
4618 			 * peer is either confused or we are under attack.
4619 			 * We must abort.
4620 			 */
4621 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4622 			    biggest_tsn_acked, send_s);
4623 			goto hopeless_peer;
4624 		}
4625 	}
4626 	/*******************************************/
4627 	/* cancel ALL T3-send timer if accum moved */
4628 	/*******************************************/
4629 	if (asoc->sctp_cmt_on_off > 0) {
4630 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4631 			if (net->new_pseudo_cumack)
4632 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4633 				    stcb, net,
4634 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4635 
4636 		}
4637 	} else {
4638 		if (accum_moved) {
4639 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4640 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4641 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4642 			}
4643 		}
4644 	}
4645 	/********************************************/
4646 	/* drop the acked chunks from the sentqueue */
4647 	/********************************************/
4648 	asoc->last_acked_seq = cum_ack;
4649 
4650 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4651 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4652 			break;
4653 		}
4654 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4655 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4656 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4657 #ifdef INVARIANTS
4658 			} else {
4659 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4660 #endif
4661 			}
4662 		}
4663 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4664 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4665 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4666 			asoc->trigger_reset = 1;
4667 		}
4668 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4669 		if (PR_SCTP_ENABLED(tp1->flags)) {
4670 			if (asoc->pr_sctp_cnt != 0)
4671 				asoc->pr_sctp_cnt--;
4672 		}
4673 		asoc->sent_queue_cnt--;
4674 		if (tp1->data) {
4675 			/* sa_ignore NO_NULL_CHK */
4676 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4677 			sctp_m_freem(tp1->data);
4678 			tp1->data = NULL;
4679 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4680 				asoc->sent_queue_cnt_removeable--;
4681 			}
4682 		}
4683 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4684 			sctp_log_sack(asoc->last_acked_seq,
4685 			    cum_ack,
4686 			    tp1->rec.data.tsn,
4687 			    0,
4688 			    0,
4689 			    SCTP_LOG_FREE_SENT);
4690 		}
4691 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4692 		wake_him++;
4693 	}
4694 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4695 #ifdef INVARIANTS
4696 		panic("Warning flight size is positive and should be 0");
4697 #else
4698 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4699 		    asoc->total_flight);
4700 #endif
4701 		asoc->total_flight = 0;
4702 	}
4703 	/* sa_ignore NO_NULL_CHK */
4704 	if ((wake_him) && (stcb->sctp_socket)) {
4705 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4706 		struct socket *so;
4707 
4708 #endif
4709 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4710 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4711 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4712 		}
4713 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4714 		so = SCTP_INP_SO(stcb->sctp_ep);
4715 		atomic_add_int(&stcb->asoc.refcnt, 1);
4716 		SCTP_TCB_UNLOCK(stcb);
4717 		SCTP_SOCKET_LOCK(so, 1);
4718 		SCTP_TCB_LOCK(stcb);
4719 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4720 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4721 			/* assoc was freed while we were unlocked */
4722 			SCTP_SOCKET_UNLOCK(so, 1);
4723 			return;
4724 		}
4725 #endif
4726 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4727 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4728 		SCTP_SOCKET_UNLOCK(so, 1);
4729 #endif
4730 	} else {
4731 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4732 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4733 		}
4734 	}
4735 
4736 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4737 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4738 			/* Setup so we will exit RFC2582 fast recovery */
4739 			will_exit_fast_recovery = 1;
4740 		}
4741 	}
4742 	/*
4743 	 * Check for revoked fragments:
4744 	 *
4745 	 * If the previous SACK carried no gap reports, nothing can have been
4746 	 * revoked. If it did, then either this SACK also carries gap reports
4747 	 * (num_seg > 0), and sctp_check_for_revoked() tells us whether the
4748 	 * peer revoked some of them, or this SACK carries none, meaning the
4749 	 * peer revoked all previously ACKED fragments.
4750 	 */
4751 
4752 	if (num_seg) {
4753 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4754 		asoc->saw_sack_with_frags = 1;
4755 	} else if (asoc->saw_sack_with_frags) {
4756 		int cnt_revoked = 0;
4757 
4758 		/* Peer revoked all dg's marked or acked */
4759 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4760 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4761 				tp1->sent = SCTP_DATAGRAM_SENT;
4762 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4763 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4764 					    tp1->whoTo->flight_size,
4765 					    tp1->book_size,
4766 					    (uint32_t) (uintptr_t) tp1->whoTo,
4767 					    tp1->rec.data.tsn);
4768 				}
4769 				sctp_flight_size_increase(tp1);
4770 				sctp_total_flight_increase(stcb, tp1);
4771 				tp1->rec.data.chunk_was_revoked = 1;
4772 				/*
4773 				 * To ensure that this increase in
4774 				 * flightsize, which is artificial, does not
4775 				 * throttle the sender, we also increase the
4776 				 * cwnd artificially.
4777 				 */
4778 				tp1->whoTo->cwnd += tp1->book_size;
4779 				cnt_revoked++;
4780 			}
4781 		}
4782 		if (cnt_revoked) {
4783 			reneged_all = 1;
4784 		}
4785 		asoc->saw_sack_with_frags = 0;
4786 	}
4787 	if (num_nr_seg > 0)
4788 		asoc->saw_sack_with_nr_frags = 1;
4789 	else
4790 		asoc->saw_sack_with_nr_frags = 0;
4791 
4792 	/* JRS - Use the congestion control given in the CC module */
4793 	if (ecne_seen == 0) {
4794 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4795 			if (net->net_ack2 > 0) {
4796 				/*
4797 				 * Karn's rule applies to clearing error
4798 				 * count, this is optional.
4799 				 */
4800 				net->error_count = 0;
4801 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4802 					/* addr came good */
4803 					net->dest_state |= SCTP_ADDR_REACHABLE;
4804 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4805 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4806 				}
4807 				if (net == stcb->asoc.primary_destination) {
4808 					if (stcb->asoc.alternate) {
4809 						/* release the alternate,
4810 						 * primary is good */
4811 						sctp_free_remote_addr(stcb->asoc.alternate);
4812 						stcb->asoc.alternate = NULL;
4813 					}
4814 				}
4815 				if (net->dest_state & SCTP_ADDR_PF) {
4816 					net->dest_state &= ~SCTP_ADDR_PF;
4817 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4818 					    stcb->sctp_ep, stcb, net,
4819 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4820 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4821 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4822 					/* Done with this net */
4823 					net->net_ack = 0;
4824 				}
4825 				/* restore any doubled timers */
4826 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4827 				if (net->RTO < stcb->asoc.minrto) {
4828 					net->RTO = stcb->asoc.minrto;
4829 				}
4830 				if (net->RTO > stcb->asoc.maxrto) {
4831 					net->RTO = stcb->asoc.maxrto;
4832 				}
4833 			}
4834 		}
4835 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4836 	}
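	/*
	 * The RTO restoration above follows the usual smoothed-RTT
	 * fixed-point form (a sketch using this file's fields):
	 *
	 *   RTO = (lastsa >> SCTP_RTT_SHIFT) + lastsv;    srtt + rttvar
	 *   RTO = MAX(minrto, MIN(RTO, maxrto));          clamp to bounds
	 */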
4837 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4838 		/* nothing left in-flight */
4839 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4840 			/* stop all timers */
4841 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4842 			    stcb, net,
4843 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4844 			net->flight_size = 0;
4845 			net->partial_bytes_acked = 0;
4846 		}
4847 		asoc->total_flight = 0;
4848 		asoc->total_flight_count = 0;
4849 	}
4850 	/**********************************/
4851 	/* Now what about shutdown issues */
4852 	/**********************************/
4853 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4854 		/* nothing left on sendqueue... consider done */
4855 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4856 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4857 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4858 		}
4859 		asoc->peers_rwnd = a_rwnd;
4860 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4861 			/* SWS sender side engages */
4862 			asoc->peers_rwnd = 0;
4863 		}
4864 		/* clean up */
4865 		if ((asoc->stream_queue_cnt == 1) &&
4866 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4867 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4868 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4869 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4870 		}
4871 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4872 		    (asoc->stream_queue_cnt == 0)) {
4873 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4874 				/* Need to abort here */
4875 				struct mbuf *op_err;
4876 
4877 		abort_out_now:
4878 				*abort_now = 1;
4879 				/* XXX */
4880 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4881 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4882 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4883 				return;
4884 			} else {
4885 				struct sctp_nets *netp;
4886 
4887 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4888 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4889 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4890 				}
4891 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4892 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4893 				sctp_stop_timers_for_shutdown(stcb);
4894 				if (asoc->alternate) {
4895 					netp = asoc->alternate;
4896 				} else {
4897 					netp = asoc->primary_destination;
4898 				}
4899 				sctp_send_shutdown(stcb, netp);
4900 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4901 				    stcb->sctp_ep, stcb, netp);
4902 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4903 				    stcb->sctp_ep, stcb, netp);
4904 			}
4905 			return;
4906 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4907 		    (asoc->stream_queue_cnt == 0)) {
4908 			struct sctp_nets *netp;
4909 
4910 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4911 				goto abort_out_now;
4912 			}
4913 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4914 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4915 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4916 			sctp_stop_timers_for_shutdown(stcb);
4917 			if (asoc->alternate) {
4918 				netp = asoc->alternate;
4919 			} else {
4920 				netp = asoc->primary_destination;
4921 			}
4922 			sctp_send_shutdown_ack(stcb, netp);
4923 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4924 			    stcb->sctp_ep, stcb, netp);
4925 			return;
4926 		}
4927 	}
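	/*
	 * Shutdown handling above, summarized (illustrative): once the
	 * send and sent queues drain,
	 *
	 *   SHUTDOWN_PENDING  -> send SHUTDOWN,     enter SHUTDOWN_SENT
	 *   SHUTDOWN_RECEIVED -> send SHUTDOWN-ACK, enter SHUTDOWN_ACK_SENT
	 *
	 * while SCTP_STATE_PARTIAL_MSG_LEFT instead forces an abort with a
	 * user-initiated cause.
	 */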
4928 	/*
4929 	 * Now here we are going to recycle net_ack for a different use...
4930 	 * HEADS UP.
4931 	 */
4932 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4933 		net->net_ack = 0;
4934 	}
4935 
4936 	/*
4937 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4938 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4939 	 * automatically ensure that.
4940 	 */
4941 	if ((asoc->sctp_cmt_on_off > 0) &&
4942 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4943 	    (cmt_dac_flag == 0)) {
4944 		this_sack_lowest_newack = cum_ack;
4945 	}
4946 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4947 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4948 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4949 	}
4950 	/* JRS - Use the congestion control given in the CC module */
4951 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4952 
4953 	/* Now are we exiting loss recovery ? */
4954 	if (will_exit_fast_recovery) {
4955 		/* Ok, we must exit fast recovery */
4956 		asoc->fast_retran_loss_recovery = 0;
4957 	}
4958 	if ((asoc->sat_t3_loss_recovery) &&
4959 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4960 		/* end satellite t3 loss recovery */
4961 		asoc->sat_t3_loss_recovery = 0;
4962 	}
4963 	/*
4964 	 * CMT Fast recovery
4965 	 */
4966 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4967 		if (net->will_exit_fast_recovery) {
4968 			/* Ok, we must exit fast recovery */
4969 			net->fast_retran_loss_recovery = 0;
4970 		}
4971 	}
4972 
4973 	/* Adjust and set the new rwnd value */
4974 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4975 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4976 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4977 	}
4978 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4979 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4980 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4981 		/* SWS sender side engages */
4982 		asoc->peers_rwnd = 0;
4983 	}
4984 	if (asoc->peers_rwnd > old_rwnd) {
4985 		win_probe_recovery = 1;
4986 	}
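	/*
	 * The rwnd arithmetic above, as a sketch (names abbreviated): the
	 * peer's advertised window is reduced by the data still in flight
	 * plus a per-chunk overhead allowance, then clamped for silly
	 * window avoidance:
	 *
	 *   rwnd = a_rwnd - (total_flight + flight_count * chunk_oh);
	 *   if (rwnd < sws_sender)
	 *           rwnd = 0;
	 */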
4987 	/*
4988 	 * Now we must setup so we have a timer up for anyone with
4989 	 * outstanding data.
4990 	 */
4991 	done_once = 0;
4992 again:
4993 	j = 0;
4994 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4995 		if (win_probe_recovery && (net->window_probe)) {
4996 			win_probe_recovered = 1;
4997 			/*-
4998 			 * Find the first chunk that was used for a
4999 			 * window probe and clear the event. Put it
5000 			 * back into the send queue as if it had
5001 			 * not been sent.
5002 			 */
5003 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5004 				if (tp1->window_probe) {
5005 					sctp_window_probe_recovery(stcb, asoc, tp1);
5006 					break;
5007 				}
5008 			}
5009 		}
5010 		if (net->flight_size) {
5011 			j++;
5012 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5013 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5014 				    stcb->sctp_ep, stcb, net);
5015 			}
5016 			if (net->window_probe) {
5017 				net->window_probe = 0;
5018 			}
5019 		} else {
5020 			if (net->window_probe) {
5021 				/* For window probes we must ensure a timer
5022 				 * is still running on that net */
5023 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5024 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5025 					    stcb->sctp_ep, stcb, net);
5026 
5027 				}
5028 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5029 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5030 				    stcb, net,
5031 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5032 			}
5033 		}
5034 	}
5035 	if ((j == 0) &&
5036 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5037 	    (asoc->sent_queue_retran_cnt == 0) &&
5038 	    (win_probe_recovered == 0) &&
5039 	    (done_once == 0)) {
5040 		/*
5041 		 * This should not happen unless all packets are
5042 		 * PR-SCTP and marked to skip, of course.
5043 		 */
5044 		if (sctp_fs_audit(asoc)) {
5045 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5046 				net->flight_size = 0;
5047 			}
5048 			asoc->total_flight = 0;
5049 			asoc->total_flight_count = 0;
5050 			asoc->sent_queue_retran_cnt = 0;
5051 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5052 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5053 					sctp_flight_size_increase(tp1);
5054 					sctp_total_flight_increase(stcb, tp1);
5055 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5056 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5057 				}
5058 			}
5059 		}
5060 		done_once = 1;
5061 		goto again;
5062 	}
5063 	/*********************************************/
5064 	/* Here we perform PR-SCTP procedures        */
5065 	/* (section 4.2)                             */
5066 	/*********************************************/
5067 	/* C1. update advancedPeerAckPoint */
5068 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5069 		asoc->advanced_peer_ack_point = cum_ack;
5070 	}
5071 	/* C2. try to further move advancedPeerAckPoint ahead */
5072 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5073 		struct sctp_tmit_chunk *lchk;
5074 		uint32_t old_adv_peer_ack_point;
5075 
5076 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5077 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5078 		/* C3. See if we need to send a Fwd-TSN */
5079 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5080 			/*
5081 			 * ISSUE with ECN, see FWD-TSN processing.
5082 			 */
5083 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5084 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5085 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5086 				    old_adv_peer_ack_point);
5087 			}
5088 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5089 				send_forward_tsn(stcb, asoc);
5090 			} else if (lchk) {
5091 				/* try to FR fwd-tsn's that get lost too */
5092 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5093 					send_forward_tsn(stcb, asoc);
5094 				}
5095 			}
5096 		}
5097 		if (lchk) {
5098 			/* Assure a timer is up */
5099 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5100 			    stcb->sctp_ep, stcb, lchk->whoTo);
5101 		}
5102 	}
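	/*
	 * The PR-SCTP decision above, reduced to a sketch:
	 *
	 *   if (advanced_peer_ack_point > cum_ack &&
	 *       advanced_peer_ack_point > old_adv_peer_ack_point)
	 *           send_forward_tsn(stcb, asoc);
	 *
	 * plus a retransmit-style path when a FWD-TSN itself appears to
	 * have been lost repeatedly (fwd_tsn_cnt >= 3).
	 */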
5103 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5104 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5105 		    a_rwnd,
5106 		    stcb->asoc.peers_rwnd,
5107 		    stcb->asoc.total_flight,
5108 		    stcb->asoc.total_output_queue_size);
5109 	}
5110 }
5111 
5112 void
5113 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5114 {
5115 	/* Copy cum-ack */
5116 	uint32_t cum_ack, a_rwnd;
5117 
5118 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5119 	/* Arrange so a_rwnd does NOT change */
5120 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5121 
5122 	/* Now call the express sack handling */
5123 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5124 }
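/*
 * Note on the a_rwnd chosen above (illustrative): a SHUTDOWN chunk carries
 * no window advertisement, so a value is synthesized that cancels out.
 * Ignoring the per-chunk overhead term, and assuming the flight size is
 * unchanged, the express handler computes roughly
 *
 *   new peers_rwnd = a_rwnd - total_flight
 *                  = (peers_rwnd + total_flight) - total_flight
 *                  = peers_rwnd
 *
 * leaving the peer's window where it was.
 */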
5125 
5126 static void
5127 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5128     struct sctp_stream_in *strmin)
5129 {
5130 	struct sctp_queued_to_read *ctl, *nctl;
5131 	struct sctp_association *asoc;
5132 	uint32_t mid;
5133 	int need_reasm_check = 0;
5134 
5135 	asoc = &stcb->asoc;
5136 	mid = strmin->last_mid_delivered;
5137 	/*
5138 	 * First deliver anything prior to and including the message ID
5139 	 * that came in.
5140 	 */
5141 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5142 		if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5143 			/* this is deliverable now */
5144 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5145 				if (ctl->on_strm_q) {
5146 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5147 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5148 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5149 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5150 #ifdef INVARIANTS
5151 					} else {
5152 						panic("strmin: %p ctl: %p unknown %d",
5153 						    strmin, ctl, ctl->on_strm_q);
5154 #endif
5155 					}
5156 					ctl->on_strm_q = 0;
5157 				}
5158 				/* subtract pending on streams */
5159 				asoc->size_on_all_streams -= ctl->length;
5160 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5161 				/* deliver it to at least the delivery-q */
5162 				if (stcb->sctp_socket) {
5163 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5164 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5165 					    ctl,
5166 					    &stcb->sctp_socket->so_rcv,
5167 					    1, SCTP_READ_LOCK_HELD,
5168 					    SCTP_SO_NOT_LOCKED);
5169 				}
5170 			} else {
5171 				/* It's a fragmented message */
5172 				if (ctl->first_frag_seen) {
5173 					/* Make this the next one to
5174 					 * deliver; we restore it later */
5175 					strmin->last_mid_delivered = ctl->mid - 1;
5176 					need_reasm_check = 1;
5177 					break;
5178 				}
5179 			}
5180 		} else {
5181 			/* no more delivery now. */
5182 			break;
5183 		}
5184 	}
5185 	if (need_reasm_check) {
5186 		int ret;
5187 
5188 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5189 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5190 			/* Restore the next to deliver unless we are ahead */
5191 			strmin->last_mid_delivered = mid;
5192 		}
5193 		if (ret == 0) {
5194 			/* Left the front Partial one on */
5195 			return;
5196 		}
5197 		need_reasm_check = 0;
5198 	}
5199 	/*
5200 	 * Now we must deliver things in the queue the normal way, if any
5201 	 * are now ready.
5202 	 */
5203 	mid = strmin->last_mid_delivered + 1;
5204 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5205 		if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5206 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5207 				/* this is deliverable now */
5208 				if (ctl->on_strm_q) {
5209 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5210 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5211 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5212 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5213 #ifdef INVARIANTS
5214 					} else {
5215 						panic("strmin: %p ctl: %p unknown %d",
5216 						    strmin, ctl, ctl->on_strm_q);
5217 #endif
5218 					}
5219 					ctl->on_strm_q = 0;
5220 				}
5221 				/* subtract pending on streams */
5222 				asoc->size_on_all_streams -= ctl->length;
5223 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5224 				/* deliver it to at least the delivery-q */
5225 				strmin->last_mid_delivered = ctl->mid;
5226 				if (stcb->sctp_socket) {
5227 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5228 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5229 					    ctl,
5230 					    &stcb->sctp_socket->so_rcv, 1,
5231 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5232 
5233 				}
5234 				mid = strmin->last_mid_delivered + 1;
5235 			} else {
5236 				/* It's a fragmented message */
5237 				if (ctl->first_frag_seen) {
5238 					/* Make this the next one to
5239 					 * deliver */
5240 					strmin->last_mid_delivered = ctl->mid - 1;
5241 					need_reasm_check = 1;
5242 					break;
5243 				}
5244 			}
5245 		} else {
5246 			break;
5247 		}
5248 	}
5249 	if (need_reasm_check) {
5250 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5251 	}
5252 }
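/*
 * The SCTP_MID_GE/GT/EQ comparisons used above are serial-number
 * comparisons: with I-DATA support they act on 32-bit MIDs, otherwise on
 * 16-bit SSNs. A sketch of the 32-bit "greater than", wrap included:
 *
 *   GT(a, b) ~ ((a > b) && (a - b < (1U << 31))) ||
 *              ((a < b) && (b - a > (1U << 31)))
 */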
5253 
5254 
5255 
5256 static void
5257 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5258     struct sctp_association *asoc,
5259     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5260 {
5261 	struct sctp_queued_to_read *control;
5262 	struct sctp_stream_in *strm;
5263 	struct sctp_tmit_chunk *chk, *nchk;
5264 	int cnt_removed = 0;
5265 
5266 	/*
5267 	 * For now, large messages held on the stream reassembly queue that
5268 	 * are complete will be tossed too. We could in theory do more work:
5269 	 * spin through, stop after dumping one message (i.e. on seeing the
5270 	 * start of a new message at the head), and call the delivery
5271 	 * function to see if it can be delivered. But for now we just dump
5272 	 * everything on the queue.
5273 	 */
5274 	strm = &asoc->strmin[stream];
5275 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5276 	if (control == NULL) {
5277 		/* Not found */
5278 		return;
5279 	}
5280 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5281 		return;
5282 	}
5283 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5284 		/* Purge hanging chunks */
5285 		if (!asoc->idata_supported && (ordered == 0)) {
5286 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5287 				break;
5288 			}
5289 		}
5290 		cnt_removed++;
5291 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5292 		asoc->size_on_reasm_queue -= chk->send_size;
5293 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5294 		if (chk->data) {
5295 			sctp_m_freem(chk->data);
5296 			chk->data = NULL;
5297 		}
5298 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5299 	}
5300 	if (!TAILQ_EMPTY(&control->reasm)) {
5301 		/* This has to be old data, unordered */
5302 		if (control->data) {
5303 			sctp_m_freem(control->data);
5304 			control->data = NULL;
5305 		}
5306 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5307 		chk = TAILQ_FIRST(&control->reasm);
5308 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5309 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5310 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5311 			    chk, SCTP_READ_LOCK_HELD);
5312 		}
5313 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5314 		return;
5315 	}
5316 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5317 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5318 		control->on_strm_q = 0;
5319 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5320 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5321 		control->on_strm_q = 0;
5322 #ifdef INVARIANTS
5323 	} else if (control->on_strm_q) {
5324 		panic("strm: %p ctl: %p unknown %d",
5325 		    strm, control, control->on_strm_q);
5326 #endif
5327 	}
5328 	control->on_strm_q = 0;
5329 	if (control->on_read_q == 0) {
5330 		sctp_free_remote_addr(control->whoFrom);
5331 		if (control->data) {
5332 			sctp_m_freem(control->data);
5333 			control->data = NULL;
5334 		}
5335 		sctp_free_a_readq(stcb, control);
5336 	}
5337 }
5338 
5339 void
5340 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5341     struct sctp_forward_tsn_chunk *fwd,
5342     int *abort_flag, struct mbuf *m, int offset)
5343 {
5344 	/* The pr-sctp fwd tsn */
5345 	/*
5346 	 * Here we will perform all the data receiver side steps for
5347 	 * processing FwdTSN, as required by the pr-sctp draft.
5348 	 *
5349 	 * Assume we get FwdTSN(x):
5350 	 * 1) update local cumTSN to x
5351 	 * 2) try to further advance cumTSN to x + others we have
5352 	 * 3) examine and update re-ordering queues on pr-in-streams
5353 	 * 4) clean up the re-assembly queue
5354 	 * 5) send a SACK to report where we are
5355 	 */
5356 	struct sctp_association *asoc;
5357 	uint32_t new_cum_tsn, gap;
5358 	unsigned int i, fwd_sz, m_size;
5359 	uint32_t str_seq;
5360 	struct sctp_stream_in *strm;
5361 	struct sctp_queued_to_read *ctl, *sv;
5362 
5363 	asoc = &stcb->asoc;
5364 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5365 		SCTPDBG(SCTP_DEBUG_INDATA1,
5366 		    "Bad size too small/big fwd-tsn\n");
5367 		return;
5368 	}
5369 	m_size = (stcb->asoc.mapping_array_size << 3);
5370 	/*************************************************************/
5371 	/* 1. Here we update local cumTSN and shift the bitmap array */
5372 	/*************************************************************/
5373 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5374 
5375 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5376 		/* Already got there ... */
5377 		return;
5378 	}
5379 	/*
5380 	 * now we know the new TSN is more advanced, let's find the actual
5381 	 * gap
5382 	 */
5383 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5384 	asoc->cumulative_tsn = new_cum_tsn;
5385 	if (gap >= m_size) {
5386 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5387 			struct mbuf *op_err;
5388 			char msg[SCTP_DIAG_INFO_LEN];
5389 
5390 			/*
5391 			 * Out of range, even counting single-byte chunks
5392 			 * against the rwnd I give out. This must be an attacker.
5393 			 */
5394 			*abort_flag = 1;
5395 			snprintf(msg, sizeof(msg),
5396 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5397 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5398 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5399 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5400 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5401 			return;
5402 		}
5403 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5404 
5405 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5406 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5407 		asoc->highest_tsn_inside_map = new_cum_tsn;
5408 
5409 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5410 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5411 
5412 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5413 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5414 		}
5415 	} else {
5416 		SCTP_TCB_LOCK_ASSERT(stcb);
5417 		for (i = 0; i <= gap; i++) {
5418 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5419 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5420 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5421 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5422 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5423 				}
5424 			}
5425 		}
5426 	}
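	/*
	 * The gap computed above is effectively a wrap-safe 32-bit
	 * subtraction (a sketch):
	 *
	 *   gap = new_cum_tsn - mapping_array_base_tsn;    (mod 2^32)
	 *
	 * A gap inside the map just marks TSNs as present; a gap beyond it
	 * resets both mapping arrays to start at new_cum_tsn + 1.
	 */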
5427 	/*************************************************************/
5428 	/* 2. Clear up re-assembly queue                             */
5429 	/*************************************************************/
5430 
5431 	/* This is now done as part of clearing up the stream/seq */
5432 	if (asoc->idata_supported == 0) {
5433 		uint16_t sid;
5434 
5435 		/* Flush all the un-ordered data based on cum-tsn */
5436 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5437 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5438 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5439 		}
5440 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5441 	}
5442 	/*******************************************************/
5443 	/* 3. Update the PR-stream re-ordering queues and fix  */
5444 	/*    delivery issues as needed.                       */
5445 	/*******************************************************/
5446 	fwd_sz -= sizeof(*fwd);
5447 	if (m && fwd_sz) {
5448 		/* New method. */
5449 		unsigned int num_str;
5450 		uint32_t mid, cur_mid;
5451 		uint16_t sid;
5452 		uint16_t ordered, flags;
5453 		struct sctp_strseq *stseq, strseqbuf;
5454 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5455 
5456 		offset += sizeof(*fwd);
5457 
5458 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5459 		if (asoc->idata_supported) {
5460 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5461 		} else {
5462 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5463 		}
5464 		for (i = 0; i < num_str; i++) {
5465 			if (asoc->idata_supported) {
5466 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5467 				    sizeof(struct sctp_strseq_mid),
5468 				    (uint8_t *) & strseqbuf_m);
5469 				offset += sizeof(struct sctp_strseq_mid);
5470 				if (stseq_m == NULL) {
5471 					break;
5472 				}
5473 				sid = ntohs(stseq_m->sid);
5474 				mid = ntohl(stseq_m->mid);
5475 				flags = ntohs(stseq_m->flags);
5476 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5477 					ordered = 0;
5478 				} else {
5479 					ordered = 1;
5480 				}
5481 			} else {
5482 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5483 				    sizeof(struct sctp_strseq),
5484 				    (uint8_t *) & strseqbuf);
5485 				offset += sizeof(struct sctp_strseq);
5486 				if (stseq == NULL) {
5487 					break;
5488 				}
5489 				sid = ntohs(stseq->sid);
5490 				mid = (uint32_t) ntohs(stseq->ssn);
5491 				ordered = 1;
5492 			}
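			/*
			 * Wire formats parsed above, for reference: the
			 * classic entry carries sid/ssn, the I-DATA entry
			 * sid/flags/mid, all in network byte order:
			 *
			 *   struct sctp_strseq     { sid(16), ssn(16) };
			 *   struct sctp_strseq_mid { sid(16), flags(16),
			 *                            mid(32) };
			 */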
5493 			/* Convert */
5494 
5495 			/* now process */
5496 
5497 			/*
5498 			 * OK, we now look for the stream/seq on the read
5499 			 * queue where it's not all delivered. If we find it,
5500 			 * we transmute the read entry into a PDI_ABORTED.
5501 			 */
5502 			if (sid >= asoc->streamincnt) {
5503 				/* screwed up streams, stop!  */
5504 				break;
5505 			}
5506 			if ((asoc->str_of_pdapi == sid) &&
5507 			    (asoc->ssn_of_pdapi == mid)) {
5508 				/*
5509 				 * If this is the one we were partially
5510 				 * delivering, then we no longer are.
5511 				 * Note this will change with the reassembly
5512 				 * re-write.
5513 				 */
5514 				asoc->fragmented_delivery_inprogress = 0;
5515 			}
5516 			strm = &asoc->strmin[sid];
5517 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5518 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5519 			}
5520 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5521 				if ((ctl->sinfo_stream == sid) &&
5522 				    (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5523 					str_seq = (sid << 16) | (0x0000ffff & mid);
5524 					ctl->pdapi_aborted = 1;
5525 					sv = stcb->asoc.control_pdapi;
5526 					ctl->end_added = 1;
5527 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5528 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5529 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5530 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5531 #ifdef INVARIANTS
5532 					} else if (ctl->on_strm_q) {
5533 						panic("strm: %p ctl: %p unknown %d",
5534 						    strm, ctl, ctl->on_strm_q);
5535 #endif
5536 					}
5537 					ctl->on_strm_q = 0;
5538 					stcb->asoc.control_pdapi = ctl;
5539 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5540 					    stcb,
5541 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5542 					    (void *)&str_seq,
5543 					    SCTP_SO_NOT_LOCKED);
5544 					stcb->asoc.control_pdapi = sv;
5545 					break;
5546 				} else if ((ctl->sinfo_stream == sid) &&
5547 				    SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5548 					/* We are past our victim SSN */
5549 					break;
5550 				}
5551 			}
5552 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5553 				/* Update the sequence number */
5554 				strm->last_mid_delivered = mid;
5555 			}
5556 			/* now kick the stream the new way */
5557 			/* sa_ignore NO_NULL_CHK */
5558 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5559 		}
5560 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5561 	}
5562 	/*
5563 	 * Now slide the mapping arrays forward.
5564 	 */
5565 	sctp_slide_mapping_arrays(stcb);
5566 }
5567