xref: /freebsd/sys/netinet/sctp_indata.c (revision 9ff086544d5f85b58349e28ed36a9811b8fe5cf9)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send that is and will be sending it .. for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-2-m socket. Since
84 	 * the sb_cc is the count that everyone as put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * associations data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on socket queue and we yet hold
102 	 * for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* what is the overhead of all these rwnd's */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 	 * even it is 0. SWS engaged
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t sid,
134     uint32_t mid, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = sid;
145 	read_queue_e->sinfo_flags = (flags << 8);
146 	read_queue_e->sinfo_ppid = ppid;
147 	read_queue_e->sinfo_context = context;
148 	read_queue_e->sinfo_tsn = tsn;
149 	read_queue_e->sinfo_cumtsn = tsn;
150 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
151 	read_queue_e->mid = mid;
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t flags, unordered;
337 
338 	flags = (control->sinfo_flags >> 8);
339 	unordered = flags & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/* Only one stream can be here in old style
345 				 * -- abort */
346 				return (-1);
347 			}
348 			TAILQ_INSERT_TAIL(q, control, next_instrm);
349 			control->on_strm_q = SCTP_ON_UNORDERED;
350 			return (0);
351 		}
352 	} else {
353 		q = &strm->inqueue;
354 	}
355 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
356 		control->end_added = 1;
357 		control->first_frag_seen = 1;
358 		control->last_frag_seen = 1;
359 	}
360 	if (TAILQ_EMPTY(q)) {
361 		/* Empty queue */
362 		TAILQ_INSERT_HEAD(q, control, next_instrm);
363 		if (unordered) {
364 			control->on_strm_q = SCTP_ON_UNORDERED;
365 		} else {
366 			control->on_strm_q = SCTP_ON_ORDERED;
367 		}
368 		return (0);
369 	} else {
370 		TAILQ_FOREACH(at, q, next_instrm) {
371 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
372 				/*
373 				 * one in queue is bigger than the new one,
374 				 * insert before this one
375 				 */
376 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
377 				if (unordered) {
378 					control->on_strm_q = SCTP_ON_UNORDERED;
379 				} else {
380 					control->on_strm_q = SCTP_ON_ORDERED;
381 				}
382 				break;
383 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
384 				/*
385 				 * Gak, He sent me a duplicate msg id
386 				 * number?? return -1 to abort.
387 				 */
388 				return (-1);
389 			} else {
390 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
391 					/*
392 					 * We are at the end, insert it
393 					 * after this one
394 					 */
395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 						sctp_log_strm_del(control, at,
397 						    SCTP_STR_LOG_FROM_INSERT_TL);
398 					}
399 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
400 					if (unordered) {
401 						control->on_strm_q = SCTP_ON_UNORDERED;
402 					} else {
403 						control->on_strm_q = SCTP_ON_ORDERED;
404 					}
405 					break;
406 				}
407 			}
408 		}
409 	}
410 	return (0);
411 }
412 
413 static void
414 sctp_abort_in_reasm(struct sctp_tcb *stcb,
415     struct sctp_queued_to_read *control,
416     struct sctp_tmit_chunk *chk,
417     int *abort_flag, int opspot)
418 {
419 	char msg[SCTP_DIAG_INFO_LEN];
420 	struct mbuf *oper;
421 
422 	if (stcb->asoc.idata_supported) {
423 		snprintf(msg, sizeof(msg),
424 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
425 		    opspot,
426 		    control->fsn_included,
427 		    chk->rec.data.tsn,
428 		    chk->rec.data.sid,
429 		    chk->rec.data.fsn, chk->rec.data.mid);
430 	} else {
431 		snprintf(msg, sizeof(msg),
432 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
433 		    opspot,
434 		    control->fsn_included,
435 		    chk->rec.data.tsn,
436 		    chk->rec.data.sid,
437 		    chk->rec.data.fsn,
438 		    (uint16_t) chk->rec.data.mid);
439 	}
440 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
441 	sctp_m_freem(chk->data);
442 	chk->data = NULL;
443 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
444 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
445 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
446 	*abort_flag = 1;
447 }
448 
449 static void
450 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
451 {
452 	/*
453 	 * The control could not be placed and must be cleaned.
454 	 */
455 	struct sctp_tmit_chunk *chk, *nchk;
456 
457 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
458 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
459 		if (chk->data)
460 			sctp_m_freem(chk->data);
461 		chk->data = NULL;
462 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
463 	}
464 	sctp_free_a_readq(stcb, control);
465 }
466 
467 /*
468  * Queue the chunk either right into the socket buffer if it is the next one
469  * to go OR put it in the correct place in the delivery queue.  If we do
470  * append to the so_buf, keep doing so until we are out of order as
471  * long as the control's entered are non-fragmented.
472  */
473 static void
474 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
475     struct sctp_association *asoc,
476     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
477 {
478 	/*
479 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
480 	 * all the data in one stream this could happen quite rapidly. One
481 	 * could use the TSN to keep track of things, but this scheme breaks
482 	 * down in the other type of stream usage that could occur. Send a
483 	 * single msg to stream 0, send 4Billion messages to stream 1, now
484 	 * send a message to stream 0. You have a situation where the TSN
485 	 * has wrapped but not in the stream. Is this worth worrying about
486 	 * or should we just change our queue sort at the bottom to be by
487 	 * TSN.
488 	 *
489 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
490 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
491 	 * assignment this could happen... and I don't see how this would be
492 	 * a violation. So for now I am undecided an will leave the sort by
493 	 * SSN alone. Maybe a hybred approach is the answer
494 	 *
495 	 */
496 	struct sctp_queued_to_read *at;
497 	int queue_needed;
498 	uint32_t nxt_todel;
499 	struct mbuf *op_err;
500 	struct sctp_stream_in *strm;
501 	char msg[SCTP_DIAG_INFO_LEN];
502 
503 	strm = &asoc->strmin[control->sinfo_stream];
504 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
506 	}
507 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
508 		/* The incoming sseq is behind where we last delivered? */
509 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 		    strm->last_mid_delivered, control->mid);
511 		/*
512 		 * throw it in the stream so it gets cleaned up in
513 		 * association destruction
514 		 */
515 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
516 		if (asoc->idata_supported) {
517 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
518 			    strm->last_mid_delivered, control->sinfo_tsn,
519 			    control->sinfo_stream, control->mid);
520 		} else {
521 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
522 			    (uint16_t) strm->last_mid_delivered,
523 			    control->sinfo_tsn,
524 			    control->sinfo_stream,
525 			    (uint16_t) control->mid);
526 		}
527 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
528 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
529 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
530 		*abort_flag = 1;
531 		return;
532 
533 	}
534 	queue_needed = 1;
535 	asoc->size_on_all_streams += control->length;
536 	sctp_ucount_incr(asoc->cnt_on_all_streams);
537 	nxt_todel = strm->last_mid_delivered + 1;
538 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
539 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
540 		struct socket *so;
541 
542 		so = SCTP_INP_SO(stcb->sctp_ep);
543 		atomic_add_int(&stcb->asoc.refcnt, 1);
544 		SCTP_TCB_UNLOCK(stcb);
545 		SCTP_SOCKET_LOCK(so, 1);
546 		SCTP_TCB_LOCK(stcb);
547 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
548 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
549 			SCTP_SOCKET_UNLOCK(so, 1);
550 			return;
551 		}
552 #endif
553 		/* can be delivered right away? */
554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
555 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
556 		}
557 		/* EY it wont be queued if it could be delivered directly */
558 		queue_needed = 0;
559 		asoc->size_on_all_streams -= control->length;
560 		sctp_ucount_decr(asoc->cnt_on_all_streams);
561 		strm->last_mid_delivered++;
562 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
563 		sctp_add_to_readq(stcb->sctp_ep, stcb,
564 		    control,
565 		    &stcb->sctp_socket->so_rcv, 1,
566 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
567 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
568 			/* all delivered */
569 			nxt_todel = strm->last_mid_delivered + 1;
570 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
571 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
572 				asoc->size_on_all_streams -= control->length;
573 				sctp_ucount_decr(asoc->cnt_on_all_streams);
574 				if (control->on_strm_q == SCTP_ON_ORDERED) {
575 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
576 #ifdef INVARIANTS
577 				} else {
578 					panic("Huh control: %p is on_strm_q: %d",
579 					    control, control->on_strm_q);
580 #endif
581 				}
582 				control->on_strm_q = 0;
583 				strm->last_mid_delivered++;
584 				/*
585 				 * We ignore the return of deliver_data here
586 				 * since we always can hold the chunk on the
587 				 * d-queue. And we have a finite number that
588 				 * can be delivered from the strq.
589 				 */
590 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
591 					sctp_log_strm_del(control, NULL,
592 					    SCTP_STR_LOG_FROM_IMMED_DEL);
593 				}
594 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
595 				sctp_add_to_readq(stcb->sctp_ep, stcb,
596 				    control,
597 				    &stcb->sctp_socket->so_rcv, 1,
598 				    SCTP_READ_LOCK_NOT_HELD,
599 				    SCTP_SO_LOCKED);
600 				continue;
601 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
602 				*need_reasm = 1;
603 			}
604 			break;
605 		}
606 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
607 		SCTP_SOCKET_UNLOCK(so, 1);
608 #endif
609 	}
610 	if (queue_needed) {
611 		/*
612 		 * Ok, we did not deliver this guy, find the correct place
613 		 * to put it on the queue.
614 		 */
615 		if (sctp_place_control_in_stream(strm, asoc, control)) {
616 			snprintf(msg, sizeof(msg),
617 			    "Queue to str MID: %u duplicate",
618 			    control->mid);
619 			sctp_clean_up_control(stcb, control);
620 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
621 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
622 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
623 			*abort_flag = 1;
624 		}
625 	}
626 }
627 
628 
629 static void
630 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
631 {
632 	struct mbuf *m, *prev = NULL;
633 	struct sctp_tcb *stcb;
634 
635 	stcb = control->stcb;
636 	control->held_length = 0;
637 	control->length = 0;
638 	m = control->data;
639 	while (m) {
640 		if (SCTP_BUF_LEN(m) == 0) {
641 			/* Skip mbufs with NO length */
642 			if (prev == NULL) {
643 				/* First one */
644 				control->data = sctp_m_free(m);
645 				m = control->data;
646 			} else {
647 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
648 				m = SCTP_BUF_NEXT(prev);
649 			}
650 			if (m == NULL) {
651 				control->tail_mbuf = prev;
652 			}
653 			continue;
654 		}
655 		prev = m;
656 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
657 		if (control->on_read_q) {
658 			/*
659 			 * On read queue so we must increment the SB stuff,
660 			 * we assume caller has done any locks of SB.
661 			 */
662 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
663 		}
664 		m = SCTP_BUF_NEXT(m);
665 	}
666 	if (prev) {
667 		control->tail_mbuf = prev;
668 	}
669 }
670 
671 static void
672 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
673 {
674 	struct mbuf *prev = NULL;
675 	struct sctp_tcb *stcb;
676 
677 	stcb = control->stcb;
678 	if (stcb == NULL) {
679 #ifdef INVARIANTS
680 		panic("Control broken");
681 #else
682 		return;
683 #endif
684 	}
685 	if (control->tail_mbuf == NULL) {
686 		/* TSNH */
687 		control->data = m;
688 		sctp_setup_tail_pointer(control);
689 		return;
690 	}
691 	control->tail_mbuf->m_next = m;
692 	while (m) {
693 		if (SCTP_BUF_LEN(m) == 0) {
694 			/* Skip mbufs with NO length */
695 			if (prev == NULL) {
696 				/* First one */
697 				control->tail_mbuf->m_next = sctp_m_free(m);
698 				m = control->tail_mbuf->m_next;
699 			} else {
700 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
701 				m = SCTP_BUF_NEXT(prev);
702 			}
703 			if (m == NULL) {
704 				control->tail_mbuf = prev;
705 			}
706 			continue;
707 		}
708 		prev = m;
709 		if (control->on_read_q) {
710 			/*
711 			 * On read queue so we must increment the SB stuff,
712 			 * we assume caller has done any locks of SB.
713 			 */
714 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
715 		}
716 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
717 		m = SCTP_BUF_NEXT(m);
718 	}
719 	if (prev) {
720 		control->tail_mbuf = prev;
721 	}
722 }
723 
724 static void
725 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
726 {
727 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
728 	nc->sinfo_stream = control->sinfo_stream;
729 	nc->mid = control->mid;
730 	TAILQ_INIT(&nc->reasm);
731 	nc->top_fsn = control->top_fsn;
732 	nc->mid = control->mid;
733 	nc->sinfo_flags = control->sinfo_flags;
734 	nc->sinfo_ppid = control->sinfo_ppid;
735 	nc->sinfo_context = control->sinfo_context;
736 	nc->fsn_included = 0xffffffff;
737 	nc->sinfo_tsn = control->sinfo_tsn;
738 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
739 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
740 	nc->whoFrom = control->whoFrom;
741 	atomic_add_int(&nc->whoFrom->ref_count, 1);
742 	nc->stcb = control->stcb;
743 	nc->port_from = control->port_from;
744 }
745 
746 static void
747 sctp_reset_a_control(struct sctp_queued_to_read *control,
748     struct sctp_inpcb *inp, uint32_t tsn)
749 {
750 	control->fsn_included = tsn;
751 	if (control->on_read_q) {
752 		/*
753 		 * We have to purge it from there, hopefully this will work
754 		 * :-)
755 		 */
756 		TAILQ_REMOVE(&inp->read_queue, control, next);
757 		control->on_read_q = 0;
758 	}
759 }
760 
761 static int
762 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
763     struct sctp_association *asoc,
764     struct sctp_stream_in *strm,
765     struct sctp_queued_to_read *control,
766     uint32_t pd_point,
767     int inp_read_lock_held)
768 {
769 	/*
770 	 * Special handling for the old un-ordered data chunk. All the
771 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
772 	 * to see if we have it all. If you return one, no other control
773 	 * entries on the un-ordered queue will be looked at. In theory
774 	 * there should be no others entries in reality, unless the guy is
775 	 * sending both unordered NDATA and unordered DATA...
776 	 */
777 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
778 	uint32_t fsn;
779 	struct sctp_queued_to_read *nc;
780 	int cnt_added;
781 
782 	if (control->first_frag_seen == 0) {
783 		/* Nothing we can do, we have not seen the first piece yet */
784 		return (1);
785 	}
786 	/* Collapse any we can */
787 	cnt_added = 0;
788 restart:
789 	fsn = control->fsn_included + 1;
790 	/* Now what can we add? */
791 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
792 		if (chk->rec.data.fsn == fsn) {
793 			/* Ok lets add it */
794 			sctp_alloc_a_readq(stcb, nc);
795 			if (nc == NULL) {
796 				break;
797 			}
798 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
799 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
800 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
801 			fsn++;
802 			cnt_added++;
803 			chk = NULL;
804 			if (control->end_added) {
805 				/* We are done */
806 				if (!TAILQ_EMPTY(&control->reasm)) {
807 					/*
808 					 * Ok we have to move anything left
809 					 * on the control queue to a new
810 					 * control.
811 					 */
812 					sctp_build_readq_entry_from_ctl(nc, control);
813 					tchk = TAILQ_FIRST(&control->reasm);
814 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
815 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
816 						asoc->size_on_reasm_queue -= tchk->send_size;
817 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
818 						nc->first_frag_seen = 1;
819 						nc->fsn_included = tchk->rec.data.fsn;
820 						nc->data = tchk->data;
821 						nc->sinfo_ppid = tchk->rec.data.ppid;
822 						nc->sinfo_tsn = tchk->rec.data.tsn;
823 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
824 						tchk->data = NULL;
825 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
826 						sctp_setup_tail_pointer(nc);
827 						tchk = TAILQ_FIRST(&control->reasm);
828 					}
829 					/* Spin the rest onto the queue */
830 					while (tchk) {
831 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
832 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
833 						tchk = TAILQ_FIRST(&control->reasm);
834 					}
835 					/* Now lets add it to the queue
836 					 * after removing control */
837 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
838 					nc->on_strm_q = SCTP_ON_UNORDERED;
839 					if (control->on_strm_q) {
840 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
841 						control->on_strm_q = 0;
842 					}
843 				}
844 				if (control->pdapi_started) {
845 					strm->pd_api_started = 0;
846 					control->pdapi_started = 0;
847 				}
848 				if (control->on_strm_q) {
849 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
850 					control->on_strm_q = 0;
851 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
852 				}
853 				if (control->on_read_q == 0) {
854 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
855 					    &stcb->sctp_socket->so_rcv, control->end_added,
856 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
857 				}
858 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
859 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
860 					/* Switch to the new guy and
861 					 * continue */
862 					control = nc;
863 					goto restart;
864 				} else {
865 					if (nc->on_strm_q == 0) {
866 						sctp_free_a_readq(stcb, nc);
867 					}
868 				}
869 				return (1);
870 			} else {
871 				sctp_free_a_readq(stcb, nc);
872 			}
873 		} else {
874 			/* Can't add more */
875 			break;
876 		}
877 	}
878 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
879 		strm->pd_api_started = 1;
880 		control->pdapi_started = 1;
881 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
882 		    &stcb->sctp_socket->so_rcv, control->end_added,
883 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
884 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
885 		return (0);
886 	} else {
887 		return (1);
888 	}
889 }
890 
891 static void
892 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
893     struct sctp_association *asoc,
894     struct sctp_queued_to_read *control,
895     struct sctp_tmit_chunk *chk,
896     int *abort_flag)
897 {
898 	struct sctp_tmit_chunk *at;
899 	int inserted;
900 
901 	/*
902 	 * Here we need to place the chunk into the control structure sorted
903 	 * in the correct order.
904 	 */
905 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
906 		/* Its the very first one. */
907 		SCTPDBG(SCTP_DEBUG_XXX,
908 		    "chunk is a first fsn: %u becomes fsn_included\n",
909 		    chk->rec.data.fsn);
910 		if (control->first_frag_seen) {
911 			/*
912 			 * In old un-ordered we can reassembly on one
913 			 * control multiple messages. As long as the next
914 			 * FIRST is greater then the old first (TSN i.e. FSN
915 			 * wise)
916 			 */
917 			struct mbuf *tdata;
918 			uint32_t tmp;
919 
920 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
921 				/* Easy way the start of a new guy beyond
922 				 * the lowest */
923 				goto place_chunk;
924 			}
925 			if ((chk->rec.data.fsn == control->fsn_included) ||
926 			    (control->pdapi_started)) {
927 				/*
928 				 * Ok this should not happen, if it does we
929 				 * started the pd-api on the higher TSN
930 				 * (since the equals part is a TSN failure
931 				 * it must be that).
932 				 *
933 				 * We are completly hosed in that case since
934 				 * I have no way to recover. This really
935 				 * will only happen if we can get more TSN's
936 				 * higher before the pd-api-point.
937 				 */
938 				sctp_abort_in_reasm(stcb, control, chk,
939 				    abort_flag,
940 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
941 
942 				return;
943 			}
944 			/*
945 			 * Ok we have two firsts and the one we just got is
946 			 * smaller than the one we previously placed.. yuck!
947 			 * We must swap them out.
948 			 */
949 			/* swap the mbufs */
950 			tdata = control->data;
951 			control->data = chk->data;
952 			chk->data = tdata;
953 			/* Save the lengths */
954 			chk->send_size = control->length;
955 			/* Recompute length of control and tail pointer */
956 			sctp_setup_tail_pointer(control);
957 			/* Fix the FSN included */
958 			tmp = control->fsn_included;
959 			control->fsn_included = chk->rec.data.fsn;
960 			chk->rec.data.fsn = tmp;
961 			/* Fix the TSN included */
962 			tmp = control->sinfo_tsn;
963 			control->sinfo_tsn = chk->rec.data.tsn;
964 			chk->rec.data.tsn = tmp;
965 			/* Fix the PPID included */
966 			tmp = control->sinfo_ppid;
967 			control->sinfo_ppid = chk->rec.data.ppid;
968 			chk->rec.data.ppid = tmp;
969 			/* Fix tail pointer */
970 			goto place_chunk;
971 		}
972 		control->first_frag_seen = 1;
973 		control->fsn_included = chk->rec.data.fsn;
974 		control->top_fsn = chk->rec.data.fsn;
975 		control->sinfo_tsn = chk->rec.data.tsn;
976 		control->sinfo_ppid = chk->rec.data.ppid;
977 		control->data = chk->data;
978 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
979 		chk->data = NULL;
980 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
981 		sctp_setup_tail_pointer(control);
982 		return;
983 	}
984 place_chunk:
985 	inserted = 0;
986 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
987 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
988 			/*
989 			 * This one in queue is bigger than the new one,
990 			 * insert the new one before at.
991 			 */
992 			asoc->size_on_reasm_queue += chk->send_size;
993 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
994 			inserted = 1;
995 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
996 			break;
997 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
998 			/*
999 			 * They sent a duplicate fsn number. This really
1000 			 * should not happen since the FSN is a TSN and it
1001 			 * should have been dropped earlier.
1002 			 */
1003 			sctp_abort_in_reasm(stcb, control, chk,
1004 			    abort_flag,
1005 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1006 			return;
1007 		}
1008 	}
1009 	if (inserted == 0) {
1010 		/* Its at the end */
1011 		asoc->size_on_reasm_queue += chk->send_size;
1012 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1013 		control->top_fsn = chk->rec.data.fsn;
1014 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1015 	}
1016 }
1017 
1018 static int
1019 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1020     struct sctp_stream_in *strm, int inp_read_lock_held)
1021 {
1022 	/*
1023 	 * Given a stream, strm, see if any of the SSN's on it that are
1024 	 * fragmented are ready to deliver. If so go ahead and place them on
1025 	 * the read queue. In so placing if we have hit the end, then we
1026 	 * need to remove them from the stream's queue.
1027 	 */
1028 	struct sctp_queued_to_read *control, *nctl = NULL;
1029 	uint32_t next_to_del;
1030 	uint32_t pd_point;
1031 	int ret = 0;
1032 
1033 	if (stcb->sctp_socket) {
1034 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1035 		    stcb->sctp_ep->partial_delivery_point);
1036 	} else {
1037 		pd_point = stcb->sctp_ep->partial_delivery_point;
1038 	}
1039 	control = TAILQ_FIRST(&strm->uno_inqueue);
1040 
1041 	if ((control != NULL) &&
1042 	    (asoc->idata_supported == 0)) {
1043 		/* Special handling needed for "old" data format */
1044 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1045 			goto done_un;
1046 		}
1047 	}
1048 	if (strm->pd_api_started) {
1049 		/* Can't add more */
1050 		return (0);
1051 	}
1052 	while (control) {
1053 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1054 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1055 		nctl = TAILQ_NEXT(control, next_instrm);
1056 		if (control->end_added) {
1057 			/* We just put the last bit on */
1058 			if (control->on_strm_q) {
1059 #ifdef INVARIANTS
1060 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1061 					panic("Huh control: %p on_q: %d -- not unordered?",
1062 					    control, control->on_strm_q);
1063 				}
1064 #endif
1065 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1066 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1067 				control->on_strm_q = 0;
1068 			}
1069 			if (control->on_read_q == 0) {
1070 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1071 				    control,
1072 				    &stcb->sctp_socket->so_rcv, control->end_added,
1073 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1074 			}
1075 		} else {
1076 			/* Can we do a PD-API for this un-ordered guy? */
1077 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1078 				strm->pd_api_started = 1;
1079 				control->pdapi_started = 1;
1080 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1081 				    control,
1082 				    &stcb->sctp_socket->so_rcv, control->end_added,
1083 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1084 
1085 				break;
1086 			}
1087 		}
1088 		control = nctl;
1089 	}
1090 done_un:
1091 	control = TAILQ_FIRST(&strm->inqueue);
1092 	if (strm->pd_api_started) {
1093 		/* Can't add more */
1094 		return (0);
1095 	}
1096 	if (control == NULL) {
1097 		return (ret);
1098 	}
1099 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1100 		/*
1101 		 * Ok the guy at the top was being partially delivered
1102 		 * completed, so we remove it. Note the pd_api flag was
1103 		 * taken off when the chunk was merged on in
1104 		 * sctp_queue_data_for_reasm below.
1105 		 */
1106 		nctl = TAILQ_NEXT(control, next_instrm);
1107 		SCTPDBG(SCTP_DEBUG_XXX,
1108 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1109 		    control, control->end_added, control->mid,
1110 		    control->top_fsn, control->fsn_included,
1111 		    strm->last_mid_delivered);
1112 		if (control->end_added) {
1113 			if (control->on_strm_q) {
1114 #ifdef INVARIANTS
1115 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1116 					panic("Huh control: %p on_q: %d -- not ordered?",
1117 					    control, control->on_strm_q);
1118 				}
1119 #endif
1120 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1121 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1122 				control->on_strm_q = 0;
1123 			}
1124 			if (strm->pd_api_started && control->pdapi_started) {
1125 				control->pdapi_started = 0;
1126 				strm->pd_api_started = 0;
1127 			}
1128 			if (control->on_read_q == 0) {
1129 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1130 				    control,
1131 				    &stcb->sctp_socket->so_rcv, control->end_added,
1132 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1133 			}
1134 			control = nctl;
1135 		}
1136 	}
1137 	if (strm->pd_api_started) {
1138 		/* Can't add more must have gotten an un-ordered above being
1139 		 * partially delivered. */
1140 		return (0);
1141 	}
1142 deliver_more:
1143 	next_to_del = strm->last_mid_delivered + 1;
1144 	if (control) {
1145 		SCTPDBG(SCTP_DEBUG_XXX,
1146 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1147 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1148 		    next_to_del);
1149 		nctl = TAILQ_NEXT(control, next_instrm);
1150 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1151 		    (control->first_frag_seen)) {
1152 			int done;
1153 
1154 			/* Ok we can deliver it onto the stream. */
1155 			if (control->end_added) {
1156 				/* We are done with it afterwards */
1157 				if (control->on_strm_q) {
1158 #ifdef INVARIANTS
1159 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1160 						panic("Huh control: %p on_q: %d -- not ordered?",
1161 						    control, control->on_strm_q);
1162 					}
1163 #endif
1164 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1165 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1166 					control->on_strm_q = 0;
1167 				}
1168 				ret++;
1169 			}
1170 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1171 				/* A singleton now slipping through - mark
1172 				 * it non-revokable too */
1173 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1174 			} else if (control->end_added == 0) {
1175 				/* Check if we can defer adding until its
1176 				 * all there */
1177 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1178 					/* Don't need it or cannot add more
1179 					 * (one being delivered that way) */
1180 					goto out;
1181 				}
1182 			}
1183 			done = (control->end_added) && (control->last_frag_seen);
1184 			if (control->on_read_q == 0) {
1185 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1186 				    control,
1187 				    &stcb->sctp_socket->so_rcv, control->end_added,
1188 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1189 			}
1190 			strm->last_mid_delivered = next_to_del;
1191 			if (done) {
1192 				control = nctl;
1193 				goto deliver_more;
1194 			} else {
1195 				/* We are now doing PD API */
1196 				strm->pd_api_started = 1;
1197 				control->pdapi_started = 1;
1198 			}
1199 		}
1200 	}
1201 out:
1202 	return (ret);
1203 }
1204 
1205 
1206 void
1207 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1208     struct sctp_stream_in *strm,
1209     struct sctp_tcb *stcb, struct sctp_association *asoc,
1210     struct sctp_tmit_chunk *chk, int hold_rlock)
1211 {
1212 	/*
1213 	 * Given a control and a chunk, merge the data from the chk onto the
1214 	 * control and free up the chunk resources.
1215 	 */
1216 	int i_locked = 0;
1217 
1218 	if (control->on_read_q && (hold_rlock == 0)) {
1219 		/*
1220 		 * Its being pd-api'd so we must do some locks.
1221 		 */
1222 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1223 		i_locked = 1;
1224 	}
1225 	if (control->data == NULL) {
1226 		control->data = chk->data;
1227 		sctp_setup_tail_pointer(control);
1228 	} else {
1229 		sctp_add_to_tail_pointer(control, chk->data);
1230 	}
1231 	control->fsn_included = chk->rec.data.fsn;
1232 	asoc->size_on_reasm_queue -= chk->send_size;
1233 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1234 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1235 	chk->data = NULL;
1236 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1237 		control->first_frag_seen = 1;
1238 		control->sinfo_tsn = chk->rec.data.tsn;
1239 		control->sinfo_ppid = chk->rec.data.ppid;
1240 	}
1241 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1242 		/* Its complete */
1243 		if ((control->on_strm_q) && (control->on_read_q)) {
1244 			if (control->pdapi_started) {
1245 				control->pdapi_started = 0;
1246 				strm->pd_api_started = 0;
1247 			}
1248 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1249 				/* Unordered */
1250 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1251 				control->on_strm_q = 0;
1252 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1253 				/* Ordered */
1254 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1255 				control->on_strm_q = 0;
1256 #ifdef INVARIANTS
1257 			} else if (control->on_strm_q) {
1258 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1259 				    control->on_strm_q);
1260 #endif
1261 			}
1262 		}
1263 		control->end_added = 1;
1264 		control->last_frag_seen = 1;
1265 	}
1266 	if (i_locked) {
1267 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1268 	}
1269 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1270 }
1271 
1272 /*
1273  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1274  * queue, see if anthing can be delivered. If so pull it off (or as much as
1275  * we can. If we run out of space then we must dump what we can and set the
1276  * appropriate flag to say we queued what we could.
1277  */
1278 static void
1279 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1280     struct sctp_queued_to_read *control,
1281     struct sctp_tmit_chunk *chk,
1282     int created_control,
1283     int *abort_flag, uint32_t tsn)
1284 {
1285 	uint32_t next_fsn;
1286 	struct sctp_tmit_chunk *at, *nat;
1287 	struct sctp_stream_in *strm;
1288 	int do_wakeup, unordered;
1289 
1290 	strm = &asoc->strmin[control->sinfo_stream];
1291 	/*
1292 	 * For old un-ordered data chunks.
1293 	 */
1294 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1295 		unordered = 1;
1296 	} else {
1297 		unordered = 0;
1298 	}
1299 	/* Must be added to the stream-in queue */
1300 	if (created_control) {
1301 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1302 			/* Duplicate SSN? */
1303 			sctp_clean_up_control(stcb, control);
1304 			sctp_abort_in_reasm(stcb, control, chk,
1305 			    abort_flag,
1306 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1307 			return;
1308 		}
1309 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1310 			/*
1311 			 * Ok we created this control and now lets validate
1312 			 * that its legal i.e. there is a B bit set, if not
1313 			 * and we have up to the cum-ack then its invalid.
1314 			 */
1315 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1316 				sctp_abort_in_reasm(stcb, control, chk,
1317 				    abort_flag,
1318 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1319 				return;
1320 			}
1321 		}
1322 	}
1323 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1324 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1325 		return;
1326 	}
1327 	/*
1328 	 * Ok we must queue the chunk into the reasembly portion: o if its
1329 	 * the first it goes to the control mbuf. o if its not first but the
1330 	 * next in sequence it goes to the control, and each succeeding one
1331 	 * in order also goes. o if its not in order we place it on the list
1332 	 * in its place.
1333 	 */
1334 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1335 		/* Its the very first one. */
1336 		SCTPDBG(SCTP_DEBUG_XXX,
1337 		    "chunk is a first fsn: %u becomes fsn_included\n",
1338 		    chk->rec.data.fsn);
1339 		if (control->first_frag_seen) {
1340 			/*
1341 			 * Error on senders part, they either sent us two
1342 			 * data chunks with FIRST, or they sent two
1343 			 * un-ordered chunks that were fragmented at the
1344 			 * same time in the same stream.
1345 			 */
1346 			sctp_abort_in_reasm(stcb, control, chk,
1347 			    abort_flag,
1348 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1349 			return;
1350 		}
1351 		control->first_frag_seen = 1;
1352 		control->sinfo_ppid = chk->rec.data.ppid;
1353 		control->sinfo_tsn = chk->rec.data.tsn;
1354 		control->fsn_included = chk->rec.data.fsn;
1355 		control->data = chk->data;
1356 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1357 		chk->data = NULL;
1358 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1359 		sctp_setup_tail_pointer(control);
1360 	} else {
1361 		/* Place the chunk in our list */
1362 		int inserted = 0;
1363 
1364 		if (control->last_frag_seen == 0) {
1365 			/* Still willing to raise highest FSN seen */
1366 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1367 				SCTPDBG(SCTP_DEBUG_XXX,
1368 				    "We have a new top_fsn: %u\n",
1369 				    chk->rec.data.fsn);
1370 				control->top_fsn = chk->rec.data.fsn;
1371 			}
1372 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1373 				SCTPDBG(SCTP_DEBUG_XXX,
1374 				    "The last fsn is now in place fsn: %u\n",
1375 				    chk->rec.data.fsn);
1376 				control->last_frag_seen = 1;
1377 			}
1378 			if (asoc->idata_supported || control->first_frag_seen) {
1379 				/*
1380 				 * For IDATA we always check since we know
1381 				 * that the first fragment is 0. For old
1382 				 * DATA we have to receive the first before
1383 				 * we know the first FSN (which is the TSN).
1384 				 */
1385 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1386 					/* We have already delivered up to
1387 					 * this so its a dup */
1388 					sctp_abort_in_reasm(stcb, control, chk,
1389 					    abort_flag,
1390 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1391 					return;
1392 				}
1393 			}
1394 		} else {
1395 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1396 				/* Second last? huh? */
1397 				SCTPDBG(SCTP_DEBUG_XXX,
1398 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1399 				    chk->rec.data.fsn, control->top_fsn);
1400 				sctp_abort_in_reasm(stcb, control,
1401 				    chk, abort_flag,
1402 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1403 				return;
1404 			}
1405 			if (asoc->idata_supported || control->first_frag_seen) {
1406 				/*
1407 				 * For IDATA we always check since we know
1408 				 * that the first fragment is 0. For old
1409 				 * DATA we have to receive the first before
1410 				 * we know the first FSN (which is the TSN).
1411 				 */
1412 
1413 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1414 					/* We have already delivered up to
1415 					 * this so its a dup */
1416 					SCTPDBG(SCTP_DEBUG_XXX,
1417 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1418 					    chk->rec.data.fsn, control->fsn_included);
1419 					sctp_abort_in_reasm(stcb, control, chk,
1420 					    abort_flag,
1421 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1422 					return;
1423 				}
1424 			}
1425 			/* validate not beyond top FSN if we have seen last
1426 			 * one */
1427 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1428 				SCTPDBG(SCTP_DEBUG_XXX,
1429 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1430 				    chk->rec.data.fsn,
1431 				    control->top_fsn);
1432 				sctp_abort_in_reasm(stcb, control, chk,
1433 				    abort_flag,
1434 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1435 				return;
1436 			}
1437 		}
1438 		/*
1439 		 * If we reach here, we need to place the new chunk in the
1440 		 * reassembly for this control.
1441 		 */
1442 		SCTPDBG(SCTP_DEBUG_XXX,
1443 		    "chunk is a not first fsn: %u needs to be inserted\n",
1444 		    chk->rec.data.fsn);
1445 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1446 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1447 				/*
1448 				 * This one in queue is bigger than the new
1449 				 * one, insert the new one before at.
1450 				 */
1451 				SCTPDBG(SCTP_DEBUG_XXX,
1452 				    "Insert it before fsn: %u\n",
1453 				    at->rec.data.fsn);
1454 				asoc->size_on_reasm_queue += chk->send_size;
1455 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1456 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1457 				inserted = 1;
1458 				break;
1459 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1460 				/* Gak, He sent me a duplicate str seq
1461 				 * number */
1462 				/*
1463 				 * foo bar, I guess I will just free this
1464 				 * new guy, should we abort too? FIX ME
1465 				 * MAYBE? Or it COULD be that the SSN's have
1466 				 * wrapped. Maybe I should compare to TSN
1467 				 * somehow... sigh for now just blow away
1468 				 * the chunk!
1469 				 */
1470 				SCTPDBG(SCTP_DEBUG_XXX,
1471 				    "Duplicate to fsn: %u -- abort\n",
1472 				    at->rec.data.fsn);
1473 				sctp_abort_in_reasm(stcb, control,
1474 				    chk, abort_flag,
1475 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1476 				return;
1477 			}
1478 		}
1479 		if (inserted == 0) {
1480 			/* Goes on the end */
1481 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1482 			    chk->rec.data.fsn);
1483 			asoc->size_on_reasm_queue += chk->send_size;
1484 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1485 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1486 		}
1487 	}
1488 	/*
1489 	 * Ok lets see if we can suck any up into the control structure that
1490 	 * are in seq if it makes sense.
1491 	 */
1492 	do_wakeup = 0;
1493 	/*
1494 	 * If the first fragment has not been seen there is no sense in
1495 	 * looking.
1496 	 */
1497 	if (control->first_frag_seen) {
1498 		next_fsn = control->fsn_included + 1;
1499 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1500 			if (at->rec.data.fsn == next_fsn) {
1501 				/* We can add this one now to the control */
1502 				SCTPDBG(SCTP_DEBUG_XXX,
1503 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1504 				    control, at,
1505 				    at->rec.data.fsn,
1506 				    next_fsn, control->fsn_included);
1507 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1508 				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1509 				if (control->on_read_q) {
1510 					do_wakeup = 1;
1511 				}
1512 				next_fsn++;
1513 				if (control->end_added && control->pdapi_started) {
1514 					if (strm->pd_api_started) {
1515 						strm->pd_api_started = 0;
1516 						control->pdapi_started = 0;
1517 					}
1518 					if (control->on_read_q == 0) {
1519 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1520 						    control,
1521 						    &stcb->sctp_socket->so_rcv, control->end_added,
1522 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1523 						do_wakeup = 1;
1524 					}
1525 					break;
1526 				}
1527 			} else {
1528 				break;
1529 			}
1530 		}
1531 	}
1532 	if (do_wakeup) {
1533 		/* Need to wakeup the reader */
1534 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1535 	}
1536 }
1537 
1538 static struct sctp_queued_to_read *
1539 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1540 {
1541 	struct sctp_queued_to_read *control;
1542 
1543 	if (ordered) {
1544 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1545 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1546 				break;
1547 			}
1548 		}
1549 	} else {
1550 		if (idata_supported) {
1551 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1552 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1553 					break;
1554 				}
1555 			}
1556 		} else {
1557 			control = TAILQ_FIRST(&strm->uno_inqueue);
1558 		}
1559 	}
1560 	return (control);
1561 }
1562 
1563 static int
1564 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1565     struct mbuf **m, int offset, int chk_length,
1566     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1567     int *break_flag, int last_chunk, uint8_t chk_type)
1568 {
1569 	/* Process a data chunk */
1570 	/* struct sctp_tmit_chunk *chk; */
1571 	struct sctp_tmit_chunk *chk;
1572 	uint32_t tsn, fsn, gap, mid;
1573 	struct mbuf *dmbuf;
1574 	int the_len;
1575 	int need_reasm_check = 0;
1576 	uint16_t sid;
1577 	struct mbuf *op_err;
1578 	char msg[SCTP_DIAG_INFO_LEN];
1579 	struct sctp_queued_to_read *control = NULL;
1580 	uint32_t ppid;
1581 	uint8_t chk_flags;
1582 	struct sctp_stream_reset_list *liste;
1583 	int ordered;
1584 	size_t clen;
1585 	int created_control = 0;
1586 
1587 	if (chk_type == SCTP_IDATA) {
1588 		struct sctp_idata_chunk *chunk, chunk_buf;
1589 
1590 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1591 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1592 		chk_flags = chunk->ch.chunk_flags;
1593 		clen = sizeof(struct sctp_idata_chunk);
1594 		tsn = ntohl(chunk->dp.tsn);
1595 		sid = ntohs(chunk->dp.sid);
1596 		mid = ntohl(chunk->dp.mid);
1597 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1598 			fsn = 0;
1599 			ppid = chunk->dp.ppid_fsn.ppid;
1600 		} else {
1601 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1602 			ppid = 0xffffffff;	/* Use as an invalid value. */
1603 		}
1604 	} else {
1605 		struct sctp_data_chunk *chunk, chunk_buf;
1606 
1607 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1608 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1609 		chk_flags = chunk->ch.chunk_flags;
1610 		clen = sizeof(struct sctp_data_chunk);
1611 		tsn = ntohl(chunk->dp.tsn);
1612 		sid = ntohs(chunk->dp.sid);
1613 		mid = (uint32_t) (ntohs(chunk->dp.ssn));
1614 		fsn = tsn;
1615 		ppid = chunk->dp.ppid;
1616 	}
1617 	if ((size_t)chk_length == clen) {
1618 		/*
1619 		 * Need to send an abort since we had a empty data chunk.
1620 		 */
1621 		op_err = sctp_generate_no_user_data_cause(tsn);
1622 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1623 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1624 		*abort_flag = 1;
1625 		return (0);
1626 	}
1627 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1628 		asoc->send_sack = 1;
1629 	}
1630 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1631 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1632 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1633 	}
1634 	if (stcb == NULL) {
1635 		return (0);
1636 	}
1637 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1638 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1639 		/* It is a duplicate */
1640 		SCTP_STAT_INCR(sctps_recvdupdata);
1641 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1642 			/* Record a dup for the next outbound sack */
1643 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1644 			asoc->numduptsns++;
1645 		}
1646 		asoc->send_sack = 1;
1647 		return (0);
1648 	}
1649 	/* Calculate the number of TSN's between the base and this TSN */
1650 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1651 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1652 		/* Can't hold the bit in the mapping at max array, toss it */
1653 		return (0);
1654 	}
1655 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1656 		SCTP_TCB_LOCK_ASSERT(stcb);
1657 		if (sctp_expand_mapping_array(asoc, gap)) {
1658 			/* Can't expand, drop it */
1659 			return (0);
1660 		}
1661 	}
1662 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1663 		*high_tsn = tsn;
1664 	}
1665 	/* See if we have received this one already */
1666 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1667 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1668 		SCTP_STAT_INCR(sctps_recvdupdata);
1669 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1670 			/* Record a dup for the next outbound sack */
1671 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1672 			asoc->numduptsns++;
1673 		}
1674 		asoc->send_sack = 1;
1675 		return (0);
1676 	}
1677 	/*
1678 	 * Check to see about the GONE flag, duplicates would cause a sack
1679 	 * to be sent up above
1680 	 */
1681 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1682 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1683 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1684 		/*
1685 		 * wait a minute, this guy is gone, there is no longer a
1686 		 * receiver. Send peer an ABORT!
1687 		 */
1688 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1689 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1690 		*abort_flag = 1;
1691 		return (0);
1692 	}
1693 	/*
1694 	 * Now before going further we see if there is room. If NOT then we
1695 	 * MAY let one through only IF this TSN is the one we are waiting
1696 	 * for on a partial delivery API.
1697 	 */
1698 
1699 	/* Is the stream valid? */
1700 	if (sid >= asoc->streamincnt) {
1701 		struct sctp_error_invalid_stream *cause;
1702 
1703 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1704 		    0, M_NOWAIT, 1, MT_DATA);
1705 		if (op_err != NULL) {
1706 			/* add some space up front so prepend will work well */
1707 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1708 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1709 			/*
1710 			 * Error causes are just param's and this one has
1711 			 * two back to back phdr, one with the error type
1712 			 * and size, the other with the streamid and a rsvd
1713 			 */
1714 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1715 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1716 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1717 			cause->stream_id = htons(sid);
1718 			cause->reserved = htons(0);
1719 			sctp_queue_op_err(stcb, op_err);
1720 		}
1721 		SCTP_STAT_INCR(sctps_badsid);
1722 		SCTP_TCB_LOCK_ASSERT(stcb);
1723 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1724 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1725 			asoc->highest_tsn_inside_nr_map = tsn;
1726 		}
1727 		if (tsn == (asoc->cumulative_tsn + 1)) {
1728 			/* Update cum-ack */
1729 			asoc->cumulative_tsn = tsn;
1730 		}
1731 		return (0);
1732 	}
1733 	/*
1734 	 * If its a fragmented message, lets see if we can find the control
1735 	 * on the reassembly queues.
1736 	 */
1737 	if ((chk_type == SCTP_IDATA) &&
1738 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1739 	    (fsn == 0)) {
1740 		/*
1741 		 * The first *must* be fsn 0, and other (middle/end) pieces
1742 		 * can *not* be fsn 0. XXX: This can happen in case of a
1743 		 * wrap around. Ignore is for now.
1744 		 */
1745 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1746 		    mid, chk_flags);
1747 		goto err_out;
1748 	}
1749 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1750 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1751 	    chk_flags, control);
1752 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1753 		/* See if we can find the re-assembly entity */
1754 		if (control != NULL) {
1755 			/* We found something, does it belong? */
1756 			if (ordered && (mid != control->mid)) {
1757 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1758 		err_out:
1759 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1760 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1761 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1762 				*abort_flag = 1;
1763 				return (0);
1764 			}
1765 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1766 				/* We can't have a switched order with an
1767 				 * unordered chunk */
1768 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1769 				    tsn);
1770 				goto err_out;
1771 			}
1772 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1773 				/* We can't have a switched unordered with a
1774 				 * ordered chunk */
1775 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1776 				    tsn);
1777 				goto err_out;
1778 			}
1779 		}
1780 	} else {
1781 		/*
1782 		 * Its a complete segment. Lets validate we don't have a
1783 		 * re-assembly going on with the same Stream/Seq (for
1784 		 * ordered) or in the same Stream for unordered.
1785 		 */
1786 		if (control != NULL) {
1787 			if (ordered || asoc->idata_supported) {
1788 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1789 				    chk_flags, mid);
1790 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1791 				goto err_out;
1792 			} else {
1793 				if ((tsn == control->fsn_included + 1) &&
1794 				    (control->end_added == 0)) {
1795 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1796 					goto err_out;
1797 				} else {
1798 					control = NULL;
1799 				}
1800 			}
1801 		}
1802 	}
1803 	/* now do the tests */
1804 	if (((asoc->cnt_on_all_streams +
1805 	    asoc->cnt_on_reasm_queue +
1806 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1807 	    (((int)asoc->my_rwnd) <= 0)) {
1808 		/*
1809 		 * When we have NO room in the rwnd we check to make sure
1810 		 * the reader is doing its job...
1811 		 */
1812 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1813 			/* some to read, wake-up */
1814 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1815 			struct socket *so;
1816 
1817 			so = SCTP_INP_SO(stcb->sctp_ep);
1818 			atomic_add_int(&stcb->asoc.refcnt, 1);
1819 			SCTP_TCB_UNLOCK(stcb);
1820 			SCTP_SOCKET_LOCK(so, 1);
1821 			SCTP_TCB_LOCK(stcb);
1822 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1823 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1824 				/* assoc was freed while we were unlocked */
1825 				SCTP_SOCKET_UNLOCK(so, 1);
1826 				return (0);
1827 			}
1828 #endif
1829 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1830 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1831 			SCTP_SOCKET_UNLOCK(so, 1);
1832 #endif
1833 		}
1834 		/* now is it in the mapping array of what we have accepted? */
1835 		if (chk_type == SCTP_DATA) {
1836 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1837 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1838 				/* Nope not in the valid range dump it */
1839 		dump_packet:
1840 				sctp_set_rwnd(stcb, asoc);
1841 				if ((asoc->cnt_on_all_streams +
1842 				    asoc->cnt_on_reasm_queue +
1843 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1844 					SCTP_STAT_INCR(sctps_datadropchklmt);
1845 				} else {
1846 					SCTP_STAT_INCR(sctps_datadroprwnd);
1847 				}
1848 				*break_flag = 1;
1849 				return (0);
1850 			}
1851 		} else {
1852 			if (control == NULL) {
1853 				goto dump_packet;
1854 			}
1855 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1856 				goto dump_packet;
1857 			}
1858 		}
1859 	}
1860 #ifdef SCTP_ASOCLOG_OF_TSNS
1861 	SCTP_TCB_LOCK_ASSERT(stcb);
1862 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1863 		asoc->tsn_in_at = 0;
1864 		asoc->tsn_in_wrapped = 1;
1865 	}
1866 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1867 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1868 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1869 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1870 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1871 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1872 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1873 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1874 	asoc->tsn_in_at++;
1875 #endif
1876 	/*
1877 	 * Before we continue lets validate that we are not being fooled by
1878 	 * an evil attacker. We can only have Nk chunks based on our TSN
1879 	 * spread allowed by the mapping array N * 8 bits, so there is no
1880 	 * way our stream sequence numbers could have wrapped. We of course
1881 	 * only validate the FIRST fragment so the bit must be set.
1882 	 */
1883 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1884 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1885 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1886 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1887 		/* The incoming sseq is behind where we last delivered? */
1888 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1889 		    mid, asoc->strmin[sid].last_mid_delivered);
1890 
1891 		if (asoc->idata_supported) {
1892 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1893 			    asoc->strmin[sid].last_mid_delivered,
1894 			    tsn,
1895 			    sid,
1896 			    mid);
1897 		} else {
1898 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1899 			    (uint16_t) asoc->strmin[sid].last_mid_delivered,
1900 			    tsn,
1901 			    sid,
1902 			    (uint16_t) mid);
1903 		}
1904 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1905 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1906 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1907 		*abort_flag = 1;
1908 		return (0);
1909 	}
1910 	if (chk_type == SCTP_IDATA) {
1911 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1912 	} else {
1913 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1914 	}
1915 	if (last_chunk == 0) {
1916 		if (chk_type == SCTP_IDATA) {
1917 			dmbuf = SCTP_M_COPYM(*m,
1918 			    (offset + sizeof(struct sctp_idata_chunk)),
1919 			    the_len, M_NOWAIT);
1920 		} else {
1921 			dmbuf = SCTP_M_COPYM(*m,
1922 			    (offset + sizeof(struct sctp_data_chunk)),
1923 			    the_len, M_NOWAIT);
1924 		}
1925 #ifdef SCTP_MBUF_LOGGING
1926 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1927 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1928 		}
1929 #endif
1930 	} else {
1931 		/* We can steal the last chunk */
1932 		int l_len;
1933 
1934 		dmbuf = *m;
1935 		/* lop off the top part */
1936 		if (chk_type == SCTP_IDATA) {
1937 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1938 		} else {
1939 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1940 		}
1941 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1942 			l_len = SCTP_BUF_LEN(dmbuf);
1943 		} else {
1944 			/*
1945 			 * need to count up the size hopefully does not hit
1946 			 * this to often :-0
1947 			 */
1948 			struct mbuf *lat;
1949 
1950 			l_len = 0;
1951 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1952 				l_len += SCTP_BUF_LEN(lat);
1953 			}
1954 		}
1955 		if (l_len > the_len) {
1956 			/* Trim the end round bytes off  too */
1957 			m_adj(dmbuf, -(l_len - the_len));
1958 		}
1959 	}
1960 	if (dmbuf == NULL) {
1961 		SCTP_STAT_INCR(sctps_nomem);
1962 		return (0);
1963 	}
1964 	/*
1965 	 * Now no matter what, we need a control, get one if we don't have
1966 	 * one (we may have gotten it above when we found the message was
1967 	 * fragmented
1968 	 */
1969 	if (control == NULL) {
1970 		sctp_alloc_a_readq(stcb, control);
1971 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1972 		    ppid,
1973 		    sid,
1974 		    chk_flags,
1975 		    NULL, fsn, mid);
1976 		if (control == NULL) {
1977 			SCTP_STAT_INCR(sctps_nomem);
1978 			return (0);
1979 		}
1980 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1981 			control->data = dmbuf;
1982 			control->tail_mbuf = NULL;
1983 			control->end_added = 1;
1984 			control->last_frag_seen = 1;
1985 			control->first_frag_seen = 1;
1986 			control->fsn_included = fsn;
1987 			control->top_fsn = fsn;
1988 		}
1989 		created_control = 1;
1990 	}
1991 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
1992 	    chk_flags, ordered, mid, control);
1993 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1994 	    TAILQ_EMPTY(&asoc->resetHead) &&
1995 	    ((ordered == 0) ||
1996 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
1997 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
1998 		/* Candidate for express delivery */
1999 		/*
2000 		 * Its not fragmented, No PD-API is up, Nothing in the
2001 		 * delivery queue, Its un-ordered OR ordered and the next to
2002 		 * deliver AND nothing else is stuck on the stream queue,
2003 		 * And there is room for it in the socket buffer. Lets just
2004 		 * stuff it up the buffer....
2005 		 */
2006 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2007 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2008 			asoc->highest_tsn_inside_nr_map = tsn;
2009 		}
2010 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2011 		    control, mid);
2012 
2013 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2014 		    control, &stcb->sctp_socket->so_rcv,
2015 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2016 
2017 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2018 			/* for ordered, bump what we delivered */
2019 			asoc->strmin[sid].last_mid_delivered++;
2020 		}
2021 		SCTP_STAT_INCR(sctps_recvexpress);
2022 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2023 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2024 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2025 		}
2026 		control = NULL;
2027 		goto finish_express_del;
2028 	}
2029 	/* Now will we need a chunk too? */
2030 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2031 		sctp_alloc_a_chunk(stcb, chk);
2032 		if (chk == NULL) {
2033 			/* No memory so we drop the chunk */
2034 			SCTP_STAT_INCR(sctps_nomem);
2035 			if (last_chunk == 0) {
2036 				/* we copied it, free the copy */
2037 				sctp_m_freem(dmbuf);
2038 			}
2039 			return (0);
2040 		}
2041 		chk->rec.data.tsn = tsn;
2042 		chk->no_fr_allowed = 0;
2043 		chk->rec.data.fsn = fsn;
2044 		chk->rec.data.mid = mid;
2045 		chk->rec.data.sid = sid;
2046 		chk->rec.data.ppid = ppid;
2047 		chk->rec.data.context = stcb->asoc.context;
2048 		chk->rec.data.doing_fast_retransmit = 0;
2049 		chk->rec.data.rcv_flags = chk_flags;
2050 		chk->asoc = asoc;
2051 		chk->send_size = the_len;
2052 		chk->whoTo = net;
2053 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2054 		    chk,
2055 		    control, mid);
2056 		atomic_add_int(&net->ref_count, 1);
2057 		chk->data = dmbuf;
2058 	}
2059 	/* Set the appropriate TSN mark */
2060 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2061 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2062 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2063 			asoc->highest_tsn_inside_nr_map = tsn;
2064 		}
2065 	} else {
2066 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2067 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2068 			asoc->highest_tsn_inside_map = tsn;
2069 		}
2070 	}
2071 	/* Now is it complete (i.e. not fragmented)? */
2072 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2073 		/*
2074 		 * Special check for when streams are resetting. We could be
2075 		 * more smart about this and check the actual stream to see
2076 		 * if it is not being reset.. that way we would not create a
2077 		 * HOLB when amongst streams being reset and those not being
2078 		 * reset.
2079 		 *
2080 		 */
2081 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2082 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2083 			/*
2084 			 * yep its past where we need to reset... go ahead
2085 			 * and queue it.
2086 			 */
2087 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2088 				/* first one on */
2089 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2090 			} else {
2091 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2092 				unsigned char inserted = 0;
2093 
2094 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2095 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2096 
2097 						continue;
2098 					} else {
2099 						/* found it */
2100 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2101 						inserted = 1;
2102 						break;
2103 					}
2104 				}
2105 				if (inserted == 0) {
2106 					/*
2107 					 * must be put at end, use prevP
2108 					 * (all setup from loop) to setup
2109 					 * nextP.
2110 					 */
2111 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2112 				}
2113 			}
2114 			goto finish_express_del;
2115 		}
2116 		if (chk_flags & SCTP_DATA_UNORDERED) {
2117 			/* queue directly into socket buffer */
2118 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2119 			    control, mid);
2120 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2121 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2122 			    control,
2123 			    &stcb->sctp_socket->so_rcv, 1,
2124 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2125 
2126 		} else {
2127 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2128 			    mid);
2129 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2130 			if (*abort_flag) {
2131 				if (last_chunk) {
2132 					*m = NULL;
2133 				}
2134 				return (0);
2135 			}
2136 		}
2137 		goto finish_express_del;
2138 	}
2139 	/* If we reach here its a reassembly */
2140 	need_reasm_check = 1;
2141 	SCTPDBG(SCTP_DEBUG_XXX,
2142 	    "Queue data to stream for reasm control: %p MID: %u\n",
2143 	    control, mid);
2144 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2145 	if (*abort_flag) {
2146 		/*
2147 		 * the assoc is now gone and chk was put onto the reasm
2148 		 * queue, which has all been freed.
2149 		 */
2150 		if (last_chunk) {
2151 			*m = NULL;
2152 		}
2153 		return (0);
2154 	}
2155 finish_express_del:
2156 	/* Here we tidy up things */
2157 	if (tsn == (asoc->cumulative_tsn + 1)) {
2158 		/* Update cum-ack */
2159 		asoc->cumulative_tsn = tsn;
2160 	}
2161 	if (last_chunk) {
2162 		*m = NULL;
2163 	}
2164 	if (ordered) {
2165 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2166 	} else {
2167 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2168 	}
2169 	SCTP_STAT_INCR(sctps_recvdata);
2170 	/* Set it present please */
2171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2172 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2173 	}
2174 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2175 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2176 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2177 	}
2178 	if (need_reasm_check) {
2179 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2180 		need_reasm_check = 0;
2181 	}
2182 	/* check the special flag for stream resets */
2183 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2184 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2185 		/*
2186 		 * we have finished working through the backlogged TSN's now
2187 		 * time to reset streams. 1: call reset function. 2: free
2188 		 * pending_reply space 3: distribute any chunks in
2189 		 * pending_reply_queue.
2190 		 */
2191 		struct sctp_queued_to_read *ctl, *nctl;
2192 
2193 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2194 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2195 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2196 		SCTP_FREE(liste, SCTP_M_STRESET);
2197 		/* sa_ignore FREED_MEMORY */
2198 		liste = TAILQ_FIRST(&asoc->resetHead);
2199 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2200 			/* All can be removed */
2201 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2202 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2203 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2204 				if (*abort_flag) {
2205 					return (0);
2206 				}
2207 				if (need_reasm_check) {
2208 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2209 					need_reasm_check = 0;
2210 				}
2211 			}
2212 		} else {
2213 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2214 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2215 					break;
2216 				}
2217 				/*
2218 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2219 				 * process it which is the NOT of
2220 				 * ctl->sinfo_tsn > liste->tsn
2221 				 */
2222 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2223 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2224 				if (*abort_flag) {
2225 					return (0);
2226 				}
2227 				if (need_reasm_check) {
2228 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2229 					need_reasm_check = 0;
2230 				}
2231 			}
2232 		}
2233 	}
2234 	return (1);
2235 }
2236 
2237 static const int8_t sctp_map_lookup_tab[256] = {
2238 	0, 1, 0, 2, 0, 1, 0, 3,
2239 	0, 1, 0, 2, 0, 1, 0, 4,
2240 	0, 1, 0, 2, 0, 1, 0, 3,
2241 	0, 1, 0, 2, 0, 1, 0, 5,
2242 	0, 1, 0, 2, 0, 1, 0, 3,
2243 	0, 1, 0, 2, 0, 1, 0, 4,
2244 	0, 1, 0, 2, 0, 1, 0, 3,
2245 	0, 1, 0, 2, 0, 1, 0, 6,
2246 	0, 1, 0, 2, 0, 1, 0, 3,
2247 	0, 1, 0, 2, 0, 1, 0, 4,
2248 	0, 1, 0, 2, 0, 1, 0, 3,
2249 	0, 1, 0, 2, 0, 1, 0, 5,
2250 	0, 1, 0, 2, 0, 1, 0, 3,
2251 	0, 1, 0, 2, 0, 1, 0, 4,
2252 	0, 1, 0, 2, 0, 1, 0, 3,
2253 	0, 1, 0, 2, 0, 1, 0, 7,
2254 	0, 1, 0, 2, 0, 1, 0, 3,
2255 	0, 1, 0, 2, 0, 1, 0, 4,
2256 	0, 1, 0, 2, 0, 1, 0, 3,
2257 	0, 1, 0, 2, 0, 1, 0, 5,
2258 	0, 1, 0, 2, 0, 1, 0, 3,
2259 	0, 1, 0, 2, 0, 1, 0, 4,
2260 	0, 1, 0, 2, 0, 1, 0, 3,
2261 	0, 1, 0, 2, 0, 1, 0, 6,
2262 	0, 1, 0, 2, 0, 1, 0, 3,
2263 	0, 1, 0, 2, 0, 1, 0, 4,
2264 	0, 1, 0, 2, 0, 1, 0, 3,
2265 	0, 1, 0, 2, 0, 1, 0, 5,
2266 	0, 1, 0, 2, 0, 1, 0, 3,
2267 	0, 1, 0, 2, 0, 1, 0, 4,
2268 	0, 1, 0, 2, 0, 1, 0, 3,
2269 	0, 1, 0, 2, 0, 1, 0, 8
2270 };
2271 
2272 
2273 void
2274 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2275 {
2276 	/*
2277 	 * Now we also need to check the mapping array in a couple of ways.
2278 	 * 1) Did we move the cum-ack point?
2279 	 *
2280 	 * When you first glance at this you might think that all entries
2281 	 * that make up the position of the cum-ack would be in the
2282 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2283 	 * deliverable. Thats true with one exception, when its a fragmented
2284 	 * message we may not deliver the data until some threshold (or all
2285 	 * of it) is in place. So we must OR the nr_mapping_array and
2286 	 * mapping_array to get a true picture of the cum-ack.
2287 	 */
2288 	struct sctp_association *asoc;
2289 	int at;
2290 	uint8_t val;
2291 	int slide_from, slide_end, lgap, distance;
2292 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2293 
2294 	asoc = &stcb->asoc;
2295 
2296 	old_cumack = asoc->cumulative_tsn;
2297 	old_base = asoc->mapping_array_base_tsn;
2298 	old_highest = asoc->highest_tsn_inside_map;
2299 	/*
2300 	 * We could probably improve this a small bit by calculating the
2301 	 * offset of the current cum-ack as the starting point.
2302 	 */
2303 	at = 0;
2304 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2305 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2306 		if (val == 0xff) {
2307 			at += 8;
2308 		} else {
2309 			/* there is a 0 bit */
2310 			at += sctp_map_lookup_tab[val];
2311 			break;
2312 		}
2313 	}
2314 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2315 
2316 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2317 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2318 #ifdef INVARIANTS
2319 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2320 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2321 #else
2322 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2323 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2324 		sctp_print_mapping_array(asoc);
2325 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2326 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2327 		}
2328 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2329 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2330 #endif
2331 	}
2332 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2333 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2334 	} else {
2335 		highest_tsn = asoc->highest_tsn_inside_map;
2336 	}
2337 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2338 		/* The complete array was completed by a single FR */
2339 		/* highest becomes the cum-ack */
2340 		int clr;
2341 #ifdef INVARIANTS
2342 		unsigned int i;
2343 #endif
2344 
2345 		/* clear the array */
2346 		clr = ((at + 7) >> 3);
2347 		if (clr > asoc->mapping_array_size) {
2348 			clr = asoc->mapping_array_size;
2349 		}
2350 		memset(asoc->mapping_array, 0, clr);
2351 		memset(asoc->nr_mapping_array, 0, clr);
2352 #ifdef INVARIANTS
2353 		for (i = 0; i < asoc->mapping_array_size; i++) {
2354 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2355 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2356 				sctp_print_mapping_array(asoc);
2357 			}
2358 		}
2359 #endif
2360 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2361 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2362 	} else if (at >= 8) {
2363 		/* we can slide the mapping array down */
2364 		/* slide_from holds where we hit the first NON 0xff byte */
2365 
2366 		/*
2367 		 * now calculate the ceiling of the move using our highest
2368 		 * TSN value
2369 		 */
2370 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2371 		slide_end = (lgap >> 3);
2372 		if (slide_end < slide_from) {
2373 			sctp_print_mapping_array(asoc);
2374 #ifdef INVARIANTS
2375 			panic("impossible slide");
2376 #else
2377 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2378 			    lgap, slide_end, slide_from, at);
2379 			return;
2380 #endif
2381 		}
2382 		if (slide_end > asoc->mapping_array_size) {
2383 #ifdef INVARIANTS
2384 			panic("would overrun buffer");
2385 #else
2386 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2387 			    asoc->mapping_array_size, slide_end);
2388 			slide_end = asoc->mapping_array_size;
2389 #endif
2390 		}
2391 		distance = (slide_end - slide_from) + 1;
2392 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2393 			sctp_log_map(old_base, old_cumack, old_highest,
2394 			    SCTP_MAP_PREPARE_SLIDE);
2395 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2396 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2397 		}
2398 		if (distance + slide_from > asoc->mapping_array_size ||
2399 		    distance < 0) {
2400 			/*
2401 			 * Here we do NOT slide forward the array so that
2402 			 * hopefully when more data comes in to fill it up
2403 			 * we will be able to slide it forward. Really I
2404 			 * don't think this should happen :-0
2405 			 */
2406 
2407 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2408 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2409 				    (uint32_t) asoc->mapping_array_size,
2410 				    SCTP_MAP_SLIDE_NONE);
2411 			}
2412 		} else {
2413 			int ii;
2414 
2415 			for (ii = 0; ii < distance; ii++) {
2416 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2417 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2418 
2419 			}
2420 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2421 				asoc->mapping_array[ii] = 0;
2422 				asoc->nr_mapping_array[ii] = 0;
2423 			}
2424 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2425 				asoc->highest_tsn_inside_map += (slide_from << 3);
2426 			}
2427 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2428 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2429 			}
2430 			asoc->mapping_array_base_tsn += (slide_from << 3);
2431 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2432 				sctp_log_map(asoc->mapping_array_base_tsn,
2433 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2434 				    SCTP_MAP_SLIDE_RESULT);
2435 			}
2436 		}
2437 	}
2438 }
2439 
2440 void
2441 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2442 {
2443 	struct sctp_association *asoc;
2444 	uint32_t highest_tsn;
2445 	int is_a_gap;
2446 
2447 	sctp_slide_mapping_arrays(stcb);
2448 	asoc = &stcb->asoc;
2449 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2450 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2451 	} else {
2452 		highest_tsn = asoc->highest_tsn_inside_map;
2453 	}
2454 	/* Is there a gap now? */
2455 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2456 
2457 	/*
2458 	 * Now we need to see if we need to queue a sack or just start the
2459 	 * timer (if allowed).
2460 	 */
2461 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2462 		/*
2463 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2464 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2465 		 * SACK
2466 		 */
2467 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2468 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2469 			    stcb->sctp_ep, stcb, NULL,
2470 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2471 		}
2472 		sctp_send_shutdown(stcb,
2473 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2474 		if (is_a_gap) {
2475 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2476 		}
2477 	} else {
2478 		/*
2479 		 * CMT DAC algorithm: increase number of packets received
2480 		 * since last ack
2481 		 */
2482 		stcb->asoc.cmt_dac_pkts_rcvd++;
2483 
2484 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2485 							 * SACK */
2486 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2487 							 * longer is one */
2488 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2489 		    (is_a_gap) ||	/* is still a gap */
2490 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2491 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
2492 
2493 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2494 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2495 			    (stcb->asoc.send_sack == 0) &&
2496 			    (stcb->asoc.numduptsns == 0) &&
2497 			    (stcb->asoc.delayed_ack) &&
2498 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2499 
2500 				/*
2501 				 * CMT DAC algorithm: With CMT, delay acks
2502 				 * even in the face of
2503 				 *
2504 				 * reordering. Therefore, if acks that do
2505 				 * not have to be sent because of the above
2506 				 * reasons, will be delayed. That is, acks
2507 				 * that would have been sent due to gap
2508 				 * reports will be delayed with DAC. Start
2509 				 * the delayed ack timer.
2510 				 */
2511 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2512 				    stcb->sctp_ep, stcb, NULL);
2513 			} else {
2514 				/*
2515 				 * Ok we must build a SACK since the timer
2516 				 * is pending, we got our first packet OR
2517 				 * there are gaps or duplicates.
2518 				 */
2519 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2520 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2521 			}
2522 		} else {
2523 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2524 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2525 				    stcb->sctp_ep, stcb, NULL);
2526 			}
2527 		}
2528 	}
2529 }
2530 
2531 int
2532 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2533     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2534     struct sctp_nets *net, uint32_t * high_tsn)
2535 {
2536 	struct sctp_chunkhdr *ch, chunk_buf;
2537 	struct sctp_association *asoc;
2538 	int num_chunks = 0;	/* number of control chunks processed */
2539 	int stop_proc = 0;
2540 	int chk_length, break_flag, last_chunk;
2541 	int abort_flag = 0, was_a_gap;
2542 	struct mbuf *m;
2543 	uint32_t highest_tsn;
2544 
2545 	/* set the rwnd */
2546 	sctp_set_rwnd(stcb, &stcb->asoc);
2547 
2548 	m = *mm;
2549 	SCTP_TCB_LOCK_ASSERT(stcb);
2550 	asoc = &stcb->asoc;
2551 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2552 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2553 	} else {
2554 		highest_tsn = asoc->highest_tsn_inside_map;
2555 	}
2556 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2557 	/*
2558 	 * setup where we got the last DATA packet from for any SACK that
2559 	 * may need to go out. Don't bump the net. This is done ONLY when a
2560 	 * chunk is assigned.
2561 	 */
2562 	asoc->last_data_chunk_from = net;
2563 
2564 	/*-
2565 	 * Now before we proceed we must figure out if this is a wasted
2566 	 * cluster... i.e. it is a small packet sent in and yet the driver
2567 	 * underneath allocated a full cluster for it. If so we must copy it
2568 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2569 	 * with cluster starvation. Note for __Panda__ we don't do this
2570 	 * since it has clusters all the way down to 64 bytes.
2571 	 */
2572 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2573 		/* we only handle mbufs that are singletons.. not chains */
2574 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2575 		if (m) {
2576 			/* ok lets see if we can copy the data up */
2577 			caddr_t *from, *to;
2578 
2579 			/* get the pointers and copy */
2580 			to = mtod(m, caddr_t *);
2581 			from = mtod((*mm), caddr_t *);
2582 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2583 			/* copy the length and free up the old */
2584 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2585 			sctp_m_freem(*mm);
2586 			/* success, back copy */
2587 			*mm = m;
2588 		} else {
2589 			/* We are in trouble in the mbuf world .. yikes */
2590 			m = *mm;
2591 		}
2592 	}
2593 	/* get pointer to the first chunk header */
2594 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2595 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2596 	if (ch == NULL) {
2597 		return (1);
2598 	}
2599 	/*
2600 	 * process all DATA chunks...
2601 	 */
2602 	*high_tsn = asoc->cumulative_tsn;
2603 	break_flag = 0;
2604 	asoc->data_pkts_seen++;
2605 	while (stop_proc == 0) {
2606 		/* validate chunk length */
2607 		chk_length = ntohs(ch->chunk_length);
2608 		if (length - *offset < chk_length) {
2609 			/* all done, mutulated chunk */
2610 			stop_proc = 1;
2611 			continue;
2612 		}
2613 		if ((asoc->idata_supported == 1) &&
2614 		    (ch->chunk_type == SCTP_DATA)) {
2615 			struct mbuf *op_err;
2616 			char msg[SCTP_DIAG_INFO_LEN];
2617 
2618 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2619 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2620 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2621 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2622 			return (2);
2623 		}
2624 		if ((asoc->idata_supported == 0) &&
2625 		    (ch->chunk_type == SCTP_IDATA)) {
2626 			struct mbuf *op_err;
2627 			char msg[SCTP_DIAG_INFO_LEN];
2628 
2629 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2630 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2631 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2632 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2633 			return (2);
2634 		}
2635 		if ((ch->chunk_type == SCTP_DATA) ||
2636 		    (ch->chunk_type == SCTP_IDATA)) {
2637 			int clen;
2638 
2639 			if (ch->chunk_type == SCTP_DATA) {
2640 				clen = sizeof(struct sctp_data_chunk);
2641 			} else {
2642 				clen = sizeof(struct sctp_idata_chunk);
2643 			}
2644 			if (chk_length < clen) {
2645 				/*
2646 				 * Need to send an abort since we had a
2647 				 * invalid data chunk.
2648 				 */
2649 				struct mbuf *op_err;
2650 				char msg[SCTP_DIAG_INFO_LEN];
2651 
2652 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2653 				    chk_length);
2654 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2655 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2656 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2657 				return (2);
2658 			}
2659 #ifdef SCTP_AUDITING_ENABLED
2660 			sctp_audit_log(0xB1, 0);
2661 #endif
2662 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2663 				last_chunk = 1;
2664 			} else {
2665 				last_chunk = 0;
2666 			}
2667 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2668 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2669 			    last_chunk, ch->chunk_type)) {
2670 				num_chunks++;
2671 			}
2672 			if (abort_flag)
2673 				return (2);
2674 
2675 			if (break_flag) {
2676 				/*
2677 				 * Set because of out of rwnd space and no
2678 				 * drop rep space left.
2679 				 */
2680 				stop_proc = 1;
2681 				continue;
2682 			}
2683 		} else {
2684 			/* not a data chunk in the data region */
2685 			switch (ch->chunk_type) {
2686 			case SCTP_INITIATION:
2687 			case SCTP_INITIATION_ACK:
2688 			case SCTP_SELECTIVE_ACK:
2689 			case SCTP_NR_SELECTIVE_ACK:
2690 			case SCTP_HEARTBEAT_REQUEST:
2691 			case SCTP_HEARTBEAT_ACK:
2692 			case SCTP_ABORT_ASSOCIATION:
2693 			case SCTP_SHUTDOWN:
2694 			case SCTP_SHUTDOWN_ACK:
2695 			case SCTP_OPERATION_ERROR:
2696 			case SCTP_COOKIE_ECHO:
2697 			case SCTP_COOKIE_ACK:
2698 			case SCTP_ECN_ECHO:
2699 			case SCTP_ECN_CWR:
2700 			case SCTP_SHUTDOWN_COMPLETE:
2701 			case SCTP_AUTHENTICATION:
2702 			case SCTP_ASCONF_ACK:
2703 			case SCTP_PACKET_DROPPED:
2704 			case SCTP_STREAM_RESET:
2705 			case SCTP_FORWARD_CUM_TSN:
2706 			case SCTP_ASCONF:
2707 				{
2708 					/*
2709 					 * Now, what do we do with KNOWN
2710 					 * chunks that are NOT in the right
2711 					 * place?
2712 					 *
2713 					 * For now, I do nothing but ignore
2714 					 * them. We may later want to add
2715 					 * sysctl stuff to switch out and do
2716 					 * either an ABORT() or possibly
2717 					 * process them.
2718 					 */
2719 					struct mbuf *op_err;
2720 					char msg[SCTP_DIAG_INFO_LEN];
2721 
2722 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2723 					    ch->chunk_type);
2724 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2725 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2726 					return (2);
2727 				}
2728 			default:
2729 				/* unknown chunk type, use bit rules */
2730 				if (ch->chunk_type & 0x40) {
2731 					/* Add a error report to the queue */
2732 					struct mbuf *op_err;
2733 					struct sctp_gen_error_cause *cause;
2734 
2735 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2736 					    0, M_NOWAIT, 1, MT_DATA);
2737 					if (op_err != NULL) {
2738 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2739 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2740 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2741 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2742 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2743 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2744 							sctp_queue_op_err(stcb, op_err);
2745 						} else {
2746 							sctp_m_freem(op_err);
2747 						}
2748 					}
2749 				}
2750 				if ((ch->chunk_type & 0x80) == 0) {
2751 					/* discard the rest of this packet */
2752 					stop_proc = 1;
2753 				}	/* else skip this bad chunk and
2754 				  * continue... */ break;
2755 			}	/* switch of chunk type */
2756 		}
2757 		*offset += SCTP_SIZE32(chk_length);
2758 		if ((*offset >= length) || stop_proc) {
2759 			/* no more data left in the mbuf chain */
2760 			stop_proc = 1;
2761 			continue;
2762 		}
2763 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2764 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2765 		if (ch == NULL) {
2766 			*offset = length;
2767 			stop_proc = 1;
2768 			continue;
2769 		}
2770 	}
2771 	if (break_flag) {
2772 		/*
2773 		 * we need to report rwnd overrun drops.
2774 		 */
2775 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2776 	}
2777 	if (num_chunks) {
2778 		/*
2779 		 * Did we get data, if so update the time for auto-close and
2780 		 * give peer credit for being alive.
2781 		 */
2782 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2783 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2784 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2785 			    stcb->asoc.overall_error_count,
2786 			    0,
2787 			    SCTP_FROM_SCTP_INDATA,
2788 			    __LINE__);
2789 		}
2790 		stcb->asoc.overall_error_count = 0;
2791 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2792 	}
2793 	/* now service all of the reassm queue if needed */
2794 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2795 		/* Assure that we ack right away */
2796 		stcb->asoc.send_sack = 1;
2797 	}
2798 	/* Start a sack timer or QUEUE a SACK for sending */
2799 	sctp_sack_check(stcb, was_a_gap);
2800 	return (0);
2801 }
2802 
2803 static int
2804 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2805     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2806     int *num_frs,
2807     uint32_t * biggest_newly_acked_tsn,
2808     uint32_t * this_sack_lowest_newack,
2809     int *rto_ok)
2810 {
2811 	struct sctp_tmit_chunk *tp1;
2812 	unsigned int theTSN;
2813 	int j, wake_him = 0, circled = 0;
2814 
2815 	/* Recover the tp1 we last saw */
2816 	tp1 = *p_tp1;
2817 	if (tp1 == NULL) {
2818 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2819 	}
2820 	for (j = frag_strt; j <= frag_end; j++) {
2821 		theTSN = j + last_tsn;
2822 		while (tp1) {
2823 			if (tp1->rec.data.doing_fast_retransmit)
2824 				(*num_frs) += 1;
2825 
2826 			/*-
2827 			 * CMT: CUCv2 algorithm. For each TSN being
2828 			 * processed from the sent queue, track the
2829 			 * next expected pseudo-cumack, or
2830 			 * rtx_pseudo_cumack, if required. Separate
2831 			 * cumack trackers for first transmissions,
2832 			 * and retransmissions.
2833 			 */
2834 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2835 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2836 			    (tp1->snd_count == 1)) {
2837 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2838 				tp1->whoTo->find_pseudo_cumack = 0;
2839 			}
2840 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2841 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2842 			    (tp1->snd_count > 1)) {
2843 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2844 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2845 			}
2846 			if (tp1->rec.data.tsn == theTSN) {
2847 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2848 					/*-
2849 					 * must be held until
2850 					 * cum-ack passes
2851 					 */
2852 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2853 						/*-
2854 						 * If it is less than RESEND, it is
2855 						 * now no-longer in flight.
2856 						 * Higher values may already be set
2857 						 * via previous Gap Ack Blocks...
2858 						 * i.e. ACKED or RESEND.
2859 						 */
2860 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2861 						    *biggest_newly_acked_tsn)) {
2862 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2863 						}
2864 						/*-
2865 						 * CMT: SFR algo (and HTNA) - set
2866 						 * saw_newack to 1 for dest being
2867 						 * newly acked. update
2868 						 * this_sack_highest_newack if
2869 						 * appropriate.
2870 						 */
2871 						if (tp1->rec.data.chunk_was_revoked == 0)
2872 							tp1->whoTo->saw_newack = 1;
2873 
2874 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2875 						    tp1->whoTo->this_sack_highest_newack)) {
2876 							tp1->whoTo->this_sack_highest_newack =
2877 							    tp1->rec.data.tsn;
2878 						}
2879 						/*-
2880 						 * CMT DAC algo: also update
2881 						 * this_sack_lowest_newack
2882 						 */
2883 						if (*this_sack_lowest_newack == 0) {
2884 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2885 								sctp_log_sack(*this_sack_lowest_newack,
2886 								    last_tsn,
2887 								    tp1->rec.data.tsn,
2888 								    0,
2889 								    0,
2890 								    SCTP_LOG_TSN_ACKED);
2891 							}
2892 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2893 						}
2894 						/*-
2895 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2896 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2897 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2898 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2899 						 * Separate pseudo_cumack trackers for first transmissions and
2900 						 * retransmissions.
2901 						 */
2902 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2903 							if (tp1->rec.data.chunk_was_revoked == 0) {
2904 								tp1->whoTo->new_pseudo_cumack = 1;
2905 							}
2906 							tp1->whoTo->find_pseudo_cumack = 1;
2907 						}
2908 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2909 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2910 						}
2911 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2912 							if (tp1->rec.data.chunk_was_revoked == 0) {
2913 								tp1->whoTo->new_pseudo_cumack = 1;
2914 							}
2915 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2916 						}
2917 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2918 							sctp_log_sack(*biggest_newly_acked_tsn,
2919 							    last_tsn,
2920 							    tp1->rec.data.tsn,
2921 							    frag_strt,
2922 							    frag_end,
2923 							    SCTP_LOG_TSN_ACKED);
2924 						}
2925 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2926 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2927 							    tp1->whoTo->flight_size,
2928 							    tp1->book_size,
2929 							    (uint32_t) (uintptr_t) tp1->whoTo,
2930 							    tp1->rec.data.tsn);
2931 						}
2932 						sctp_flight_size_decrease(tp1);
2933 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2934 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2935 							    tp1);
2936 						}
2937 						sctp_total_flight_decrease(stcb, tp1);
2938 
2939 						tp1->whoTo->net_ack += tp1->send_size;
2940 						if (tp1->snd_count < 2) {
2941 							/*-
2942 							 * True non-retransmited chunk
2943 							 */
2944 							tp1->whoTo->net_ack2 += tp1->send_size;
2945 
2946 							/*-
2947 							 * update RTO too ?
2948 							 */
2949 							if (tp1->do_rtt) {
2950 								if (*rto_ok) {
2951 									tp1->whoTo->RTO =
2952 									    sctp_calculate_rto(stcb,
2953 									    &stcb->asoc,
2954 									    tp1->whoTo,
2955 									    &tp1->sent_rcv_time,
2956 									    sctp_align_safe_nocopy,
2957 									    SCTP_RTT_FROM_DATA);
2958 									*rto_ok = 0;
2959 								}
2960 								if (tp1->whoTo->rto_needed == 0) {
2961 									tp1->whoTo->rto_needed = 1;
2962 								}
2963 								tp1->do_rtt = 0;
2964 							}
2965 						}
2966 					}
2967 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2968 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2969 						    stcb->asoc.this_sack_highest_gap)) {
2970 							stcb->asoc.this_sack_highest_gap =
2971 							    tp1->rec.data.tsn;
2972 						}
2973 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2974 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2975 #ifdef SCTP_AUDITING_ENABLED
2976 							sctp_audit_log(0xB2,
2977 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2978 #endif
2979 						}
2980 					}
2981 					/*-
2982 					 * All chunks NOT UNSENT fall through here and are marked
2983 					 * (leave PR-SCTP ones that are to skip alone though)
2984 					 */
2985 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2986 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2987 						tp1->sent = SCTP_DATAGRAM_MARKED;
2988 					}
2989 					if (tp1->rec.data.chunk_was_revoked) {
2990 						/* deflate the cwnd */
2991 						tp1->whoTo->cwnd -= tp1->book_size;
2992 						tp1->rec.data.chunk_was_revoked = 0;
2993 					}
2994 					/* NR Sack code here */
2995 					if (nr_sacking &&
2996 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2997 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
2998 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
2999 #ifdef INVARIANTS
3000 						} else {
3001 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3002 #endif
3003 						}
3004 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3005 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3006 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3007 							stcb->asoc.trigger_reset = 1;
3008 						}
3009 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3010 						if (tp1->data) {
3011 							/* sa_ignore
3012 							 * NO_NULL_CHK */
3013 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3014 							sctp_m_freem(tp1->data);
3015 							tp1->data = NULL;
3016 						}
3017 						wake_him++;
3018 					}
3019 				}
3020 				break;
3021 			} /* if (tp1->tsn == theTSN) */ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3022 				break;
3023 			}
3024 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3025 			if ((tp1 == NULL) && (circled == 0)) {
3026 				circled++;
3027 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3028 			}
3029 		}		/* end while (tp1) */
3030 		if (tp1 == NULL) {
3031 			circled = 0;
3032 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3033 		}
3034 		/* In case the fragments were not in order we must reset */
3035 	}			/* end for (j = fragStart */
3036 	*p_tp1 = tp1;
3037 	return (wake_him);	/* Return value only used for nr-sack */
3038 }
3039 
3040 
3041 static int
3042 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3043     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3044     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3045     int num_seg, int num_nr_seg, int *rto_ok)
3046 {
3047 	struct sctp_gap_ack_block *frag, block;
3048 	struct sctp_tmit_chunk *tp1;
3049 	int i;
3050 	int num_frs = 0;
3051 	int chunk_freed;
3052 	int non_revocable;
3053 	uint16_t frag_strt, frag_end, prev_frag_end;
3054 
3055 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3056 	prev_frag_end = 0;
3057 	chunk_freed = 0;
3058 
3059 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3060 		if (i == num_seg) {
3061 			prev_frag_end = 0;
3062 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3063 		}
3064 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3065 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3066 		*offset += sizeof(block);
3067 		if (frag == NULL) {
3068 			return (chunk_freed);
3069 		}
3070 		frag_strt = ntohs(frag->start);
3071 		frag_end = ntohs(frag->end);
3072 
3073 		if (frag_strt > frag_end) {
3074 			/* This gap report is malformed, skip it. */
3075 			continue;
3076 		}
3077 		if (frag_strt <= prev_frag_end) {
3078 			/* This gap report is not in order, so restart. */
3079 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3080 		}
3081 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3082 			*biggest_tsn_acked = last_tsn + frag_end;
3083 		}
3084 		if (i < num_seg) {
3085 			non_revocable = 0;
3086 		} else {
3087 			non_revocable = 1;
3088 		}
3089 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3090 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3091 		    this_sack_lowest_newack, rto_ok)) {
3092 			chunk_freed = 1;
3093 		}
3094 		prev_frag_end = frag_end;
3095 	}
3096 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3097 		if (num_frs)
3098 			sctp_log_fr(*biggest_tsn_acked,
3099 			    *biggest_newly_acked_tsn,
3100 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3101 	}
3102 	return (chunk_freed);
3103 }
3104 
3105 static void
3106 sctp_check_for_revoked(struct sctp_tcb *stcb,
3107     struct sctp_association *asoc, uint32_t cumack,
3108     uint32_t biggest_tsn_acked)
3109 {
3110 	struct sctp_tmit_chunk *tp1;
3111 
3112 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3113 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3114 			/*
3115 			 * ok this guy is either ACK or MARKED. If it is
3116 			 * ACKED it has been previously acked but not this
3117 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3118 			 * again.
3119 			 */
3120 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3121 				break;
3122 			}
3123 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3124 				/* it has been revoked */
3125 				tp1->sent = SCTP_DATAGRAM_SENT;
3126 				tp1->rec.data.chunk_was_revoked = 1;
3127 				/*
3128 				 * We must add this stuff back in to assure
3129 				 * timers and such get started.
3130 				 */
3131 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3132 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3133 					    tp1->whoTo->flight_size,
3134 					    tp1->book_size,
3135 					    (uint32_t) (uintptr_t) tp1->whoTo,
3136 					    tp1->rec.data.tsn);
3137 				}
3138 				sctp_flight_size_increase(tp1);
3139 				sctp_total_flight_increase(stcb, tp1);
3140 				/*
3141 				 * We inflate the cwnd to compensate for our
3142 				 * artificial inflation of the flight_size.
3143 				 */
3144 				tp1->whoTo->cwnd += tp1->book_size;
3145 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3146 					sctp_log_sack(asoc->last_acked_seq,
3147 					    cumack,
3148 					    tp1->rec.data.tsn,
3149 					    0,
3150 					    0,
3151 					    SCTP_LOG_TSN_REVOKED);
3152 				}
3153 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3154 				/* it has been re-acked in this SACK */
3155 				tp1->sent = SCTP_DATAGRAM_ACKED;
3156 			}
3157 		}
3158 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3159 			break;
3160 	}
3161 }
3162 
3163 
3164 static void
3165 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3166     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3167 {
3168 	struct sctp_tmit_chunk *tp1;
3169 	int strike_flag = 0;
3170 	struct timeval now;
3171 	int tot_retrans = 0;
3172 	uint32_t sending_seq;
3173 	struct sctp_nets *net;
3174 	int num_dests_sacked = 0;
3175 
3176 	/*
3177 	 * select the sending_seq, this is either the next thing ready to be
3178 	 * sent but not transmitted, OR, the next seq we assign.
3179 	 */
3180 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3181 	if (tp1 == NULL) {
3182 		sending_seq = asoc->sending_seq;
3183 	} else {
3184 		sending_seq = tp1->rec.data.tsn;
3185 	}
3186 
3187 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3188 	if ((asoc->sctp_cmt_on_off > 0) &&
3189 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3190 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3191 			if (net->saw_newack)
3192 				num_dests_sacked++;
3193 		}
3194 	}
3195 	if (stcb->asoc.prsctp_supported) {
3196 		(void)SCTP_GETTIME_TIMEVAL(&now);
3197 	}
3198 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3199 		strike_flag = 0;
3200 		if (tp1->no_fr_allowed) {
3201 			/* this one had a timeout or something */
3202 			continue;
3203 		}
3204 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3205 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3206 				sctp_log_fr(biggest_tsn_newly_acked,
3207 				    tp1->rec.data.tsn,
3208 				    tp1->sent,
3209 				    SCTP_FR_LOG_CHECK_STRIKE);
3210 		}
3211 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3212 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3213 			/* done */
3214 			break;
3215 		}
3216 		if (stcb->asoc.prsctp_supported) {
3217 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3218 				/* Is it expired? */
3219 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3220 					/* Yes so drop it */
3221 					if (tp1->data != NULL) {
3222 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3223 						    SCTP_SO_NOT_LOCKED);
3224 					}
3225 					continue;
3226 				}
3227 			}
3228 		}
3229 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3230 			/* we are beyond the tsn in the sack  */
3231 			break;
3232 		}
3233 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3234 			/* either a RESEND, ACKED, or MARKED */
3235 			/* skip */
3236 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3237 				/* Continue strikin FWD-TSN chunks */
3238 				tp1->rec.data.fwd_tsn_cnt++;
3239 			}
3240 			continue;
3241 		}
3242 		/*
3243 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3244 		 */
3245 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3246 			/*
3247 			 * No new acks were receieved for data sent to this
3248 			 * dest. Therefore, according to the SFR algo for
3249 			 * CMT, no data sent to this dest can be marked for
3250 			 * FR using this SACK.
3251 			 */
3252 			continue;
3253 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3254 		    tp1->whoTo->this_sack_highest_newack)) {
3255 			/*
3256 			 * CMT: New acks were receieved for data sent to
3257 			 * this dest. But no new acks were seen for data
3258 			 * sent after tp1. Therefore, according to the SFR
3259 			 * algo for CMT, tp1 cannot be marked for FR using
3260 			 * this SACK. This step covers part of the DAC algo
3261 			 * and the HTNA algo as well.
3262 			 */
3263 			continue;
3264 		}
3265 		/*
3266 		 * Here we check to see if we were have already done a FR
3267 		 * and if so we see if the biggest TSN we saw in the sack is
3268 		 * smaller than the recovery point. If so we don't strike
3269 		 * the tsn... otherwise we CAN strike the TSN.
3270 		 */
3271 		/*
3272 		 * @@@ JRI: Check for CMT if (accum_moved &&
3273 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3274 		 * 0)) {
3275 		 */
3276 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3277 			/*
3278 			 * Strike the TSN if in fast-recovery and cum-ack
3279 			 * moved.
3280 			 */
3281 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3282 				sctp_log_fr(biggest_tsn_newly_acked,
3283 				    tp1->rec.data.tsn,
3284 				    tp1->sent,
3285 				    SCTP_FR_LOG_STRIKE_CHUNK);
3286 			}
3287 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3288 				tp1->sent++;
3289 			}
3290 			if ((asoc->sctp_cmt_on_off > 0) &&
3291 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3292 				/*
3293 				 * CMT DAC algorithm: If SACK flag is set to
3294 				 * 0, then lowest_newack test will not pass
3295 				 * because it would have been set to the
3296 				 * cumack earlier. If not already to be
3297 				 * rtx'd, If not a mixed sack and if tp1 is
3298 				 * not between two sacked TSNs, then mark by
3299 				 * one more. NOTE that we are marking by one
3300 				 * additional time since the SACK DAC flag
3301 				 * indicates that two packets have been
3302 				 * received after this missing TSN.
3303 				 */
3304 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3305 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3306 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3307 						sctp_log_fr(16 + num_dests_sacked,
3308 						    tp1->rec.data.tsn,
3309 						    tp1->sent,
3310 						    SCTP_FR_LOG_STRIKE_CHUNK);
3311 					}
3312 					tp1->sent++;
3313 				}
3314 			}
3315 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3316 		    (asoc->sctp_cmt_on_off == 0)) {
3317 			/*
3318 			 * For those that have done a FR we must take
3319 			 * special consideration if we strike. I.e the
3320 			 * biggest_newly_acked must be higher than the
3321 			 * sending_seq at the time we did the FR.
3322 			 */
3323 			if (
3324 #ifdef SCTP_FR_TO_ALTERNATE
3325 			/*
3326 			 * If FR's go to new networks, then we must only do
3327 			 * this for singly homed asoc's. However if the FR's
3328 			 * go to the same network (Armando's work) then its
3329 			 * ok to FR multiple times.
3330 			 */
3331 			    (asoc->numnets < 2)
3332 #else
3333 			    (1)
3334 #endif
3335 			    ) {
3336 
3337 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3338 				    tp1->rec.data.fast_retran_tsn)) {
3339 					/*
3340 					 * Strike the TSN, since this ack is
3341 					 * beyond where things were when we
3342 					 * did a FR.
3343 					 */
3344 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3345 						sctp_log_fr(biggest_tsn_newly_acked,
3346 						    tp1->rec.data.tsn,
3347 						    tp1->sent,
3348 						    SCTP_FR_LOG_STRIKE_CHUNK);
3349 					}
3350 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3351 						tp1->sent++;
3352 					}
3353 					strike_flag = 1;
3354 					if ((asoc->sctp_cmt_on_off > 0) &&
3355 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3356 						/*
3357 						 * CMT DAC algorithm: If
3358 						 * SACK flag is set to 0,
3359 						 * then lowest_newack test
3360 						 * will not pass because it
3361 						 * would have been set to
3362 						 * the cumack earlier. If
3363 						 * not already to be rtx'd,
3364 						 * If not a mixed sack and
3365 						 * if tp1 is not between two
3366 						 * sacked TSNs, then mark by
3367 						 * one more. NOTE that we
3368 						 * are marking by one
3369 						 * additional time since the
3370 						 * SACK DAC flag indicates
3371 						 * that two packets have
3372 						 * been received after this
3373 						 * missing TSN.
3374 						 */
3375 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3376 						    (num_dests_sacked == 1) &&
3377 						    SCTP_TSN_GT(this_sack_lowest_newack,
3378 						    tp1->rec.data.tsn)) {
3379 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3380 								sctp_log_fr(32 + num_dests_sacked,
3381 								    tp1->rec.data.tsn,
3382 								    tp1->sent,
3383 								    SCTP_FR_LOG_STRIKE_CHUNK);
3384 							}
3385 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3386 								tp1->sent++;
3387 							}
3388 						}
3389 					}
3390 				}
3391 			}
3392 			/*
3393 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3394 			 * algo covers HTNA.
3395 			 */
3396 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3397 		    biggest_tsn_newly_acked)) {
3398 			/*
3399 			 * We don't strike these: This is the  HTNA
3400 			 * algorithm i.e. we don't strike If our TSN is
3401 			 * larger than the Highest TSN Newly Acked.
3402 			 */
3403 			;
3404 		} else {
3405 			/* Strike the TSN */
3406 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3407 				sctp_log_fr(biggest_tsn_newly_acked,
3408 				    tp1->rec.data.tsn,
3409 				    tp1->sent,
3410 				    SCTP_FR_LOG_STRIKE_CHUNK);
3411 			}
3412 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3413 				tp1->sent++;
3414 			}
3415 			if ((asoc->sctp_cmt_on_off > 0) &&
3416 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3417 				/*
3418 				 * CMT DAC algorithm: If SACK flag is set to
3419 				 * 0, then lowest_newack test will not pass
3420 				 * because it would have been set to the
3421 				 * cumack earlier. If not already to be
3422 				 * rtx'd, If not a mixed sack and if tp1 is
3423 				 * not between two sacked TSNs, then mark by
3424 				 * one more. NOTE that we are marking by one
3425 				 * additional time since the SACK DAC flag
3426 				 * indicates that two packets have been
3427 				 * received after this missing TSN.
3428 				 */
3429 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3430 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3431 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3432 						sctp_log_fr(48 + num_dests_sacked,
3433 						    tp1->rec.data.tsn,
3434 						    tp1->sent,
3435 						    SCTP_FR_LOG_STRIKE_CHUNK);
3436 					}
3437 					tp1->sent++;
3438 				}
3439 			}
3440 		}
3441 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3442 			struct sctp_nets *alt;
3443 
3444 			/* fix counts and things */
3445 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3446 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3447 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3448 				    tp1->book_size,
3449 				    (uint32_t) (uintptr_t) tp1->whoTo,
3450 				    tp1->rec.data.tsn);
3451 			}
3452 			if (tp1->whoTo) {
3453 				tp1->whoTo->net_ack++;
3454 				sctp_flight_size_decrease(tp1);
3455 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3456 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3457 					    tp1);
3458 				}
3459 			}
3460 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3461 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3462 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3463 			}
3464 			/* add back to the rwnd */
3465 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3466 
3467 			/* remove from the total flight */
3468 			sctp_total_flight_decrease(stcb, tp1);
3469 
3470 			if ((stcb->asoc.prsctp_supported) &&
3471 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3472 				/* Has it been retransmitted tv_sec times? -
3473 				 * we store the retran count there. */
3474 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3475 					/* Yes, so drop it */
3476 					if (tp1->data != NULL) {
3477 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3478 						    SCTP_SO_NOT_LOCKED);
3479 					}
3480 					/* Make sure to flag we had a FR */
3481 					tp1->whoTo->net_ack++;
3482 					continue;
3483 				}
3484 			}
3485 			/* SCTP_PRINTF("OK, we are now ready to FR this
3486 			 * guy\n"); */
3487 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3488 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3489 				    0, SCTP_FR_MARKED);
3490 			}
3491 			if (strike_flag) {
3492 				/* This is a subsequent FR */
3493 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3494 			}
3495 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3496 			if (asoc->sctp_cmt_on_off > 0) {
3497 				/*
3498 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3499 				 * If CMT is being used, then pick dest with
3500 				 * largest ssthresh for any retransmission.
3501 				 */
3502 				tp1->no_fr_allowed = 1;
3503 				alt = tp1->whoTo;
3504 				/* sa_ignore NO_NULL_CHK */
3505 				if (asoc->sctp_cmt_pf > 0) {
3506 					/* JRS 5/18/07 - If CMT PF is on,
3507 					 * use the PF version of
3508 					 * find_alt_net() */
3509 					alt = sctp_find_alternate_net(stcb, alt, 2);
3510 				} else {
3511 					/* JRS 5/18/07 - If only CMT is on,
3512 					 * use the CMT version of
3513 					 * find_alt_net() */
3514 					/* sa_ignore NO_NULL_CHK */
3515 					alt = sctp_find_alternate_net(stcb, alt, 1);
3516 				}
3517 				if (alt == NULL) {
3518 					alt = tp1->whoTo;
3519 				}
3520 				/*
3521 				 * CUCv2: If a different dest is picked for
3522 				 * the retransmission, then new
3523 				 * (rtx-)pseudo_cumack needs to be tracked
3524 				 * for orig dest. Let CUCv2 track new (rtx-)
3525 				 * pseudo-cumack always.
3526 				 */
3527 				if (tp1->whoTo) {
3528 					tp1->whoTo->find_pseudo_cumack = 1;
3529 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3530 				}
3531 			} else {/* CMT is OFF */
3532 
3533 #ifdef SCTP_FR_TO_ALTERNATE
3534 				/* Can we find an alternate? */
3535 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3536 #else
3537 				/*
3538 				 * default behavior is to NOT retransmit
3539 				 * FR's to an alternate. Armando Caro's
3540 				 * paper details why.
3541 				 */
3542 				alt = tp1->whoTo;
3543 #endif
3544 			}
3545 
3546 			tp1->rec.data.doing_fast_retransmit = 1;
3547 			tot_retrans++;
3548 			/* mark the sending seq for possible subsequent FR's */
3549 			/*
3550 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3551 			 * (uint32_t)tpi->rec.data.tsn);
3552 			 */
3553 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3554 				/*
3555 				 * If the queue of send is empty then its
3556 				 * the next sequence number that will be
3557 				 * assigned so we subtract one from this to
3558 				 * get the one we last sent.
3559 				 */
3560 				tp1->rec.data.fast_retran_tsn = sending_seq;
3561 			} else {
3562 				/*
3563 				 * If there are chunks on the send queue
3564 				 * (unsent data that has made it from the
3565 				 * stream queues but not out the door, we
3566 				 * take the first one (which will have the
3567 				 * lowest TSN) and subtract one to get the
3568 				 * one we last sent.
3569 				 */
3570 				struct sctp_tmit_chunk *ttt;
3571 
3572 				ttt = TAILQ_FIRST(&asoc->send_queue);
3573 				tp1->rec.data.fast_retran_tsn =
3574 				    ttt->rec.data.tsn;
3575 			}
3576 
3577 			if (tp1->do_rtt) {
3578 				/*
3579 				 * this guy had a RTO calculation pending on
3580 				 * it, cancel it
3581 				 */
3582 				if ((tp1->whoTo != NULL) &&
3583 				    (tp1->whoTo->rto_needed == 0)) {
3584 					tp1->whoTo->rto_needed = 1;
3585 				}
3586 				tp1->do_rtt = 0;
3587 			}
3588 			if (alt != tp1->whoTo) {
3589 				/* yes, there is an alternate. */
3590 				sctp_free_remote_addr(tp1->whoTo);
3591 				/* sa_ignore FREED_MEMORY */
3592 				tp1->whoTo = alt;
3593 				atomic_add_int(&alt->ref_count, 1);
3594 			}
3595 		}
3596 	}
3597 }
3598 
3599 struct sctp_tmit_chunk *
3600 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3601     struct sctp_association *asoc)
3602 {
3603 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3604 	struct timeval now;
3605 	int now_filled = 0;
3606 
3607 	if (asoc->prsctp_supported == 0) {
3608 		return (NULL);
3609 	}
3610 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3611 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3612 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3613 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3614 			/* no chance to advance, out of here */
3615 			break;
3616 		}
3617 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3618 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3619 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3620 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3621 				    asoc->advanced_peer_ack_point,
3622 				    tp1->rec.data.tsn, 0, 0);
3623 			}
3624 		}
3625 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3626 			/*
3627 			 * We can't fwd-tsn past any that are reliable aka
3628 			 * retransmitted until the asoc fails.
3629 			 */
3630 			break;
3631 		}
3632 		if (!now_filled) {
3633 			(void)SCTP_GETTIME_TIMEVAL(&now);
3634 			now_filled = 1;
3635 		}
3636 		/*
3637 		 * now we got a chunk which is marked for another
3638 		 * retransmission to a PR-stream but has run out its chances
3639 		 * already maybe OR has been marked to skip now. Can we skip
3640 		 * it if its a resend?
3641 		 */
3642 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3643 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3644 			/*
3645 			 * Now is this one marked for resend and its time is
3646 			 * now up?
3647 			 */
3648 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3649 				/* Yes so drop it */
3650 				if (tp1->data) {
3651 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3652 					    1, SCTP_SO_NOT_LOCKED);
3653 				}
3654 			} else {
3655 				/*
3656 				 * No, we are done when hit one for resend
3657 				 * whos time as not expired.
3658 				 */
3659 				break;
3660 			}
3661 		}
3662 		/*
3663 		 * Ok now if this chunk is marked to drop it we can clean up
3664 		 * the chunk, advance our peer ack point and we can check
3665 		 * the next chunk.
3666 		 */
3667 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3668 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3669 			/* advance PeerAckPoint goes forward */
3670 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3671 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3672 				a_adv = tp1;
3673 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3674 				/* No update but we do save the chk */
3675 				a_adv = tp1;
3676 			}
3677 		} else {
3678 			/*
3679 			 * If it is still in RESEND we can advance no
3680 			 * further
3681 			 */
3682 			break;
3683 		}
3684 	}
3685 	return (a_adv);
3686 }
3687 
3688 static int
3689 sctp_fs_audit(struct sctp_association *asoc)
3690 {
3691 	struct sctp_tmit_chunk *chk;
3692 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3693 	int ret;
3694 #ifndef INVARIANTS
3695 	int entry_flight, entry_cnt;
3696 #endif
3697 
3698 	ret = 0;
3699 #ifndef INVARIANTS
3700 	entry_flight = asoc->total_flight;
3701 	entry_cnt = asoc->total_flight_count;
3702 #endif
3703 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3704 		return (0);
3705 
3706 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3707 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3708 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3709 			    chk->rec.data.tsn,
3710 			    chk->send_size,
3711 			    chk->snd_count);
3712 			inflight++;
3713 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3714 			resend++;
3715 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3716 			inbetween++;
3717 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3718 			above++;
3719 		} else {
3720 			acked++;
3721 		}
3722 	}
3723 
3724 	if ((inflight > 0) || (inbetween > 0)) {
3725 #ifdef INVARIANTS
3726 		panic("Flight size-express incorrect? \n");
3727 #else
3728 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3729 		    entry_flight, entry_cnt);
3730 
3731 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3732 		    inflight, inbetween, resend, above, acked);
3733 		ret = 1;
3734 #endif
3735 	}
3736 	return (ret);
3737 }
3738 
3739 
3740 static void
3741 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3742     struct sctp_association *asoc,
3743     struct sctp_tmit_chunk *tp1)
3744 {
3745 	tp1->window_probe = 0;
3746 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3747 		/* TSN's skipped we do NOT move back. */
3748 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3749 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3750 		    tp1->book_size,
3751 		    (uint32_t) (uintptr_t) tp1->whoTo,
3752 		    tp1->rec.data.tsn);
3753 		return;
3754 	}
3755 	/* First setup this by shrinking flight */
3756 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3757 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3758 		    tp1);
3759 	}
3760 	sctp_flight_size_decrease(tp1);
3761 	sctp_total_flight_decrease(stcb, tp1);
3762 	/* Now mark for resend */
3763 	tp1->sent = SCTP_DATAGRAM_RESEND;
3764 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3765 
3766 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3767 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3768 		    tp1->whoTo->flight_size,
3769 		    tp1->book_size,
3770 		    (uint32_t) (uintptr_t) tp1->whoTo,
3771 		    tp1->rec.data.tsn);
3772 	}
3773 }
3774 
3775 void
3776 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3777     uint32_t rwnd, int *abort_now, int ecne_seen)
3778 {
3779 	struct sctp_nets *net;
3780 	struct sctp_association *asoc;
3781 	struct sctp_tmit_chunk *tp1, *tp2;
3782 	uint32_t old_rwnd;
3783 	int win_probe_recovery = 0;
3784 	int win_probe_recovered = 0;
3785 	int j, done_once = 0;
3786 	int rto_ok = 1;
3787 	uint32_t send_s;
3788 
3789 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3790 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3791 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3792 	}
3793 	SCTP_TCB_LOCK_ASSERT(stcb);
3794 #ifdef SCTP_ASOCLOG_OF_TSNS
3795 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3796 	stcb->asoc.cumack_log_at++;
3797 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3798 		stcb->asoc.cumack_log_at = 0;
3799 	}
3800 #endif
3801 	asoc = &stcb->asoc;
3802 	old_rwnd = asoc->peers_rwnd;
3803 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3804 		/* old ack */
3805 		return;
3806 	} else if (asoc->last_acked_seq == cumack) {
3807 		/* Window update sack */
3808 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3809 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3810 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3811 			/* SWS sender side engages */
3812 			asoc->peers_rwnd = 0;
3813 		}
3814 		if (asoc->peers_rwnd > old_rwnd) {
3815 			goto again;
3816 		}
3817 		return;
3818 	}
3819 	/* First setup for CC stuff */
3820 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3821 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3822 			/* Drag along the window_tsn for cwr's */
3823 			net->cwr_window_tsn = cumack;
3824 		}
3825 		net->prev_cwnd = net->cwnd;
3826 		net->net_ack = 0;
3827 		net->net_ack2 = 0;
3828 
3829 		/*
3830 		 * CMT: Reset CUC and Fast recovery algo variables before
3831 		 * SACK processing
3832 		 */
3833 		net->new_pseudo_cumack = 0;
3834 		net->will_exit_fast_recovery = 0;
3835 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3836 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3837 		}
3838 	}
3839 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3840 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3841 		    sctpchunk_listhead);
3842 		send_s = tp1->rec.data.tsn + 1;
3843 	} else {
3844 		send_s = asoc->sending_seq;
3845 	}
3846 	if (SCTP_TSN_GE(cumack, send_s)) {
3847 		struct mbuf *op_err;
3848 		char msg[SCTP_DIAG_INFO_LEN];
3849 
3850 		*abort_now = 1;
3851 		/* XXX */
3852 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3853 		    cumack, send_s);
3854 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3855 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3856 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3857 		return;
3858 	}
3859 	asoc->this_sack_highest_gap = cumack;
3860 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3861 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3862 		    stcb->asoc.overall_error_count,
3863 		    0,
3864 		    SCTP_FROM_SCTP_INDATA,
3865 		    __LINE__);
3866 	}
3867 	stcb->asoc.overall_error_count = 0;
3868 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3869 		/* process the new consecutive TSN first */
3870 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3871 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3872 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3873 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3874 				}
3875 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3876 					/*
3877 					 * If it is less than ACKED, it is
3878 					 * now no-longer in flight. Higher
3879 					 * values may occur during marking
3880 					 */
3881 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3882 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3883 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3884 							    tp1->whoTo->flight_size,
3885 							    tp1->book_size,
3886 							    (uint32_t) (uintptr_t) tp1->whoTo,
3887 							    tp1->rec.data.tsn);
3888 						}
3889 						sctp_flight_size_decrease(tp1);
3890 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3891 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3892 							    tp1);
3893 						}
3894 						/* sa_ignore NO_NULL_CHK */
3895 						sctp_total_flight_decrease(stcb, tp1);
3896 					}
3897 					tp1->whoTo->net_ack += tp1->send_size;
3898 					if (tp1->snd_count < 2) {
3899 						/*
3900 						 * True non-retransmited
3901 						 * chunk
3902 						 */
3903 						tp1->whoTo->net_ack2 +=
3904 						    tp1->send_size;
3905 
3906 						/* update RTO too? */
3907 						if (tp1->do_rtt) {
3908 							if (rto_ok) {
3909 								tp1->whoTo->RTO =
3910 								/*
3911 								 * sa_ignore
3912 								 * NO_NULL_CHK
3913 								 */
3914 								    sctp_calculate_rto(stcb,
3915 								    asoc, tp1->whoTo,
3916 								    &tp1->sent_rcv_time,
3917 								    sctp_align_safe_nocopy,
3918 								    SCTP_RTT_FROM_DATA);
3919 								rto_ok = 0;
3920 							}
3921 							if (tp1->whoTo->rto_needed == 0) {
3922 								tp1->whoTo->rto_needed = 1;
3923 							}
3924 							tp1->do_rtt = 0;
3925 						}
3926 					}
3927 					/*
3928 					 * CMT: CUCv2 algorithm. From the
3929 					 * cumack'd TSNs, for each TSN being
3930 					 * acked for the first time, set the
3931 					 * following variables for the
3932 					 * corresp destination.
3933 					 * new_pseudo_cumack will trigger a
3934 					 * cwnd update.
3935 					 * find_(rtx_)pseudo_cumack will
3936 					 * trigger search for the next
3937 					 * expected (rtx-)pseudo-cumack.
3938 					 */
3939 					tp1->whoTo->new_pseudo_cumack = 1;
3940 					tp1->whoTo->find_pseudo_cumack = 1;
3941 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3942 
3943 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3944 						/* sa_ignore NO_NULL_CHK */
3945 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3946 					}
3947 				}
3948 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3949 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3950 				}
3951 				if (tp1->rec.data.chunk_was_revoked) {
3952 					/* deflate the cwnd */
3953 					tp1->whoTo->cwnd -= tp1->book_size;
3954 					tp1->rec.data.chunk_was_revoked = 0;
3955 				}
3956 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3957 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3958 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
3959 #ifdef INVARIANTS
3960 					} else {
3961 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3962 #endif
3963 					}
3964 				}
3965 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3966 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3967 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
3968 					asoc->trigger_reset = 1;
3969 				}
3970 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3971 				if (tp1->data) {
3972 					/* sa_ignore NO_NULL_CHK */
3973 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3974 					sctp_m_freem(tp1->data);
3975 					tp1->data = NULL;
3976 				}
3977 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3978 					sctp_log_sack(asoc->last_acked_seq,
3979 					    cumack,
3980 					    tp1->rec.data.tsn,
3981 					    0,
3982 					    0,
3983 					    SCTP_LOG_FREE_SENT);
3984 				}
3985 				asoc->sent_queue_cnt--;
3986 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3987 			} else {
3988 				break;
3989 			}
3990 		}
3991 
3992 	}
3993 	/* sa_ignore NO_NULL_CHK */
3994 	if (stcb->sctp_socket) {
3995 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3996 		struct socket *so;
3997 
3998 #endif
3999 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4000 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4001 			/* sa_ignore NO_NULL_CHK */
4002 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4003 		}
4004 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 		so = SCTP_INP_SO(stcb->sctp_ep);
4006 		atomic_add_int(&stcb->asoc.refcnt, 1);
4007 		SCTP_TCB_UNLOCK(stcb);
4008 		SCTP_SOCKET_LOCK(so, 1);
4009 		SCTP_TCB_LOCK(stcb);
4010 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4011 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4012 			/* assoc was freed while we were unlocked */
4013 			SCTP_SOCKET_UNLOCK(so, 1);
4014 			return;
4015 		}
4016 #endif
4017 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4018 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4019 		SCTP_SOCKET_UNLOCK(so, 1);
4020 #endif
4021 	} else {
4022 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4023 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4024 		}
4025 	}
4026 
4027 	/* JRS - Use the congestion control given in the CC module */
4028 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4029 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4030 			if (net->net_ack2 > 0) {
4031 				/*
4032 				 * Karn's rule applies to clearing error
4033 				 * count, this is optional.
4034 				 */
4035 				net->error_count = 0;
4036 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4037 					/* addr came good */
4038 					net->dest_state |= SCTP_ADDR_REACHABLE;
4039 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4040 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4041 				}
4042 				if (net == stcb->asoc.primary_destination) {
4043 					if (stcb->asoc.alternate) {
4044 						/* release the alternate,
4045 						 * primary is good */
4046 						sctp_free_remote_addr(stcb->asoc.alternate);
4047 						stcb->asoc.alternate = NULL;
4048 					}
4049 				}
4050 				if (net->dest_state & SCTP_ADDR_PF) {
4051 					net->dest_state &= ~SCTP_ADDR_PF;
4052 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4053 					    stcb->sctp_ep, stcb, net,
4054 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4055 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4056 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4057 					/* Done with this net */
4058 					net->net_ack = 0;
4059 				}
4060 				/* restore any doubled timers */
4061 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4062 				if (net->RTO < stcb->asoc.minrto) {
4063 					net->RTO = stcb->asoc.minrto;
4064 				}
4065 				if (net->RTO > stcb->asoc.maxrto) {
4066 					net->RTO = stcb->asoc.maxrto;
4067 				}
4068 			}
4069 		}
4070 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4071 	}
4072 	asoc->last_acked_seq = cumack;
4073 
4074 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4075 		/* nothing left in-flight */
4076 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4077 			net->flight_size = 0;
4078 			net->partial_bytes_acked = 0;
4079 		}
4080 		asoc->total_flight = 0;
4081 		asoc->total_flight_count = 0;
4082 	}
4083 	/* RWND update */
4084 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4085 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4086 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4087 		/* SWS sender side engages */
4088 		asoc->peers_rwnd = 0;
4089 	}
4090 	if (asoc->peers_rwnd > old_rwnd) {
4091 		win_probe_recovery = 1;
4092 	}
4093 	/* Now assure a timer where data is queued at */
4094 again:
4095 	j = 0;
4096 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4097 		int to_ticks;
4098 
4099 		if (win_probe_recovery && (net->window_probe)) {
4100 			win_probe_recovered = 1;
4101 			/*
4102 			 * Find first chunk that was used with window probe
4103 			 * and clear the sent
4104 			 */
4105 			/* sa_ignore FREED_MEMORY */
4106 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4107 				if (tp1->window_probe) {
4108 					/* move back to data send queue */
4109 					sctp_window_probe_recovery(stcb, asoc, tp1);
4110 					break;
4111 				}
4112 			}
4113 		}
4114 		if (net->RTO == 0) {
4115 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4116 		} else {
4117 			to_ticks = MSEC_TO_TICKS(net->RTO);
4118 		}
4119 		if (net->flight_size) {
4120 			j++;
4121 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4122 			    sctp_timeout_handler, &net->rxt_timer);
4123 			if (net->window_probe) {
4124 				net->window_probe = 0;
4125 			}
4126 		} else {
4127 			if (net->window_probe) {
4128 				/* In window probes we must assure a timer
4129 				 * is still running there */
4130 				net->window_probe = 0;
4131 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4132 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4133 					    sctp_timeout_handler, &net->rxt_timer);
4134 				}
4135 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4136 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4137 				    stcb, net,
4138 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4139 			}
4140 		}
4141 	}
4142 	if ((j == 0) &&
4143 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4144 	    (asoc->sent_queue_retran_cnt == 0) &&
4145 	    (win_probe_recovered == 0) &&
4146 	    (done_once == 0)) {
4147 		/*
4148 		 * huh, this should not happen unless all packets are
4149 		 * PR-SCTP and marked to skip of course.
4150 		 */
4151 		if (sctp_fs_audit(asoc)) {
4152 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4153 				net->flight_size = 0;
4154 			}
4155 			asoc->total_flight = 0;
4156 			asoc->total_flight_count = 0;
4157 			asoc->sent_queue_retran_cnt = 0;
4158 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4159 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4160 					sctp_flight_size_increase(tp1);
4161 					sctp_total_flight_increase(stcb, tp1);
4162 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4163 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4164 				}
4165 			}
4166 		}
4167 		done_once = 1;
4168 		goto again;
4169 	}
4170 	/**********************************/
4171 	/* Now what about shutdown issues */
4172 	/**********************************/
4173 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4174 		/* nothing left on sendqueue.. consider done */
4175 		/* clean up */
4176 		if ((asoc->stream_queue_cnt == 1) &&
4177 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4178 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4179 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4180 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4181 		}
4182 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4183 		    (asoc->stream_queue_cnt == 0)) {
4184 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4185 				/* Need to abort here */
4186 				struct mbuf *op_err;
4187 
4188 		abort_out_now:
4189 				*abort_now = 1;
4190 				/* XXX */
4191 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4192 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4193 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4194 				return;
4195 			} else {
4196 				struct sctp_nets *netp;
4197 
4198 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4199 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4200 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4201 				}
4202 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4203 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4204 				sctp_stop_timers_for_shutdown(stcb);
4205 				if (asoc->alternate) {
4206 					netp = asoc->alternate;
4207 				} else {
4208 					netp = asoc->primary_destination;
4209 				}
4210 				sctp_send_shutdown(stcb, netp);
4211 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4212 				    stcb->sctp_ep, stcb, netp);
4213 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4214 				    stcb->sctp_ep, stcb, netp);
4215 			}
4216 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4217 		    (asoc->stream_queue_cnt == 0)) {
4218 			struct sctp_nets *netp;
4219 
4220 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4221 				goto abort_out_now;
4222 			}
4223 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4224 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4225 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4226 			sctp_stop_timers_for_shutdown(stcb);
4227 			if (asoc->alternate) {
4228 				netp = asoc->alternate;
4229 			} else {
4230 				netp = asoc->primary_destination;
4231 			}
4232 			sctp_send_shutdown_ack(stcb, netp);
4233 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4234 			    stcb->sctp_ep, stcb, netp);
4235 		}
4236 	}
4237 	/*********************************************/
4238 	/* Here we perform PR-SCTP procedures        */
4239 	/* (section 4.2)                             */
4240 	/*********************************************/
4241 	/* C1. update advancedPeerAckPoint */
4242 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4243 		asoc->advanced_peer_ack_point = cumack;
4244 	}
4245 	/* PR-Sctp issues need to be addressed too */
4246 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4247 		struct sctp_tmit_chunk *lchk;
4248 		uint32_t old_adv_peer_ack_point;
4249 
4250 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4251 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4252 		/* C3. See if we need to send a Fwd-TSN */
4253 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4254 			/*
4255 			 * ISSUE with ECN, see FWD-TSN processing.
4256 			 */
4257 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4258 				send_forward_tsn(stcb, asoc);
4259 			} else if (lchk) {
4260 				/* try to FR fwd-tsn's that get lost too */
4261 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4262 					send_forward_tsn(stcb, asoc);
4263 				}
4264 			}
4265 		}
4266 		if (lchk) {
4267 			/* Assure a timer is up */
4268 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4269 			    stcb->sctp_ep, stcb, lchk->whoTo);
4270 		}
4271 	}
4272 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4273 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4274 		    rwnd,
4275 		    stcb->asoc.peers_rwnd,
4276 		    stcb->asoc.total_flight,
4277 		    stcb->asoc.total_output_queue_size);
4278 	}
4279 }
4280 
4281 void
4282 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4283     struct sctp_tcb *stcb,
4284     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4285     int *abort_now, uint8_t flags,
4286     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4287 {
4288 	struct sctp_association *asoc;
4289 	struct sctp_tmit_chunk *tp1, *tp2;
4290 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4291 	uint16_t wake_him = 0;
4292 	uint32_t send_s = 0;
4293 	long j;
4294 	int accum_moved = 0;
4295 	int will_exit_fast_recovery = 0;
4296 	uint32_t a_rwnd, old_rwnd;
4297 	int win_probe_recovery = 0;
4298 	int win_probe_recovered = 0;
4299 	struct sctp_nets *net = NULL;
4300 	int done_once;
4301 	int rto_ok = 1;
4302 	uint8_t reneged_all = 0;
4303 	uint8_t cmt_dac_flag;
4304 
4305 	/*
4306 	 * we take any chance we can to service our queues since we cannot
4307 	 * get awoken when the socket is read from :<
4308 	 */
4309 	/*
4310 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4311 	 * old sack, if so discard. 2) If there is nothing left in the send
4312 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4313 	 * too, update any rwnd change and verify no timers are running.
4314 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4315 	 * moved process these first and note that it moved. 4) Process any
4316 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4317 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4318 	 * sync up flightsizes and things, stop all timers and also check
4319 	 * for shutdown_pending state. If so then go ahead and send off the
4320 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4321 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4322 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4323 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4324 	 * if in shutdown_recv state.
4325 	 */
4326 	SCTP_TCB_LOCK_ASSERT(stcb);
4327 	/* CMT DAC algo */
4328 	this_sack_lowest_newack = 0;
4329 	SCTP_STAT_INCR(sctps_slowpath_sack);
4330 	last_tsn = cum_ack;
4331 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4332 #ifdef SCTP_ASOCLOG_OF_TSNS
4333 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4334 	stcb->asoc.cumack_log_at++;
4335 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4336 		stcb->asoc.cumack_log_at = 0;
4337 	}
4338 #endif
4339 	a_rwnd = rwnd;
4340 
4341 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4342 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4343 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4344 	}
4345 	old_rwnd = stcb->asoc.peers_rwnd;
4346 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4347 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4348 		    stcb->asoc.overall_error_count,
4349 		    0,
4350 		    SCTP_FROM_SCTP_INDATA,
4351 		    __LINE__);
4352 	}
4353 	stcb->asoc.overall_error_count = 0;
4354 	asoc = &stcb->asoc;
4355 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4356 		sctp_log_sack(asoc->last_acked_seq,
4357 		    cum_ack,
4358 		    0,
4359 		    num_seg,
4360 		    num_dup,
4361 		    SCTP_LOG_NEW_SACK);
4362 	}
4363 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4364 		uint16_t i;
4365 		uint32_t *dupdata, dblock;
4366 
4367 		for (i = 0; i < num_dup; i++) {
4368 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4369 			    sizeof(uint32_t), (uint8_t *) & dblock);
4370 			if (dupdata == NULL) {
4371 				break;
4372 			}
4373 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4374 		}
4375 	}
4376 	/* reality check */
4377 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4378 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4379 		    sctpchunk_listhead);
4380 		send_s = tp1->rec.data.tsn + 1;
4381 	} else {
4382 		tp1 = NULL;
4383 		send_s = asoc->sending_seq;
4384 	}
4385 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4386 		struct mbuf *op_err;
4387 		char msg[SCTP_DIAG_INFO_LEN];
4388 
4389 		/*
4390 		 * no way, we have not even sent this TSN out yet. Peer is
4391 		 * hopelessly messed up with us.
4392 		 */
4393 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4394 		    cum_ack, send_s);
4395 		if (tp1) {
4396 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4397 			    tp1->rec.data.tsn, (void *)tp1);
4398 		}
4399 hopeless_peer:
4400 		*abort_now = 1;
4401 		/* XXX */
4402 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4403 		    cum_ack, send_s);
4404 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4405 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4406 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4407 		return;
4408 	}
4409 	/**********************/
4410 	/* 1) check the range */
4411 	/**********************/
4412 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4413 		/* acking something behind */
4414 		return;
4415 	}
4416 	/* update the Rwnd of the peer */
4417 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4418 	    TAILQ_EMPTY(&asoc->send_queue) &&
4419 	    (asoc->stream_queue_cnt == 0)) {
4420 		/* nothing left on send/sent and strmq */
4421 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4422 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4423 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4424 		}
4425 		asoc->peers_rwnd = a_rwnd;
4426 		if (asoc->sent_queue_retran_cnt) {
4427 			asoc->sent_queue_retran_cnt = 0;
4428 		}
4429 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4430 			/* SWS sender side engages */
4431 			asoc->peers_rwnd = 0;
4432 		}
4433 		/* stop any timers */
4434 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4435 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4436 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4437 			net->partial_bytes_acked = 0;
4438 			net->flight_size = 0;
4439 		}
4440 		asoc->total_flight = 0;
4441 		asoc->total_flight_count = 0;
4442 		return;
4443 	}
4444 	/*
4445 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4446 	 * things. The total byte count acked is tracked in netAckSz AND
4447 	 * netAck2 is used to track the total bytes acked that are un-
4448 	 * amibguious and were never retransmitted. We track these on a per
4449 	 * destination address basis.
4450 	 */
4451 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4452 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4453 			/* Drag along the window_tsn for cwr's */
4454 			net->cwr_window_tsn = cum_ack;
4455 		}
4456 		net->prev_cwnd = net->cwnd;
4457 		net->net_ack = 0;
4458 		net->net_ack2 = 0;
4459 
4460 		/*
4461 		 * CMT: Reset CUC and Fast recovery algo variables before
4462 		 * SACK processing
4463 		 */
4464 		net->new_pseudo_cumack = 0;
4465 		net->will_exit_fast_recovery = 0;
4466 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4467 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4468 		}
4469 	}
4470 	/* process the new consecutive TSN first */
4471 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4472 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4473 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4474 				accum_moved = 1;
4475 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4476 					/*
4477 					 * If it is less than ACKED, it is
4478 					 * now no-longer in flight. Higher
4479 					 * values may occur during marking
4480 					 */
4481 					if ((tp1->whoTo->dest_state &
4482 					    SCTP_ADDR_UNCONFIRMED) &&
4483 					    (tp1->snd_count < 2)) {
4484 						/*
4485 						 * If there was no retran
4486 						 * and the address is
4487 						 * un-confirmed and we sent
4488 						 * there and are now
4489 						 * sacked.. its confirmed,
4490 						 * mark it so.
4491 						 */
4492 						tp1->whoTo->dest_state &=
4493 						    ~SCTP_ADDR_UNCONFIRMED;
4494 					}
4495 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4496 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4497 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4498 							    tp1->whoTo->flight_size,
4499 							    tp1->book_size,
4500 							    (uint32_t) (uintptr_t) tp1->whoTo,
4501 							    tp1->rec.data.tsn);
4502 						}
4503 						sctp_flight_size_decrease(tp1);
4504 						sctp_total_flight_decrease(stcb, tp1);
4505 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4506 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4507 							    tp1);
4508 						}
4509 					}
4510 					tp1->whoTo->net_ack += tp1->send_size;
4511 
4512 					/* CMT SFR and DAC algos */
4513 					this_sack_lowest_newack = tp1->rec.data.tsn;
4514 					tp1->whoTo->saw_newack = 1;
4515 
4516 					if (tp1->snd_count < 2) {
4517 						/*
4518 						 * True non-retransmited
4519 						 * chunk
4520 						 */
4521 						tp1->whoTo->net_ack2 +=
4522 						    tp1->send_size;
4523 
4524 						/* update RTO too? */
4525 						if (tp1->do_rtt) {
4526 							if (rto_ok) {
4527 								tp1->whoTo->RTO =
4528 								    sctp_calculate_rto(stcb,
4529 								    asoc, tp1->whoTo,
4530 								    &tp1->sent_rcv_time,
4531 								    sctp_align_safe_nocopy,
4532 								    SCTP_RTT_FROM_DATA);
4533 								rto_ok = 0;
4534 							}
4535 							if (tp1->whoTo->rto_needed == 0) {
4536 								tp1->whoTo->rto_needed = 1;
4537 							}
4538 							tp1->do_rtt = 0;
4539 						}
4540 					}
4541 					/*
4542 					 * CMT: CUCv2 algorithm. From the
4543 					 * cumack'd TSNs, for each TSN being
4544 					 * acked for the first time, set the
4545 					 * following variables for the
4546 					 * corresp destination.
4547 					 * new_pseudo_cumack will trigger a
4548 					 * cwnd update.
4549 					 * find_(rtx_)pseudo_cumack will
4550 					 * trigger search for the next
4551 					 * expected (rtx-)pseudo-cumack.
4552 					 */
4553 					tp1->whoTo->new_pseudo_cumack = 1;
4554 					tp1->whoTo->find_pseudo_cumack = 1;
4555 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4556 
4557 
4558 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4559 						sctp_log_sack(asoc->last_acked_seq,
4560 						    cum_ack,
4561 						    tp1->rec.data.tsn,
4562 						    0,
4563 						    0,
4564 						    SCTP_LOG_TSN_ACKED);
4565 					}
4566 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4567 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4568 					}
4569 				}
4570 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4571 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4572 #ifdef SCTP_AUDITING_ENABLED
4573 					sctp_audit_log(0xB3,
4574 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4575 #endif
4576 				}
4577 				if (tp1->rec.data.chunk_was_revoked) {
4578 					/* deflate the cwnd */
4579 					tp1->whoTo->cwnd -= tp1->book_size;
4580 					tp1->rec.data.chunk_was_revoked = 0;
4581 				}
4582 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4583 					tp1->sent = SCTP_DATAGRAM_ACKED;
4584 				}
4585 			}
4586 		} else {
4587 			break;
4588 		}
4589 	}
4590 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4591 	/* always set this up to cum-ack */
4592 	asoc->this_sack_highest_gap = last_tsn;
4593 
4594 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4595 
4596 		/*
4597 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4598 		 * to be greater than the cumack. Also reset saw_newack to 0
4599 		 * for all dests.
4600 		 */
4601 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4602 			net->saw_newack = 0;
4603 			net->this_sack_highest_newack = last_tsn;
4604 		}
4605 
4606 		/*
4607 		 * thisSackHighestGap will increase while handling NEW
4608 		 * segments this_sack_highest_newack will increase while
4609 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4610 		 * used for CMT DAC algo. saw_newack will also change.
4611 		 */
4612 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4613 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4614 		    num_seg, num_nr_seg, &rto_ok)) {
4615 			wake_him++;
4616 		}
4617 		/*
4618 		 * validate the biggest_tsn_acked in the gap acks if strict
4619 		 * adherence is wanted.
4620 		 */
4621 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4622 			/*
4623 			 * peer is either confused or we are under attack.
4624 			 * We must abort.
4625 			 */
4626 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4627 			    biggest_tsn_acked, send_s);
4628 			goto hopeless_peer;
4629 		}
4630 	}
4631 	/*******************************************/
4632 	/* cancel ALL T3-send timer if accum moved */
4633 	/*******************************************/
4634 	if (asoc->sctp_cmt_on_off > 0) {
4635 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4636 			if (net->new_pseudo_cumack)
4637 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4638 				    stcb, net,
4639 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4640 
4641 		}
4642 	} else {
4643 		if (accum_moved) {
4644 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4645 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4646 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4647 			}
4648 		}
4649 	}
4650 	/********************************************/
4651 	/* drop the acked chunks from the sentqueue */
4652 	/********************************************/
4653 	asoc->last_acked_seq = cum_ack;
4654 
4655 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4656 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4657 			break;
4658 		}
4659 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4660 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4661 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4662 #ifdef INVARIANTS
4663 			} else {
4664 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4665 #endif
4666 			}
4667 		}
4668 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4669 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4670 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4671 			asoc->trigger_reset = 1;
4672 		}
4673 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4674 		if (PR_SCTP_ENABLED(tp1->flags)) {
4675 			if (asoc->pr_sctp_cnt != 0)
4676 				asoc->pr_sctp_cnt--;
4677 		}
4678 		asoc->sent_queue_cnt--;
4679 		if (tp1->data) {
4680 			/* sa_ignore NO_NULL_CHK */
4681 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4682 			sctp_m_freem(tp1->data);
4683 			tp1->data = NULL;
4684 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4685 				asoc->sent_queue_cnt_removeable--;
4686 			}
4687 		}
4688 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4689 			sctp_log_sack(asoc->last_acked_seq,
4690 			    cum_ack,
4691 			    tp1->rec.data.tsn,
4692 			    0,
4693 			    0,
4694 			    SCTP_LOG_FREE_SENT);
4695 		}
4696 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4697 		wake_him++;
4698 	}
4699 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4700 #ifdef INVARIANTS
4701 		panic("Warning flight size is positive and should be 0");
4702 #else
4703 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4704 		    asoc->total_flight);
4705 #endif
4706 		asoc->total_flight = 0;
4707 	}
4708 	/* sa_ignore NO_NULL_CHK */
4709 	if ((wake_him) && (stcb->sctp_socket)) {
4710 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4711 		struct socket *so;
4712 
4713 #endif
4714 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4715 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4716 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4717 		}
4718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4719 		so = SCTP_INP_SO(stcb->sctp_ep);
4720 		atomic_add_int(&stcb->asoc.refcnt, 1);
4721 		SCTP_TCB_UNLOCK(stcb);
4722 		SCTP_SOCKET_LOCK(so, 1);
4723 		SCTP_TCB_LOCK(stcb);
4724 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4725 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4726 			/* assoc was freed while we were unlocked */
4727 			SCTP_SOCKET_UNLOCK(so, 1);
4728 			return;
4729 		}
4730 #endif
4731 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4732 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4733 		SCTP_SOCKET_UNLOCK(so, 1);
4734 #endif
4735 	} else {
4736 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4737 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4738 		}
4739 	}
4740 
4741 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4742 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4743 			/* Setup so we will exit RFC2582 fast recovery */
4744 			will_exit_fast_recovery = 1;
4745 		}
4746 	}
4747 	/*
4748 	 * Check for revoked fragments:
4749 	 *
4750 	 * if Previous sack - Had no frags then we can't have any revoked if
4751 	 * Previous sack - Had frag's then - If we now have frags aka
4752 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4753 	 * some of them. else - The peer revoked all ACKED fragments, since
4754 	 * we had some before and now we have NONE.
4755 	 */
4756 
4757 	if (num_seg) {
4758 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4759 		asoc->saw_sack_with_frags = 1;
4760 	} else if (asoc->saw_sack_with_frags) {
4761 		int cnt_revoked = 0;
4762 
4763 		/* Peer revoked all dg's marked or acked */
4764 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4765 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4766 				tp1->sent = SCTP_DATAGRAM_SENT;
4767 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4768 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4769 					    tp1->whoTo->flight_size,
4770 					    tp1->book_size,
4771 					    (uint32_t) (uintptr_t) tp1->whoTo,
4772 					    tp1->rec.data.tsn);
4773 				}
4774 				sctp_flight_size_increase(tp1);
4775 				sctp_total_flight_increase(stcb, tp1);
4776 				tp1->rec.data.chunk_was_revoked = 1;
4777 				/*
4778 				 * To ensure that this increase in
4779 				 * flightsize, which is artificial, does not
4780 				 * throttle the sender, we also increase the
4781 				 * cwnd artificially.
4782 				 */
4783 				tp1->whoTo->cwnd += tp1->book_size;
4784 				cnt_revoked++;
4785 			}
4786 		}
4787 		if (cnt_revoked) {
4788 			reneged_all = 1;
4789 		}
4790 		asoc->saw_sack_with_frags = 0;
4791 	}
4792 	if (num_nr_seg > 0)
4793 		asoc->saw_sack_with_nr_frags = 1;
4794 	else
4795 		asoc->saw_sack_with_nr_frags = 0;
4796 
4797 	/* JRS - Use the congestion control given in the CC module */
4798 	if (ecne_seen == 0) {
4799 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4800 			if (net->net_ack2 > 0) {
4801 				/*
4802 				 * Karn's rule applies to clearing error
4803 				 * count, this is optional.
4804 				 */
4805 				net->error_count = 0;
4806 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4807 					/* addr came good */
4808 					net->dest_state |= SCTP_ADDR_REACHABLE;
4809 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4810 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4811 				}
4812 				if (net == stcb->asoc.primary_destination) {
4813 					if (stcb->asoc.alternate) {
4814 						/* release the alternate,
4815 						 * primary is good */
4816 						sctp_free_remote_addr(stcb->asoc.alternate);
4817 						stcb->asoc.alternate = NULL;
4818 					}
4819 				}
4820 				if (net->dest_state & SCTP_ADDR_PF) {
4821 					net->dest_state &= ~SCTP_ADDR_PF;
4822 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4823 					    stcb->sctp_ep, stcb, net,
4824 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4825 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4826 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4827 					/* Done with this net */
4828 					net->net_ack = 0;
4829 				}
4830 				/* restore any doubled timers */
4831 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4832 				if (net->RTO < stcb->asoc.minrto) {
4833 					net->RTO = stcb->asoc.minrto;
4834 				}
4835 				if (net->RTO > stcb->asoc.maxrto) {
4836 					net->RTO = stcb->asoc.maxrto;
4837 				}
4838 			}
4839 		}
4840 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4841 	}
4842 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4843 		/* nothing left in-flight */
4844 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4845 			/* stop all timers */
4846 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4847 			    stcb, net,
4848 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4849 			net->flight_size = 0;
4850 			net->partial_bytes_acked = 0;
4851 		}
4852 		asoc->total_flight = 0;
4853 		asoc->total_flight_count = 0;
4854 	}
4855 	/**********************************/
4856 	/* Now what about shutdown issues */
4857 	/**********************************/
4858 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4859 		/* nothing left on sendqueue.. consider done */
4860 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4861 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4862 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4863 		}
4864 		asoc->peers_rwnd = a_rwnd;
4865 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4866 			/* SWS sender side engages */
4867 			asoc->peers_rwnd = 0;
4868 		}
4869 		/* clean up */
4870 		if ((asoc->stream_queue_cnt == 1) &&
4871 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4872 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4873 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4874 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4875 		}
4876 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4877 		    (asoc->stream_queue_cnt == 0)) {
4878 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4879 				/* Need to abort here */
4880 				struct mbuf *op_err;
4881 
4882 		abort_out_now:
4883 				*abort_now = 1;
4884 				/* XXX */
4885 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4886 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4887 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4888 				return;
4889 			} else {
4890 				struct sctp_nets *netp;
4891 
4892 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4893 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4894 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4895 				}
4896 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4897 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4898 				sctp_stop_timers_for_shutdown(stcb);
4899 				if (asoc->alternate) {
4900 					netp = asoc->alternate;
4901 				} else {
4902 					netp = asoc->primary_destination;
4903 				}
4904 				sctp_send_shutdown(stcb, netp);
4905 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4906 				    stcb->sctp_ep, stcb, netp);
4907 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4908 				    stcb->sctp_ep, stcb, netp);
4909 			}
4910 			return;
4911 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4912 		    (asoc->stream_queue_cnt == 0)) {
4913 			struct sctp_nets *netp;
4914 
4915 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4916 				goto abort_out_now;
4917 			}
4918 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4919 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4920 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4921 			sctp_stop_timers_for_shutdown(stcb);
4922 			if (asoc->alternate) {
4923 				netp = asoc->alternate;
4924 			} else {
4925 				netp = asoc->primary_destination;
4926 			}
4927 			sctp_send_shutdown_ack(stcb, netp);
4928 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4929 			    stcb->sctp_ep, stcb, netp);
4930 			return;
4931 		}
4932 	}
4933 	/*
4934 	 * Now here we are going to recycle net_ack for a different use...
4935 	 * HEADS UP.
4936 	 */
4937 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4938 		net->net_ack = 0;
4939 	}
4940 
4941 	/*
4942 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4943 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4944 	 * automatically ensure that.
4945 	 */
4946 	if ((asoc->sctp_cmt_on_off > 0) &&
4947 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4948 	    (cmt_dac_flag == 0)) {
4949 		this_sack_lowest_newack = cum_ack;
4950 	}
4951 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4952 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4953 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4954 	}
4955 	/* JRS - Use the congestion control given in the CC module */
4956 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4957 
4958 	/* Now are we exiting loss recovery ? */
4959 	if (will_exit_fast_recovery) {
4960 		/* Ok, we must exit fast recovery */
4961 		asoc->fast_retran_loss_recovery = 0;
4962 	}
4963 	if ((asoc->sat_t3_loss_recovery) &&
4964 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4965 		/* end satellite t3 loss recovery */
4966 		asoc->sat_t3_loss_recovery = 0;
4967 	}
4968 	/*
4969 	 * CMT Fast recovery
4970 	 */
4971 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4972 		if (net->will_exit_fast_recovery) {
4973 			/* Ok, we must exit fast recovery */
4974 			net->fast_retran_loss_recovery = 0;
4975 		}
4976 	}
4977 
4978 	/* Adjust and set the new rwnd value */
4979 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4980 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4981 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4982 	}
4983 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4984 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4985 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4986 		/* SWS sender side engages */
4987 		asoc->peers_rwnd = 0;
4988 	}
4989 	if (asoc->peers_rwnd > old_rwnd) {
4990 		win_probe_recovery = 1;
4991 	}
4992 	/*
4993 	 * Now we must setup so we have a timer up for anyone with
4994 	 * outstanding data.
4995 	 */
4996 	done_once = 0;
4997 again:
4998 	j = 0;
4999 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5000 		if (win_probe_recovery && (net->window_probe)) {
5001 			win_probe_recovered = 1;
5002 			/*-
5003 			 * Find first chunk that was used with
5004 			 * window probe and clear the event. Put
5005 			 * it back into the send queue as if has
5006 			 * not been sent.
5007 			 */
5008 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5009 				if (tp1->window_probe) {
5010 					sctp_window_probe_recovery(stcb, asoc, tp1);
5011 					break;
5012 				}
5013 			}
5014 		}
5015 		if (net->flight_size) {
5016 			j++;
5017 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5018 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5019 				    stcb->sctp_ep, stcb, net);
5020 			}
5021 			if (net->window_probe) {
5022 				net->window_probe = 0;
5023 			}
5024 		} else {
5025 			if (net->window_probe) {
5026 				/* In window probes we must assure a timer
5027 				 * is still running there */
5028 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5029 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5030 					    stcb->sctp_ep, stcb, net);
5031 
5032 				}
5033 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5034 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5035 				    stcb, net,
5036 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5037 			}
5038 		}
5039 	}
5040 	if ((j == 0) &&
5041 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5042 	    (asoc->sent_queue_retran_cnt == 0) &&
5043 	    (win_probe_recovered == 0) &&
5044 	    (done_once == 0)) {
5045 		/*
5046 		 * huh, this should not happen unless all packets are
5047 		 * PR-SCTP and marked to skip of course.
5048 		 */
5049 		if (sctp_fs_audit(asoc)) {
5050 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5051 				net->flight_size = 0;
5052 			}
5053 			asoc->total_flight = 0;
5054 			asoc->total_flight_count = 0;
5055 			asoc->sent_queue_retran_cnt = 0;
5056 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5057 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5058 					sctp_flight_size_increase(tp1);
5059 					sctp_total_flight_increase(stcb, tp1);
5060 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5061 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5062 				}
5063 			}
5064 		}
5065 		done_once = 1;
5066 		goto again;
5067 	}
5068 	/*********************************************/
5069 	/* Here we perform PR-SCTP procedures        */
5070 	/* (section 4.2)                             */
5071 	/*********************************************/
5072 	/* C1. update advancedPeerAckPoint */
5073 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5074 		asoc->advanced_peer_ack_point = cum_ack;
5075 	}
5076 	/* C2. try to further move advancedPeerAckPoint ahead */
5077 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5078 		struct sctp_tmit_chunk *lchk;
5079 		uint32_t old_adv_peer_ack_point;
5080 
5081 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5082 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5083 		/* C3. See if we need to send a Fwd-TSN */
5084 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5085 			/*
5086 			 * ISSUE with ECN, see FWD-TSN processing.
5087 			 */
5088 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5089 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5090 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5091 				    old_adv_peer_ack_point);
5092 			}
5093 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5094 				send_forward_tsn(stcb, asoc);
5095 			} else if (lchk) {
5096 				/* try to FR fwd-tsn's that get lost too */
5097 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5098 					send_forward_tsn(stcb, asoc);
5099 				}
5100 			}
5101 		}
5102 		if (lchk) {
5103 			/* Assure a timer is up */
5104 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5105 			    stcb->sctp_ep, stcb, lchk->whoTo);
5106 		}
5107 	}
5108 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5109 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5110 		    a_rwnd,
5111 		    stcb->asoc.peers_rwnd,
5112 		    stcb->asoc.total_flight,
5113 		    stcb->asoc.total_output_queue_size);
5114 	}
5115 }
5116 
5117 void
5118 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5119 {
5120 	/* Copy cum-ack */
5121 	uint32_t cum_ack, a_rwnd;
5122 
5123 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5124 	/* Arrange so a_rwnd does NOT change */
5125 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5126 
5127 	/* Now call the express sack handling */
5128 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5129 }
5130 
5131 static void
5132 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5133     struct sctp_stream_in *strmin)
5134 {
5135 	struct sctp_queued_to_read *ctl, *nctl;
5136 	struct sctp_association *asoc;
5137 	uint32_t mid;
5138 	int need_reasm_check = 0;
5139 
5140 	asoc = &stcb->asoc;
5141 	mid = strmin->last_mid_delivered;
5142 	/*
5143 	 * First deliver anything prior to and including the stream no that
5144 	 * came in.
5145 	 */
5146 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5147 		if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5148 			/* this is deliverable now */
5149 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5150 				if (ctl->on_strm_q) {
5151 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5152 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5153 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5154 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5155 #ifdef INVARIANTS
5156 					} else {
5157 						panic("strmin: %p ctl: %p unknown %d",
5158 						    strmin, ctl, ctl->on_strm_q);
5159 #endif
5160 					}
5161 					ctl->on_strm_q = 0;
5162 				}
5163 				/* subtract pending on streams */
5164 				asoc->size_on_all_streams -= ctl->length;
5165 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5166 				/* deliver it to at least the delivery-q */
5167 				if (stcb->sctp_socket) {
5168 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5169 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5170 					    ctl,
5171 					    &stcb->sctp_socket->so_rcv,
5172 					    1, SCTP_READ_LOCK_HELD,
5173 					    SCTP_SO_NOT_LOCKED);
5174 				}
5175 			} else {
5176 				/* Its a fragmented message */
5177 				if (ctl->first_frag_seen) {
5178 					/* Make it so this is next to
5179 					 * deliver, we restore later */
5180 					strmin->last_mid_delivered = ctl->mid - 1;
5181 					need_reasm_check = 1;
5182 					break;
5183 				}
5184 			}
5185 		} else {
5186 			/* no more delivery now. */
5187 			break;
5188 		}
5189 	}
5190 	if (need_reasm_check) {
5191 		int ret;
5192 
5193 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5194 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5195 			/* Restore the next to deliver unless we are ahead */
5196 			strmin->last_mid_delivered = mid;
5197 		}
5198 		if (ret == 0) {
5199 			/* Left the front Partial one on */
5200 			return;
5201 		}
5202 		need_reasm_check = 0;
5203 	}
5204 	/*
5205 	 * now we must deliver things in queue the normal way  if any are
5206 	 * now ready.
5207 	 */
5208 	mid = strmin->last_mid_delivered + 1;
5209 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5210 		if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5211 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5212 				/* this is deliverable now */
5213 				if (ctl->on_strm_q) {
5214 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5215 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5216 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5217 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5218 #ifdef INVARIANTS
5219 					} else {
5220 						panic("strmin: %p ctl: %p unknown %d",
5221 						    strmin, ctl, ctl->on_strm_q);
5222 #endif
5223 					}
5224 					ctl->on_strm_q = 0;
5225 				}
5226 				/* subtract pending on streams */
5227 				asoc->size_on_all_streams -= ctl->length;
5228 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5229 				/* deliver it to at least the delivery-q */
5230 				strmin->last_mid_delivered = ctl->mid;
5231 				if (stcb->sctp_socket) {
5232 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5233 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5234 					    ctl,
5235 					    &stcb->sctp_socket->so_rcv, 1,
5236 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5237 
5238 				}
5239 				mid = strmin->last_mid_delivered + 1;
5240 			} else {
5241 				/* Its a fragmented message */
5242 				if (ctl->first_frag_seen) {
5243 					/* Make it so this is next to
5244 					 * deliver */
5245 					strmin->last_mid_delivered = ctl->mid - 1;
5246 					need_reasm_check = 1;
5247 					break;
5248 				}
5249 			}
5250 		} else {
5251 			break;
5252 		}
5253 	}
5254 	if (need_reasm_check) {
5255 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5256 	}
5257 }
5258 
5259 
5260 
5261 static void
5262 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5263     struct sctp_association *asoc,
5264     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5265 {
5266 	struct sctp_queued_to_read *control;
5267 	struct sctp_stream_in *strm;
5268 	struct sctp_tmit_chunk *chk, *nchk;
5269 	int cnt_removed = 0;
5270 
5271 	/*
5272 	 * For now large messages held on the stream reasm that are complete
5273 	 * will be tossed too. We could in theory do more work to spin
5274 	 * through and stop after dumping one msg aka seeing the start of a
5275 	 * new msg at the head, and call the delivery function... to see if
5276 	 * it can be delivered... But for now we just dump everything on the
5277 	 * queue.
5278 	 */
5279 	strm = &asoc->strmin[stream];
5280 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5281 	if (control == NULL) {
5282 		/* Not found */
5283 		return;
5284 	}
5285 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5286 		return;
5287 	}
5288 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5289 		/* Purge hanging chunks */
5290 		if (!asoc->idata_supported && (ordered == 0)) {
5291 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5292 				break;
5293 			}
5294 		}
5295 		cnt_removed++;
5296 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5297 		asoc->size_on_reasm_queue -= chk->send_size;
5298 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5299 		if (chk->data) {
5300 			sctp_m_freem(chk->data);
5301 			chk->data = NULL;
5302 		}
5303 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5304 	}
5305 	if (!TAILQ_EMPTY(&control->reasm)) {
5306 		/* This has to be old data, unordered */
5307 		if (control->data) {
5308 			sctp_m_freem(control->data);
5309 			control->data = NULL;
5310 		}
5311 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5312 		chk = TAILQ_FIRST(&control->reasm);
5313 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5314 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5315 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5316 			    chk, SCTP_READ_LOCK_HELD);
5317 		}
5318 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5319 		return;
5320 	}
5321 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5322 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5323 		control->on_strm_q = 0;
5324 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5325 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5326 		control->on_strm_q = 0;
5327 #ifdef INVARIANTS
5328 	} else if (control->on_strm_q) {
5329 		panic("strm: %p ctl: %p unknown %d",
5330 		    strm, control, control->on_strm_q);
5331 #endif
5332 	}
5333 	control->on_strm_q = 0;
5334 	if (control->on_read_q == 0) {
5335 		sctp_free_remote_addr(control->whoFrom);
5336 		if (control->data) {
5337 			sctp_m_freem(control->data);
5338 			control->data = NULL;
5339 		}
5340 		sctp_free_a_readq(stcb, control);
5341 	}
5342 }
5343 
5344 void
5345 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5346     struct sctp_forward_tsn_chunk *fwd,
5347     int *abort_flag, struct mbuf *m, int offset)
5348 {
5349 	/* The pr-sctp fwd tsn */
5350 	/*
5351 	 * here we will perform all the data receiver side steps for
5352 	 * processing FwdTSN, as required in by pr-sctp draft:
5353 	 *
5354 	 * Assume we get FwdTSN(x):
5355 	 *
5356 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5357 	 * + others we have 3) examine and update re-ordering queue on
5358 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5359 	 * report where we are.
5360 	 */
5361 	struct sctp_association *asoc;
5362 	uint32_t new_cum_tsn, gap;
5363 	unsigned int i, fwd_sz, m_size;
5364 	uint32_t str_seq;
5365 	struct sctp_stream_in *strm;
5366 	struct sctp_queued_to_read *ctl, *sv;
5367 
5368 	asoc = &stcb->asoc;
5369 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5370 		SCTPDBG(SCTP_DEBUG_INDATA1,
5371 		    "Bad size too small/big fwd-tsn\n");
5372 		return;
5373 	}
5374 	m_size = (stcb->asoc.mapping_array_size << 3);
5375 	/*************************************************************/
5376 	/* 1. Here we update local cumTSN and shift the bitmap array */
5377 	/*************************************************************/
5378 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5379 
5380 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5381 		/* Already got there ... */
5382 		return;
5383 	}
5384 	/*
5385 	 * now we know the new TSN is more advanced, let's find the actual
5386 	 * gap
5387 	 */
5388 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5389 	asoc->cumulative_tsn = new_cum_tsn;
5390 	if (gap >= m_size) {
5391 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5392 			struct mbuf *op_err;
5393 			char msg[SCTP_DIAG_INFO_LEN];
5394 
5395 			/*
5396 			 * out of range (of single byte chunks in the rwnd I
5397 			 * give out). This must be an attacker.
5398 			 */
5399 			*abort_flag = 1;
5400 			snprintf(msg, sizeof(msg),
5401 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5402 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5403 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5404 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5405 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5406 			return;
5407 		}
5408 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5409 
5410 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5411 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5412 		asoc->highest_tsn_inside_map = new_cum_tsn;
5413 
5414 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5415 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5416 
5417 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5418 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5419 		}
5420 	} else {
5421 		SCTP_TCB_LOCK_ASSERT(stcb);
5422 		for (i = 0; i <= gap; i++) {
5423 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5424 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5425 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5426 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5427 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5428 				}
5429 			}
5430 		}
5431 	}
5432 	/*************************************************************/
5433 	/* 2. Clear up re-assembly queue                             */
5434 	/*************************************************************/
5435 
5436 	/* This is now done as part of clearing up the stream/seq */
5437 	if (asoc->idata_supported == 0) {
5438 		uint16_t sid;
5439 
5440 		/* Flush all the un-ordered data based on cum-tsn */
5441 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5442 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5443 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5444 		}
5445 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5446 	}
5447 	/*******************************************************/
5448 	/* 3. Update the PR-stream re-ordering queues and fix  */
5449 	/* delivery issues as needed.                       */
5450 	/*******************************************************/
5451 	fwd_sz -= sizeof(*fwd);
5452 	if (m && fwd_sz) {
5453 		/* New method. */
5454 		unsigned int num_str;
5455 		uint32_t mid, cur_mid;
5456 		uint16_t sid;
5457 		uint16_t ordered, flags;
5458 		struct sctp_strseq *stseq, strseqbuf;
5459 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5460 
5461 		offset += sizeof(*fwd);
5462 
5463 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5464 		if (asoc->idata_supported) {
5465 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5466 		} else {
5467 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5468 		}
5469 		for (i = 0; i < num_str; i++) {
5470 			if (asoc->idata_supported) {
5471 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5472 				    sizeof(struct sctp_strseq_mid),
5473 				    (uint8_t *) & strseqbuf_m);
5474 				offset += sizeof(struct sctp_strseq_mid);
5475 				if (stseq_m == NULL) {
5476 					break;
5477 				}
5478 				sid = ntohs(stseq_m->sid);
5479 				mid = ntohl(stseq_m->mid);
5480 				flags = ntohs(stseq_m->flags);
5481 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5482 					ordered = 0;
5483 				} else {
5484 					ordered = 1;
5485 				}
5486 			} else {
5487 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5488 				    sizeof(struct sctp_strseq),
5489 				    (uint8_t *) & strseqbuf);
5490 				offset += sizeof(struct sctp_strseq);
5491 				if (stseq == NULL) {
5492 					break;
5493 				}
5494 				sid = ntohs(stseq->sid);
5495 				mid = (uint32_t) ntohs(stseq->ssn);
5496 				ordered = 1;
5497 			}
5498 			/* Convert */
5499 
5500 			/* now process */
5501 
5502 			/*
5503 			 * Ok we now look for the stream/seq on the read
5504 			 * queue where its not all delivered. If we find it
5505 			 * we transmute the read entry into a PDI_ABORTED.
5506 			 */
5507 			if (sid >= asoc->streamincnt) {
5508 				/* screwed up streams, stop!  */
5509 				break;
5510 			}
5511 			if ((asoc->str_of_pdapi == sid) &&
5512 			    (asoc->ssn_of_pdapi == mid)) {
5513 				/*
5514 				 * If this is the one we were partially
5515 				 * delivering now then we no longer are.
5516 				 * Note this will change with the reassembly
5517 				 * re-write.
5518 				 */
5519 				asoc->fragmented_delivery_inprogress = 0;
5520 			}
5521 			strm = &asoc->strmin[sid];
5522 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5523 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5524 			}
5525 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5526 				if ((ctl->sinfo_stream == sid) &&
5527 				    (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5528 					str_seq = (sid << 16) | (0x0000ffff & mid);
5529 					ctl->pdapi_aborted = 1;
5530 					sv = stcb->asoc.control_pdapi;
5531 					ctl->end_added = 1;
5532 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5533 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5534 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5535 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5536 #ifdef INVARIANTS
5537 					} else if (ctl->on_strm_q) {
5538 						panic("strm: %p ctl: %p unknown %d",
5539 						    strm, ctl, ctl->on_strm_q);
5540 #endif
5541 					}
5542 					ctl->on_strm_q = 0;
5543 					stcb->asoc.control_pdapi = ctl;
5544 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5545 					    stcb,
5546 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5547 					    (void *)&str_seq,
5548 					    SCTP_SO_NOT_LOCKED);
5549 					stcb->asoc.control_pdapi = sv;
5550 					break;
5551 				} else if ((ctl->sinfo_stream == sid) &&
5552 				    SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5553 					/* We are past our victim SSN */
5554 					break;
5555 				}
5556 			}
5557 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5558 				/* Update the sequence number */
5559 				strm->last_mid_delivered = mid;
5560 			}
5561 			/* now kick the stream the new way */
5562 			/* sa_ignore NO_NULL_CHK */
5563 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5564 		}
5565 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5566 	}
5567 	/*
5568 	 * Now slide thing forward.
5569 	 */
5570 	sctp_slide_mapping_arrays(stcb);
5571 }
5572