xref: /freebsd/sys/netinet/sctp_indata.c (revision 7be8de4271d5cb5d441e2757912c1824f6c3dc3b)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send that is and will be sending it .. for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-2-m socket. Since
84 	 * the sb_cc is the count that everyone as put up. When we re-write
85 	 * sctp_soreceive then we will fix this so that ONLY this
86 	 * associations data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on socket queue and we yet hold
102 	 * for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* what is the overhead of all these rwnd's */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 	 * even it is 0. SWS engaged
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t stream_no,
134     uint32_t stream_seq, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = stream_no;
145 	read_queue_e->sinfo_ssn = stream_seq;
146 	read_queue_e->sinfo_flags = (flags << 8);
147 	read_queue_e->sinfo_ppid = ppid;
148 	read_queue_e->sinfo_context = context;
149 	read_queue_e->sinfo_tsn = tsn;
150 	read_queue_e->sinfo_cumtsn = tsn;
151 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t bits, unordered;
337 
338 	bits = (control->sinfo_flags >> 8);
339 	unordered = bits & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/*
345 				 * Only one stream can be here in old style
346 				 * -- abort
347 				 */
348 				return (-1);
349 			}
350 			TAILQ_INSERT_TAIL(q, control, next_instrm);
351 			control->on_strm_q = SCTP_ON_UNORDERED;
352 			return (0);
353 		}
354 	} else {
355 		q = &strm->inqueue;
356 	}
357 	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
359 	}
360 	if (TAILQ_EMPTY(q)) {
361 		/* Empty queue */
362 		TAILQ_INSERT_HEAD(q, control, next_instrm);
363 		if (unordered) {
364 			control->on_strm_q = SCTP_ON_UNORDERED;
365 		} else {
366 			control->on_strm_q = SCTP_ON_ORDERED;
367 		}
368 		return (0);
369 	} else {
370 		TAILQ_FOREACH(at, q, next_instrm) {
371 			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
372 				/*
373 				 * one in queue is bigger than the new one,
374 				 * insert before this one
375 				 */
376 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
377 				if (unordered) {
378 					control->on_strm_q = SCTP_ON_UNORDERED;
379 				} else {
380 					control->on_strm_q = SCTP_ON_ORDERED;
381 				}
382 				break;
383 			} else if (at->msg_id == control->msg_id) {
384 				/*
385 				 * Gak, He sent me a duplicate msg id
386 				 * number?? return -1 to abort.
387 				 */
388 				return (-1);
389 			} else {
390 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
391 					/*
392 					 * We are at the end, insert it
393 					 * after this one
394 					 */
395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 						sctp_log_strm_del(control, at,
397 						    SCTP_STR_LOG_FROM_INSERT_TL);
398 					}
399 					TAILQ_INSERT_AFTER(q,
400 					    at, control, next_instrm);
401 					if (unordered) {
402 						control->on_strm_q = SCTP_ON_UNORDERED;
403 					} else {
404 						control->on_strm_q = SCTP_ON_ORDERED;
405 					}
406 					break;
407 				}
408 			}
409 		}
410 	}
411 	return (0);
412 }
413 
414 static void
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416     struct sctp_queued_to_read *control,
417     struct sctp_tmit_chunk *chk,
418     int *abort_flag, int opspot)
419 {
420 	char msg[SCTP_DIAG_INFO_LEN];
421 	struct mbuf *oper;
422 
423 	if (stcb->asoc.idata_supported) {
424 		snprintf(msg, sizeof(msg),
425 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
426 		    opspot,
427 		    control->fsn_included,
428 		    chk->rec.data.TSN_seq,
429 		    chk->rec.data.stream_number,
430 		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
431 	} else {
432 		snprintf(msg, sizeof(msg),
433 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
434 		    opspot,
435 		    control->fsn_included,
436 		    chk->rec.data.TSN_seq,
437 		    chk->rec.data.stream_number,
438 		    chk->rec.data.fsn_num,
439 		    (uint16_t) chk->rec.data.stream_seq);
440 	}
441 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
442 	sctp_m_freem(chk->data);
443 	chk->data = NULL;
444 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
447 	*abort_flag = 1;
448 }
449 
450 static void
451 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
452 {
453 	/*
454 	 * The control could not be placed and must be cleaned.
455 	 */
456 	struct sctp_tmit_chunk *chk, *nchk;
457 
458 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
460 		if (chk->data)
461 			sctp_m_freem(chk->data);
462 		chk->data = NULL;
463 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
464 	}
465 	sctp_free_a_readq(stcb, control);
466 }
467 
468 /*
469  * Queue the chunk either right into the socket buffer if it is the next one
470  * to go OR put it in the correct place in the delivery queue.  If we do
471  * append to the so_buf, keep doing so until we are out of order as
472  * long as the control's entered are non-fragmented.
473  */
474 static void
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476     struct sctp_stream_in *strm,
477     struct sctp_association *asoc,
478     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 {
480 	/*
481 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 	 * all the data in one stream this could happen quite rapidly. One
483 	 * could use the TSN to keep track of things, but this scheme breaks
484 	 * down in the other type of stream usage that could occur. Send a
485 	 * single msg to stream 0, send 4Billion messages to stream 1, now
486 	 * send a message to stream 0. You have a situation where the TSN
487 	 * has wrapped but not in the stream. Is this worth worrying about
488 	 * or should we just change our queue sort at the bottom to be by
489 	 * TSN.
490 	 *
491 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 	 * assignment this could happen... and I don't see how this would be
494 	 * a violation. So for now I am undecided an will leave the sort by
495 	 * SSN alone. Maybe a hybred approach is the answer
496 	 *
497 	 */
498 	struct sctp_queued_to_read *at;
499 	int queue_needed;
500 	uint32_t nxt_todel;
501 	struct mbuf *op_err;
502 	char msg[SCTP_DIAG_INFO_LEN];
503 
504 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
506 	}
507 	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 		/* The incoming sseq is behind where we last delivered? */
509 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 		    control->sinfo_ssn, strm->last_sequence_delivered);
511 protocol_error:
512 		/*
513 		 * throw it in the stream so it gets cleaned up in
514 		 * association destruction
515 		 */
516 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 		    strm->last_sequence_delivered, control->sinfo_tsn,
519 		    control->sinfo_stream, control->sinfo_ssn);
520 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
523 		*abort_flag = 1;
524 		return;
525 
526 	}
527 	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
528 		goto protocol_error;
529 	}
530 	queue_needed = 1;
531 	asoc->size_on_all_streams += control->length;
532 	sctp_ucount_incr(asoc->cnt_on_all_streams);
533 	nxt_todel = strm->last_sequence_delivered + 1;
534 	if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
536 		struct socket *so;
537 
538 		so = SCTP_INP_SO(stcb->sctp_ep);
539 		atomic_add_int(&stcb->asoc.refcnt, 1);
540 		SCTP_TCB_UNLOCK(stcb);
541 		SCTP_SOCKET_LOCK(so, 1);
542 		SCTP_TCB_LOCK(stcb);
543 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 			SCTP_SOCKET_UNLOCK(so, 1);
546 			return;
547 		}
548 #endif
549 		/* can be delivered right away? */
550 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
552 		}
553 		/* EY it wont be queued if it could be delivered directly */
554 		queue_needed = 0;
555 		asoc->size_on_all_streams -= control->length;
556 		sctp_ucount_decr(asoc->cnt_on_all_streams);
557 		strm->last_sequence_delivered++;
558 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 		sctp_add_to_readq(stcb->sctp_ep, stcb,
560 		    control,
561 		    &stcb->sctp_socket->so_rcv, 1,
562 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
563 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
564 			/* all delivered */
565 			nxt_todel = strm->last_sequence_delivered + 1;
566 			if ((nxt_todel == control->sinfo_ssn) &&
567 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 				asoc->size_on_all_streams -= control->length;
569 				sctp_ucount_decr(asoc->cnt_on_all_streams);
570 				if (control->on_strm_q == SCTP_ON_ORDERED) {
571 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
572 #ifdef INVARIANTS
573 				} else {
574 					panic("Huh control: %p is on_strm_q: %d",
575 					    control, control->on_strm_q);
576 #endif
577 				}
578 				control->on_strm_q = 0;
579 				strm->last_sequence_delivered++;
580 				/*
581 				 * We ignore the return of deliver_data here
582 				 * since we always can hold the chunk on the
583 				 * d-queue. And we have a finite number that
584 				 * can be delivered from the strq.
585 				 */
586 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 					sctp_log_strm_del(control, NULL,
588 					    SCTP_STR_LOG_FROM_IMMED_DEL);
589 				}
590 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 				sctp_add_to_readq(stcb->sctp_ep, stcb,
592 				    control,
593 				    &stcb->sctp_socket->so_rcv, 1,
594 				    SCTP_READ_LOCK_NOT_HELD,
595 				    SCTP_SO_LOCKED);
596 				continue;
597 			} else if (nxt_todel == control->sinfo_ssn) {
598 				*need_reasm = 1;
599 			}
600 			break;
601 		}
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 		SCTP_SOCKET_UNLOCK(so, 1);
604 #endif
605 	}
606 	if (queue_needed) {
607 		/*
608 		 * Ok, we did not deliver this guy, find the correct place
609 		 * to put it on the queue.
610 		 */
611 		if (sctp_place_control_in_stream(strm, asoc, control)) {
612 			snprintf(msg, sizeof(msg),
613 			    "Queue to str msg_id: %u duplicate",
614 			    control->msg_id);
615 			sctp_clean_up_control(stcb, control);
616 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
619 			*abort_flag = 1;
620 		}
621 	}
622 }
623 
624 
625 static void
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
627 {
628 	struct mbuf *m, *prev = NULL;
629 	struct sctp_tcb *stcb;
630 
631 	stcb = control->stcb;
632 	control->held_length = 0;
633 	control->length = 0;
634 	m = control->data;
635 	while (m) {
636 		if (SCTP_BUF_LEN(m) == 0) {
637 			/* Skip mbufs with NO length */
638 			if (prev == NULL) {
639 				/* First one */
640 				control->data = sctp_m_free(m);
641 				m = control->data;
642 			} else {
643 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 				m = SCTP_BUF_NEXT(prev);
645 			}
646 			if (m == NULL) {
647 				control->tail_mbuf = prev;
648 			}
649 			continue;
650 		}
651 		prev = m;
652 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 		if (control->on_read_q) {
654 			/*
655 			 * On read queue so we must increment the SB stuff,
656 			 * we assume caller has done any locks of SB.
657 			 */
658 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
659 		}
660 		m = SCTP_BUF_NEXT(m);
661 	}
662 	if (prev) {
663 		control->tail_mbuf = prev;
664 	}
665 }
666 
667 static void
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
669 {
670 	struct mbuf *prev = NULL;
671 	struct sctp_tcb *stcb;
672 
673 	stcb = control->stcb;
674 	if (stcb == NULL) {
675 #ifdef INVARIANTS
676 		panic("Control broken");
677 #else
678 		return;
679 #endif
680 	}
681 	if (control->tail_mbuf == NULL) {
682 		/* TSNH */
683 		control->data = m;
684 		sctp_setup_tail_pointer(control);
685 		return;
686 	}
687 	control->tail_mbuf->m_next = m;
688 	while (m) {
689 		if (SCTP_BUF_LEN(m) == 0) {
690 			/* Skip mbufs with NO length */
691 			if (prev == NULL) {
692 				/* First one */
693 				control->tail_mbuf->m_next = sctp_m_free(m);
694 				m = control->tail_mbuf->m_next;
695 			} else {
696 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 				m = SCTP_BUF_NEXT(prev);
698 			}
699 			if (m == NULL) {
700 				control->tail_mbuf = prev;
701 			}
702 			continue;
703 		}
704 		prev = m;
705 		if (control->on_read_q) {
706 			/*
707 			 * On read queue so we must increment the SB stuff,
708 			 * we assume caller has done any locks of SB.
709 			 */
710 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
711 		}
712 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 		m = SCTP_BUF_NEXT(m);
714 	}
715 	if (prev) {
716 		control->tail_mbuf = prev;
717 	}
718 }
719 
720 static void
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
722 {
723 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
724 	nc->sinfo_stream = control->sinfo_stream;
725 	nc->sinfo_ssn = control->sinfo_ssn;
726 	TAILQ_INIT(&nc->reasm);
727 	nc->top_fsn = control->top_fsn;
728 	nc->msg_id = control->msg_id;
729 	nc->sinfo_flags = control->sinfo_flags;
730 	nc->sinfo_ppid = control->sinfo_ppid;
731 	nc->sinfo_context = control->sinfo_context;
732 	nc->fsn_included = 0xffffffff;
733 	nc->sinfo_tsn = control->sinfo_tsn;
734 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 	nc->whoFrom = control->whoFrom;
737 	atomic_add_int(&nc->whoFrom->ref_count, 1);
738 	nc->stcb = control->stcb;
739 	nc->port_from = control->port_from;
740 }
741 
742 static void
743 sctp_reset_a_control(struct sctp_queued_to_read *control,
744     struct sctp_inpcb *inp, uint32_t tsn)
745 {
746 	control->fsn_included = tsn;
747 	if (control->on_read_q) {
748 		/*
749 		 * We have to purge it from there, hopefully this will work
750 		 * :-)
751 		 */
752 		TAILQ_REMOVE(&inp->read_queue, control, next);
753 		control->on_read_q = 0;
754 	}
755 }
756 
757 static int
758 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
759     struct sctp_association *asoc,
760     struct sctp_stream_in *strm,
761     struct sctp_queued_to_read *control,
762     uint32_t pd_point,
763     int inp_read_lock_held)
764 {
765 	/*
766 	 * Special handling for the old un-ordered data chunk. All the
767 	 * chunks/TSN's go to msg_id 0. So we have to do the old style
768 	 * watching to see if we have it all. If you return one, no other
769 	 * control entries on the un-ordered queue will be looked at. In
770 	 * theory there should be no others entries in reality, unless the
771 	 * guy is sending both unordered NDATA and unordered DATA...
772 	 */
773 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
774 	uint32_t fsn;
775 	struct sctp_queued_to_read *nc;
776 	int cnt_added;
777 
778 	if (control->first_frag_seen == 0) {
779 		/* Nothing we can do, we have not seen the first piece yet */
780 		return (1);
781 	}
782 	/* Collapse any we can */
783 	cnt_added = 0;
784 restart:
785 	fsn = control->fsn_included + 1;
786 	/* Now what can we add? */
787 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
788 		if (chk->rec.data.fsn_num == fsn) {
789 			/* Ok lets add it */
790 			sctp_alloc_a_readq(stcb, nc);
791 			if (nc == NULL) {
792 				break;
793 			}
794 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
795 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
796 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
797 			fsn++;
798 			cnt_added++;
799 			chk = NULL;
800 			if (control->end_added) {
801 				/* We are done */
802 				if (!TAILQ_EMPTY(&control->reasm)) {
803 					/*
804 					 * Ok we have to move anything left
805 					 * on the control queue to a new
806 					 * control.
807 					 */
808 					sctp_build_readq_entry_from_ctl(nc, control);
809 					tchk = TAILQ_FIRST(&control->reasm);
810 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
811 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
812 						nc->first_frag_seen = 1;
813 						nc->fsn_included = tchk->rec.data.fsn_num;
814 						nc->data = tchk->data;
815 						nc->sinfo_ppid = tchk->rec.data.payloadtype;
816 						nc->sinfo_tsn = tchk->rec.data.TSN_seq;
817 						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
818 						tchk->data = NULL;
819 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
820 						sctp_setup_tail_pointer(nc);
821 						tchk = TAILQ_FIRST(&control->reasm);
822 					}
823 					/* Spin the rest onto the queue */
824 					while (tchk) {
825 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
826 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
827 						tchk = TAILQ_FIRST(&control->reasm);
828 					}
829 					/*
830 					 * Now lets add it to the queue
831 					 * after removing control
832 					 */
833 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
834 					nc->on_strm_q = SCTP_ON_UNORDERED;
835 					if (control->on_strm_q) {
836 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
837 						control->on_strm_q = 0;
838 					}
839 				}
840 				if (control->pdapi_started) {
841 					strm->pd_api_started = 0;
842 					control->pdapi_started = 0;
843 				}
844 				if (control->on_strm_q) {
845 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
846 					control->on_strm_q = 0;
847 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
848 				}
849 				if (control->on_read_q == 0) {
850 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
851 					    &stcb->sctp_socket->so_rcv, control->end_added,
852 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
853 				}
854 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
855 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
856 					/*
857 					 * Switch to the new guy and
858 					 * continue
859 					 */
860 					control = nc;
861 					goto restart;
862 				} else {
863 					if (nc->on_strm_q == 0) {
864 						sctp_free_a_readq(stcb, nc);
865 					}
866 				}
867 				return (1);
868 			} else {
869 				sctp_free_a_readq(stcb, nc);
870 			}
871 		} else {
872 			/* Can't add more */
873 			break;
874 		}
875 	}
876 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
877 		strm->pd_api_started = 1;
878 		control->pdapi_started = 1;
879 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
880 		    &stcb->sctp_socket->so_rcv, control->end_added,
881 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
882 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
883 		return (0);
884 	} else {
885 		return (1);
886 	}
887 }
888 
889 static void
890 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
891     struct sctp_association *asoc,
892     struct sctp_queued_to_read *control,
893     struct sctp_tmit_chunk *chk,
894     int *abort_flag)
895 {
896 	struct sctp_tmit_chunk *at;
897 	int inserted;
898 
899 	/*
900 	 * Here we need to place the chunk into the control structure sorted
901 	 * in the correct order.
902 	 */
903 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
904 		/* Its the very first one. */
905 		SCTPDBG(SCTP_DEBUG_XXX,
906 		    "chunk is a first fsn: %u becomes fsn_included\n",
907 		    chk->rec.data.fsn_num);
908 		if (control->first_frag_seen) {
909 			/*
910 			 * In old un-ordered we can reassembly on one
911 			 * control multiple messages. As long as the next
912 			 * FIRST is greater then the old first (TSN i.e. FSN
913 			 * wise)
914 			 */
915 			struct mbuf *tdata;
916 			uint32_t tmp;
917 
918 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
919 				/*
920 				 * Easy way the start of a new guy beyond
921 				 * the lowest
922 				 */
923 				goto place_chunk;
924 			}
925 			if ((chk->rec.data.fsn_num == control->fsn_included) ||
926 			    (control->pdapi_started)) {
927 				/*
928 				 * Ok this should not happen, if it does we
929 				 * started the pd-api on the higher TSN
930 				 * (since the equals part is a TSN failure
931 				 * it must be that).
932 				 *
933 				 * We are completly hosed in that case since I
934 				 * have no way to recover. This really will
935 				 * only happen if we can get more TSN's
936 				 * higher before the pd-api-point.
937 				 */
938 				sctp_abort_in_reasm(stcb, control, chk,
939 				    abort_flag,
940 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
941 
942 				return;
943 			}
944 			/*
945 			 * Ok we have two firsts and the one we just got is
946 			 * smaller than the one we previously placed.. yuck!
947 			 * We must swap them out.
948 			 */
949 			/* swap the mbufs */
950 			tdata = control->data;
951 			control->data = chk->data;
952 			chk->data = tdata;
953 			/* Save the lengths */
954 			chk->send_size = control->length;
955 			/* Recompute length of control and tail pointer */
956 			sctp_setup_tail_pointer(control);
957 			/* Fix the FSN included */
958 			tmp = control->fsn_included;
959 			control->fsn_included = chk->rec.data.fsn_num;
960 			chk->rec.data.fsn_num = tmp;
961 			/* Fix the TSN included */
962 			tmp = control->sinfo_tsn;
963 			control->sinfo_tsn = chk->rec.data.TSN_seq;
964 			chk->rec.data.TSN_seq = tmp;
965 			/* Fix the PPID included */
966 			tmp = control->sinfo_ppid;
967 			control->sinfo_ppid = chk->rec.data.payloadtype;
968 			chk->rec.data.payloadtype = tmp;
969 			/* Fix tail pointer */
970 			goto place_chunk;
971 		}
972 		control->first_frag_seen = 1;
973 		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
974 		control->sinfo_tsn = chk->rec.data.TSN_seq;
975 		control->sinfo_ppid = chk->rec.data.payloadtype;
976 		control->data = chk->data;
977 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
978 		chk->data = NULL;
979 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
980 		sctp_setup_tail_pointer(control);
981 		return;
982 	}
983 place_chunk:
984 	inserted = 0;
985 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
986 		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
987 			/*
988 			 * This one in queue is bigger than the new one,
989 			 * insert the new one before at.
990 			 */
991 			asoc->size_on_reasm_queue += chk->send_size;
992 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
993 			inserted = 1;
994 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
995 			break;
996 		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
997 			/*
998 			 * They sent a duplicate fsn number. This really
999 			 * should not happen since the FSN is a TSN and it
1000 			 * should have been dropped earlier.
1001 			 */
1002 			sctp_abort_in_reasm(stcb, control, chk,
1003 			    abort_flag,
1004 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1005 			return;
1006 		}
1007 	}
1008 	if (inserted == 0) {
1009 		/* Its at the end */
1010 		asoc->size_on_reasm_queue += chk->send_size;
1011 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1012 		control->top_fsn = chk->rec.data.fsn_num;
1013 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1014 	}
1015 }
1016 
1017 static int
1018 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1019     struct sctp_stream_in *strm, int inp_read_lock_held)
1020 {
1021 	/*
1022 	 * Given a stream, strm, see if any of the SSN's on it that are
1023 	 * fragmented are ready to deliver. If so go ahead and place them on
1024 	 * the read queue. In so placing if we have hit the end, then we
1025 	 * need to remove them from the stream's queue.
1026 	 */
1027 	struct sctp_queued_to_read *control, *nctl = NULL;
1028 	uint32_t next_to_del;
1029 	uint32_t pd_point;
1030 	int ret = 0;
1031 
1032 	if (stcb->sctp_socket) {
1033 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1034 		    stcb->sctp_ep->partial_delivery_point);
1035 	} else {
1036 		pd_point = stcb->sctp_ep->partial_delivery_point;
1037 	}
1038 	control = TAILQ_FIRST(&strm->uno_inqueue);
1039 
1040 	if ((control) &&
1041 	    (asoc->idata_supported == 0)) {
1042 		/* Special handling needed for "old" data format */
1043 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1044 			goto done_un;
1045 		}
1046 	}
1047 	if (strm->pd_api_started) {
1048 		/* Can't add more */
1049 		return (0);
1050 	}
1051 	while (control) {
1052 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1053 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1054 		nctl = TAILQ_NEXT(control, next_instrm);
1055 		if (control->end_added) {
1056 			/* We just put the last bit on */
1057 			if (control->on_strm_q) {
1058 #ifdef INVARIANTS
1059 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1060 					panic("Huh control: %p on_q: %d -- not unordered?",
1061 					    control, control->on_strm_q);
1062 				}
1063 #endif
1064 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1065 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1066 				control->on_strm_q = 0;
1067 			}
1068 			if (control->on_read_q == 0) {
1069 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1070 				    control,
1071 				    &stcb->sctp_socket->so_rcv, control->end_added,
1072 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1073 			}
1074 		} else {
1075 			/* Can we do a PD-API for this un-ordered guy? */
1076 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1077 				strm->pd_api_started = 1;
1078 				control->pdapi_started = 1;
1079 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1080 				    control,
1081 				    &stcb->sctp_socket->so_rcv, control->end_added,
1082 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1083 
1084 				break;
1085 			}
1086 		}
1087 		control = nctl;
1088 	}
1089 done_un:
1090 	control = TAILQ_FIRST(&strm->inqueue);
1091 	if (strm->pd_api_started) {
1092 		/* Can't add more */
1093 		return (0);
1094 	}
1095 	if (control == NULL) {
1096 		return (ret);
1097 	}
1098 	if (strm->last_sequence_delivered == control->sinfo_ssn) {
1099 		/*
1100 		 * Ok the guy at the top was being partially delivered
1101 		 * completed, so we remove it. Note the pd_api flag was
1102 		 * taken off when the chunk was merged on in
1103 		 * sctp_queue_data_for_reasm below.
1104 		 */
1105 		nctl = TAILQ_NEXT(control, next_instrm);
1106 		SCTPDBG(SCTP_DEBUG_XXX,
1107 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1108 		    control, control->end_added, control->sinfo_ssn,
1109 		    control->top_fsn, control->fsn_included,
1110 		    strm->last_sequence_delivered);
1111 		if (control->end_added) {
1112 			if (control->on_strm_q) {
1113 #ifdef INVARIANTS
1114 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1115 					panic("Huh control: %p on_q: %d -- not ordered?",
1116 					    control, control->on_strm_q);
1117 				}
1118 #endif
1119 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1120 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1121 				control->on_strm_q = 0;
1122 			}
1123 			if (strm->pd_api_started && control->pdapi_started) {
1124 				control->pdapi_started = 0;
1125 				strm->pd_api_started = 0;
1126 			}
1127 			if (control->on_read_q == 0) {
1128 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1129 				    control,
1130 				    &stcb->sctp_socket->so_rcv, control->end_added,
1131 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1132 			}
1133 			control = nctl;
1134 		}
1135 	}
1136 	if (strm->pd_api_started) {
1137 		/*
1138 		 * Can't add more must have gotten an un-ordered above being
1139 		 * partially delivered.
1140 		 */
1141 		return (0);
1142 	}
1143 deliver_more:
1144 	next_to_del = strm->last_sequence_delivered + 1;
1145 	if (control) {
1146 		SCTPDBG(SCTP_DEBUG_XXX,
1147 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1148 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1149 		    next_to_del);
1150 		nctl = TAILQ_NEXT(control, next_instrm);
1151 		if ((control->sinfo_ssn == next_to_del) &&
1152 		    (control->first_frag_seen)) {
1153 			int done;
1154 
1155 			/* Ok we can deliver it onto the stream. */
1156 			if (control->end_added) {
1157 				/* We are done with it afterwards */
1158 				if (control->on_strm_q) {
1159 #ifdef INVARIANTS
1160 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1161 						panic("Huh control: %p on_q: %d -- not ordered?",
1162 						    control, control->on_strm_q);
1163 					}
1164 #endif
1165 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1166 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1167 					control->on_strm_q = 0;
1168 				}
1169 				ret++;
1170 			}
1171 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1172 				/*
1173 				 * A singleton now slipping through - mark
1174 				 * it non-revokable too
1175 				 */
1176 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1177 			} else if (control->end_added == 0) {
1178 				/*
1179 				 * Check if we can defer adding until its
1180 				 * all there
1181 				 */
1182 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1183 					/*
1184 					 * Don't need it or cannot add more
1185 					 * (one being delivered that way)
1186 					 */
1187 					goto out;
1188 				}
1189 			}
1190 			done = (control->end_added) && (control->last_frag_seen);
1191 			if (control->on_read_q == 0) {
1192 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1193 				    control,
1194 				    &stcb->sctp_socket->so_rcv, control->end_added,
1195 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1196 			}
1197 			strm->last_sequence_delivered = next_to_del;
1198 			if (done) {
1199 				control = nctl;
1200 				goto deliver_more;
1201 			} else {
1202 				/* We are now doing PD API */
1203 				strm->pd_api_started = 1;
1204 				control->pdapi_started = 1;
1205 			}
1206 		}
1207 	}
1208 out:
1209 	return (ret);
1210 }
1211 
1212 
1213 void
1214 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1215     struct sctp_stream_in *strm,
1216     struct sctp_tcb *stcb, struct sctp_association *asoc,
1217     struct sctp_tmit_chunk *chk, int hold_rlock)
1218 {
1219 	/*
1220 	 * Given a control and a chunk, merge the data from the chk onto the
1221 	 * control and free up the chunk resources.
1222 	 */
1223 	int i_locked = 0;
1224 
1225 	if (control->on_read_q && (hold_rlock == 0)) {
1226 		/*
1227 		 * Its being pd-api'd so we must do some locks.
1228 		 */
1229 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1230 		i_locked = 1;
1231 	}
1232 	if (control->data == NULL) {
1233 		control->data = chk->data;
1234 		sctp_setup_tail_pointer(control);
1235 	} else {
1236 		sctp_add_to_tail_pointer(control, chk->data);
1237 	}
1238 	control->fsn_included = chk->rec.data.fsn_num;
1239 	asoc->size_on_reasm_queue -= chk->send_size;
1240 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1241 	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1242 	chk->data = NULL;
1243 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1244 		control->first_frag_seen = 1;
1245 	}
1246 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1247 		/* Its complete */
1248 		if ((control->on_strm_q) && (control->on_read_q)) {
1249 			if (control->pdapi_started) {
1250 				control->pdapi_started = 0;
1251 				strm->pd_api_started = 0;
1252 			}
1253 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1254 				/* Unordered */
1255 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1256 				control->on_strm_q = 0;
1257 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1258 				/* Ordered */
1259 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1260 				control->on_strm_q = 0;
1261 #ifdef INVARIANTS
1262 			} else if (control->on_strm_q) {
1263 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1264 				    control->on_strm_q);
1265 #endif
1266 			}
1267 		}
1268 		control->end_added = 1;
1269 		control->last_frag_seen = 1;
1270 	}
1271 	if (i_locked) {
1272 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1273 	}
1274 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1275 }
1276 
1277 /*
1278  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1279  * queue, see if anthing can be delivered. If so pull it off (or as much as
1280  * we can. If we run out of space then we must dump what we can and set the
1281  * appropriate flag to say we queued what we could.
1282  */
1283 static void
1284 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1285     struct sctp_stream_in *strm,
1286     struct sctp_queued_to_read *control,
1287     struct sctp_tmit_chunk *chk,
1288     int created_control,
1289     int *abort_flag, uint32_t tsn)
1290 {
1291 	uint32_t next_fsn;
1292 	struct sctp_tmit_chunk *at, *nat;
1293 	int do_wakeup, unordered;
1294 
1295 	/*
1296 	 * For old un-ordered data chunks.
1297 	 */
1298 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1299 		unordered = 1;
1300 	} else {
1301 		unordered = 0;
1302 	}
1303 	/* Must be added to the stream-in queue */
1304 	if (created_control) {
1305 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1306 			/* Duplicate SSN? */
1307 			sctp_clean_up_control(stcb, control);
1308 			sctp_abort_in_reasm(stcb, control, chk,
1309 			    abort_flag,
1310 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1311 			return;
1312 		}
1313 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1314 			/*
1315 			 * Ok we created this control and now lets validate
1316 			 * that its legal i.e. there is a B bit set, if not
1317 			 * and we have up to the cum-ack then its invalid.
1318 			 */
1319 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1320 				sctp_abort_in_reasm(stcb, control, chk,
1321 				    abort_flag,
1322 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1323 				return;
1324 			}
1325 		}
1326 	}
1327 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1328 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1329 		return;
1330 	}
1331 	/*
1332 	 * Ok we must queue the chunk into the reasembly portion: o if its
1333 	 * the first it goes to the control mbuf. o if its not first but the
1334 	 * next in sequence it goes to the control, and each succeeding one
1335 	 * in order also goes. o if its not in order we place it on the list
1336 	 * in its place.
1337 	 */
1338 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1339 		/* Its the very first one. */
1340 		SCTPDBG(SCTP_DEBUG_XXX,
1341 		    "chunk is a first fsn: %u becomes fsn_included\n",
1342 		    chk->rec.data.fsn_num);
1343 		if (control->first_frag_seen) {
1344 			/*
1345 			 * Error on senders part, they either sent us two
1346 			 * data chunks with FIRST, or they sent two
1347 			 * un-ordered chunks that were fragmented at the
1348 			 * same time in the same stream.
1349 			 */
1350 			sctp_abort_in_reasm(stcb, control, chk,
1351 			    abort_flag,
1352 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1353 			return;
1354 		}
1355 		control->first_frag_seen = 1;
1356 		control->fsn_included = chk->rec.data.fsn_num;
1357 		control->data = chk->data;
1358 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1359 		chk->data = NULL;
1360 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1361 		sctp_setup_tail_pointer(control);
1362 	} else {
1363 		/* Place the chunk in our list */
1364 		int inserted = 0;
1365 
1366 		if (control->last_frag_seen == 0) {
1367 			/* Still willing to raise highest FSN seen */
1368 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1369 				SCTPDBG(SCTP_DEBUG_XXX,
1370 				    "We have a new top_fsn: %u\n",
1371 				    chk->rec.data.fsn_num);
1372 				control->top_fsn = chk->rec.data.fsn_num;
1373 			}
1374 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1375 				SCTPDBG(SCTP_DEBUG_XXX,
1376 				    "The last fsn is now in place fsn: %u\n",
1377 				    chk->rec.data.fsn_num);
1378 				control->last_frag_seen = 1;
1379 			}
1380 			if (asoc->idata_supported || control->first_frag_seen) {
1381 				/*
1382 				 * For IDATA we always check since we know
1383 				 * that the first fragment is 0. For old
1384 				 * DATA we have to receive the first before
1385 				 * we know the first FSN (which is the TSN).
1386 				 */
1387 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1388 					/*
1389 					 * We have already delivered up to
1390 					 * this so its a dup
1391 					 */
1392 					sctp_abort_in_reasm(stcb, control, chk,
1393 					    abort_flag,
1394 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1395 					return;
1396 				}
1397 			}
1398 		} else {
1399 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1400 				/* Second last? huh? */
1401 				SCTPDBG(SCTP_DEBUG_XXX,
1402 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1403 				    chk->rec.data.fsn_num, control->top_fsn);
1404 				sctp_abort_in_reasm(stcb, control,
1405 				    chk, abort_flag,
1406 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1407 				return;
1408 			}
1409 			if (asoc->idata_supported || control->first_frag_seen) {
1410 				/*
1411 				 * For IDATA we always check since we know
1412 				 * that the first fragment is 0. For old
1413 				 * DATA we have to receive the first before
1414 				 * we know the first FSN (which is the TSN).
1415 				 */
1416 
1417 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1418 					/*
1419 					 * We have already delivered up to
1420 					 * this so its a dup
1421 					 */
1422 					SCTPDBG(SCTP_DEBUG_XXX,
1423 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1424 					    chk->rec.data.fsn_num, control->fsn_included);
1425 					sctp_abort_in_reasm(stcb, control, chk,
1426 					    abort_flag,
1427 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1428 					return;
1429 				}
1430 			}
1431 			/*
1432 			 * validate not beyond top FSN if we have seen last
1433 			 * one
1434 			 */
1435 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1436 				SCTPDBG(SCTP_DEBUG_XXX,
1437 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1438 				    chk->rec.data.fsn_num,
1439 				    control->top_fsn);
1440 				sctp_abort_in_reasm(stcb, control, chk,
1441 				    abort_flag,
1442 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1443 				return;
1444 			}
1445 		}
1446 		/*
1447 		 * If we reach here, we need to place the new chunk in the
1448 		 * reassembly for this control.
1449 		 */
1450 		SCTPDBG(SCTP_DEBUG_XXX,
1451 		    "chunk is a not first fsn: %u needs to be inserted\n",
1452 		    chk->rec.data.fsn_num);
1453 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1454 			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1455 				/*
1456 				 * This one in queue is bigger than the new
1457 				 * one, insert the new one before at.
1458 				 */
1459 				SCTPDBG(SCTP_DEBUG_XXX,
1460 				    "Insert it before fsn: %u\n",
1461 				    at->rec.data.fsn_num);
1462 				asoc->size_on_reasm_queue += chk->send_size;
1463 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1464 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1465 				inserted = 1;
1466 				break;
1467 			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1468 				/*
1469 				 * Gak, He sent me a duplicate str seq
1470 				 * number
1471 				 */
1472 				/*
1473 				 * foo bar, I guess I will just free this
1474 				 * new guy, should we abort too? FIX ME
1475 				 * MAYBE? Or it COULD be that the SSN's have
1476 				 * wrapped. Maybe I should compare to TSN
1477 				 * somehow... sigh for now just blow away
1478 				 * the chunk!
1479 				 */
1480 				SCTPDBG(SCTP_DEBUG_XXX,
1481 				    "Duplicate to fsn: %u -- abort\n",
1482 				    at->rec.data.fsn_num);
1483 				sctp_abort_in_reasm(stcb, control,
1484 				    chk, abort_flag,
1485 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1486 				return;
1487 			}
1488 		}
1489 		if (inserted == 0) {
1490 			/* Goes on the end */
1491 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1492 			    chk->rec.data.fsn_num);
1493 			asoc->size_on_reasm_queue += chk->send_size;
1494 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1495 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1496 		}
1497 	}
1498 	/*
1499 	 * Ok lets see if we can suck any up into the control structure that
1500 	 * are in seq if it makes sense.
1501 	 */
1502 	do_wakeup = 0;
1503 	/*
1504 	 * If the first fragment has not been seen there is no sense in
1505 	 * looking.
1506 	 */
1507 	if (control->first_frag_seen) {
1508 		next_fsn = control->fsn_included + 1;
1509 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1510 			if (at->rec.data.fsn_num == next_fsn) {
1511 				/* We can add this one now to the control */
1512 				SCTPDBG(SCTP_DEBUG_XXX,
1513 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1514 				    control, at,
1515 				    at->rec.data.fsn_num,
1516 				    next_fsn, control->fsn_included);
1517 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1518 				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1519 				if (control->on_read_q) {
1520 					do_wakeup = 1;
1521 				}
1522 				next_fsn++;
1523 				if (control->end_added && control->pdapi_started) {
1524 					if (strm->pd_api_started) {
1525 						strm->pd_api_started = 0;
1526 						control->pdapi_started = 0;
1527 					}
1528 					if (control->on_read_q == 0) {
1529 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1530 						    control,
1531 						    &stcb->sctp_socket->so_rcv, control->end_added,
1532 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1533 						do_wakeup = 1;
1534 					}
1535 					break;
1536 				}
1537 			} else {
1538 				break;
1539 			}
1540 		}
1541 	}
1542 	if (do_wakeup) {
1543 		/* Need to wakeup the reader */
1544 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1545 	}
1546 }
1547 
1548 static struct sctp_queued_to_read *
1549 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1550 {
1551 	struct sctp_queued_to_read *control;
1552 
1553 	if (ordered) {
1554 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1555 			if (control->msg_id == msg_id) {
1556 				break;
1557 			}
1558 		}
1559 	} else {
1560 		if (old) {
1561 			control = TAILQ_FIRST(&strm->uno_inqueue);
1562 			return (control);
1563 		}
1564 		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1565 			if (control->msg_id == msg_id) {
1566 				break;
1567 			}
1568 		}
1569 	}
1570 	return (control);
1571 }
1572 
1573 static int
1574 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1575     struct mbuf **m, int offset, int chk_length,
1576     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1577     int *break_flag, int last_chunk, uint8_t chtype)
1578 {
1579 	/* Process a data chunk */
1580 	/* struct sctp_tmit_chunk *chk; */
1581 	struct sctp_data_chunk *ch;
1582 	struct sctp_idata_chunk *nch, chunk_buf;
1583 	struct sctp_tmit_chunk *chk;
1584 	uint32_t tsn, fsn, gap, msg_id;
1585 	struct mbuf *dmbuf;
1586 	int the_len;
1587 	int need_reasm_check = 0;
1588 	uint16_t strmno;
1589 	struct mbuf *op_err;
1590 	char msg[SCTP_DIAG_INFO_LEN];
1591 	struct sctp_queued_to_read *control = NULL;
1592 	uint32_t protocol_id;
1593 	uint8_t chunk_flags;
1594 	struct sctp_stream_reset_list *liste;
1595 	struct sctp_stream_in *strm;
1596 	int ordered;
1597 	size_t clen;
1598 	int created_control = 0;
1599 	uint8_t old_data;
1600 
1601 	chk = NULL;
1602 	if (chtype == SCTP_IDATA) {
1603 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1604 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1605 		ch = (struct sctp_data_chunk *)nch;
1606 		clen = sizeof(struct sctp_idata_chunk);
1607 		tsn = ntohl(ch->dp.tsn);
1608 		msg_id = ntohl(nch->dp.msg_id);
1609 		protocol_id = nch->dp.ppid_fsn.protocol_id;
1610 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1611 			fsn = 0;
1612 		else
1613 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1614 		old_data = 0;
1615 	} else {
1616 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1617 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1618 		tsn = ntohl(ch->dp.tsn);
1619 		protocol_id = ch->dp.protocol_id;
1620 		clen = sizeof(struct sctp_data_chunk);
1621 		fsn = tsn;
1622 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1623 		nch = NULL;
1624 		old_data = 1;
1625 	}
1626 	chunk_flags = ch->ch.chunk_flags;
1627 	if ((size_t)chk_length == clen) {
1628 		/*
1629 		 * Need to send an abort since we had a empty data chunk.
1630 		 */
1631 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1632 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1633 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1634 		*abort_flag = 1;
1635 		return (0);
1636 	}
1637 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1638 		asoc->send_sack = 1;
1639 	}
1640 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1641 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1642 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1643 	}
1644 	if (stcb == NULL) {
1645 		return (0);
1646 	}
1647 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1648 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1649 		/* It is a duplicate */
1650 		SCTP_STAT_INCR(sctps_recvdupdata);
1651 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1652 			/* Record a dup for the next outbound sack */
1653 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1654 			asoc->numduptsns++;
1655 		}
1656 		asoc->send_sack = 1;
1657 		return (0);
1658 	}
1659 	/* Calculate the number of TSN's between the base and this TSN */
1660 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1661 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1662 		/* Can't hold the bit in the mapping at max array, toss it */
1663 		return (0);
1664 	}
1665 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1666 		SCTP_TCB_LOCK_ASSERT(stcb);
1667 		if (sctp_expand_mapping_array(asoc, gap)) {
1668 			/* Can't expand, drop it */
1669 			return (0);
1670 		}
1671 	}
1672 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1673 		*high_tsn = tsn;
1674 	}
1675 	/* See if we have received this one already */
1676 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1677 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1678 		SCTP_STAT_INCR(sctps_recvdupdata);
1679 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1680 			/* Record a dup for the next outbound sack */
1681 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1682 			asoc->numduptsns++;
1683 		}
1684 		asoc->send_sack = 1;
1685 		return (0);
1686 	}
1687 	/*
1688 	 * Check to see about the GONE flag, duplicates would cause a sack
1689 	 * to be sent up above
1690 	 */
1691 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1692 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1693 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1694 		/*
1695 		 * wait a minute, this guy is gone, there is no longer a
1696 		 * receiver. Send peer an ABORT!
1697 		 */
1698 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1699 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1700 		*abort_flag = 1;
1701 		return (0);
1702 	}
1703 	/*
1704 	 * Now before going further we see if there is room. If NOT then we
1705 	 * MAY let one through only IF this TSN is the one we are waiting
1706 	 * for on a partial delivery API.
1707 	 */
1708 
1709 	/* Is the stream valid? */
1710 	strmno = ntohs(ch->dp.stream_id);
1711 
1712 	if (strmno >= asoc->streamincnt) {
1713 		struct sctp_error_invalid_stream *cause;
1714 
1715 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1716 		    0, M_NOWAIT, 1, MT_DATA);
1717 		if (op_err != NULL) {
1718 			/* add some space up front so prepend will work well */
1719 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1720 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1721 			/*
1722 			 * Error causes are just param's and this one has
1723 			 * two back to back phdr, one with the error type
1724 			 * and size, the other with the streamid and a rsvd
1725 			 */
1726 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1727 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1728 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1729 			cause->stream_id = ch->dp.stream_id;
1730 			cause->reserved = htons(0);
1731 			sctp_queue_op_err(stcb, op_err);
1732 		}
1733 		SCTP_STAT_INCR(sctps_badsid);
1734 		SCTP_TCB_LOCK_ASSERT(stcb);
1735 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1736 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1737 			asoc->highest_tsn_inside_nr_map = tsn;
1738 		}
1739 		if (tsn == (asoc->cumulative_tsn + 1)) {
1740 			/* Update cum-ack */
1741 			asoc->cumulative_tsn = tsn;
1742 		}
1743 		return (0);
1744 	}
1745 	strm = &asoc->strmin[strmno];
1746 	/*
1747 	 * If its a fragmented message, lets see if we can find the control
1748 	 * on the reassembly queues.
1749 	 */
1750 	if ((chtype == SCTP_IDATA) &&
1751 	    ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1752 	    (fsn == 0)) {
1753 		/*
1754 		 * The first *must* be fsn 0, and other (middle/end) pieces
1755 		 * can *not* be fsn 0. XXX: This can happen in case of a
1756 		 * wrap around. Ignore is for now.
1757 		 */
1758 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1759 		    msg_id, chunk_flags);
1760 		goto err_out;
1761 	}
1762 	control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
1763 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1764 	    chunk_flags, control);
1765 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1766 		/* See if we can find the re-assembly entity */
1767 		if (control != NULL) {
1768 			/* We found something, does it belong? */
1769 			if (ordered && (msg_id != control->sinfo_ssn)) {
1770 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", msg_id);
1771 		err_out:
1772 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1773 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1774 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1775 				*abort_flag = 1;
1776 				return (0);
1777 			}
1778 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1779 				/*
1780 				 * We can't have a switched order with an
1781 				 * unordered chunk
1782 				 */
1783 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1784 				    tsn);
1785 				goto err_out;
1786 			}
1787 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1788 				/*
1789 				 * We can't have a switched unordered with a
1790 				 * ordered chunk
1791 				 */
1792 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1793 				    tsn);
1794 				goto err_out;
1795 			}
1796 		}
1797 	} else {
1798 		/*
1799 		 * Its a complete segment. Lets validate we don't have a
1800 		 * re-assembly going on with the same Stream/Seq (for
1801 		 * ordered) or in the same Stream for unordered.
1802 		 */
1803 		if (control != NULL) {
1804 			if (ordered || (old_data == 0)) {
1805 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1806 				    chunk_flags, msg_id);
1807 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", msg_id);
1808 				goto err_out;
1809 			} else {
1810 				if ((tsn == control->fsn_included + 1) &&
1811 				    (control->end_added == 0)) {
1812 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1813 					goto err_out;
1814 				} else {
1815 					control = NULL;
1816 				}
1817 			}
1818 		}
1819 	}
1820 	/* now do the tests */
1821 	if (((asoc->cnt_on_all_streams +
1822 	    asoc->cnt_on_reasm_queue +
1823 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1824 	    (((int)asoc->my_rwnd) <= 0)) {
1825 		/*
1826 		 * When we have NO room in the rwnd we check to make sure
1827 		 * the reader is doing its job...
1828 		 */
1829 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1830 			/* some to read, wake-up */
1831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1832 			struct socket *so;
1833 
1834 			so = SCTP_INP_SO(stcb->sctp_ep);
1835 			atomic_add_int(&stcb->asoc.refcnt, 1);
1836 			SCTP_TCB_UNLOCK(stcb);
1837 			SCTP_SOCKET_LOCK(so, 1);
1838 			SCTP_TCB_LOCK(stcb);
1839 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1840 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1841 				/* assoc was freed while we were unlocked */
1842 				SCTP_SOCKET_UNLOCK(so, 1);
1843 				return (0);
1844 			}
1845 #endif
1846 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1847 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1848 			SCTP_SOCKET_UNLOCK(so, 1);
1849 #endif
1850 		}
1851 		/* now is it in the mapping array of what we have accepted? */
1852 		if (nch == NULL) {
1853 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1854 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1855 				/* Nope not in the valid range dump it */
1856 		dump_packet:
1857 				sctp_set_rwnd(stcb, asoc);
1858 				if ((asoc->cnt_on_all_streams +
1859 				    asoc->cnt_on_reasm_queue +
1860 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1861 					SCTP_STAT_INCR(sctps_datadropchklmt);
1862 				} else {
1863 					SCTP_STAT_INCR(sctps_datadroprwnd);
1864 				}
1865 				*break_flag = 1;
1866 				return (0);
1867 			}
1868 		} else {
1869 			if (control == NULL) {
1870 				goto dump_packet;
1871 			}
1872 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1873 				goto dump_packet;
1874 			}
1875 		}
1876 	}
1877 #ifdef SCTP_ASOCLOG_OF_TSNS
1878 	SCTP_TCB_LOCK_ASSERT(stcb);
1879 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1880 		asoc->tsn_in_at = 0;
1881 		asoc->tsn_in_wrapped = 1;
1882 	}
1883 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1884 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1885 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1886 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1887 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1888 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1889 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1890 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1891 	asoc->tsn_in_at++;
1892 #endif
1893 	/*
1894 	 * Before we continue lets validate that we are not being fooled by
1895 	 * an evil attacker. We can only have Nk chunks based on our TSN
1896 	 * spread allowed by the mapping array N * 8 bits, so there is no
1897 	 * way our stream sequence numbers could have wrapped. We of course
1898 	 * only validate the FIRST fragment so the bit must be set.
1899 	 */
1900 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1901 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1902 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1903 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1904 		/* The incoming sseq is behind where we last delivered? */
1905 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1906 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1907 
1908 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1909 		    asoc->strmin[strmno].last_sequence_delivered,
1910 		    tsn, strmno, msg_id);
1911 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1912 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1913 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1914 		*abort_flag = 1;
1915 		return (0);
1916 	}
1917 	/************************************
1918 	 * From here down we may find ch-> invalid
1919 	 * so its a good idea NOT to use it.
1920 	 *************************************/
1921 	if (nch) {
1922 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1923 	} else {
1924 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1925 	}
1926 	if (last_chunk == 0) {
1927 		if (nch) {
1928 			dmbuf = SCTP_M_COPYM(*m,
1929 			    (offset + sizeof(struct sctp_idata_chunk)),
1930 			    the_len, M_NOWAIT);
1931 		} else {
1932 			dmbuf = SCTP_M_COPYM(*m,
1933 			    (offset + sizeof(struct sctp_data_chunk)),
1934 			    the_len, M_NOWAIT);
1935 		}
1936 #ifdef SCTP_MBUF_LOGGING
1937 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1938 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1939 		}
1940 #endif
1941 	} else {
1942 		/* We can steal the last chunk */
1943 		int l_len;
1944 
1945 		dmbuf = *m;
1946 		/* lop off the top part */
1947 		if (nch) {
1948 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1949 		} else {
1950 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1951 		}
1952 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1953 			l_len = SCTP_BUF_LEN(dmbuf);
1954 		} else {
1955 			/*
1956 			 * need to count up the size hopefully does not hit
1957 			 * this to often :-0
1958 			 */
1959 			struct mbuf *lat;
1960 
1961 			l_len = 0;
1962 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1963 				l_len += SCTP_BUF_LEN(lat);
1964 			}
1965 		}
1966 		if (l_len > the_len) {
1967 			/* Trim the end round bytes off  too */
1968 			m_adj(dmbuf, -(l_len - the_len));
1969 		}
1970 	}
1971 	if (dmbuf == NULL) {
1972 		SCTP_STAT_INCR(sctps_nomem);
1973 		return (0);
1974 	}
1975 	/*
1976 	 * Now no matter what we need a control, get one if we don't have
1977 	 * one (we may have gotten it above when we found the message was
1978 	 * fragmented
1979 	 */
1980 	if (control == NULL) {
1981 		sctp_alloc_a_readq(stcb, control);
1982 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1983 		    protocol_id,
1984 		    strmno, msg_id,
1985 		    chunk_flags,
1986 		    NULL, fsn, msg_id);
1987 		if (control == NULL) {
1988 			SCTP_STAT_INCR(sctps_nomem);
1989 			return (0);
1990 		}
1991 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1992 			control->data = dmbuf;
1993 			control->tail_mbuf = NULL;
1994 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1995 			control->top_fsn = control->fsn_included = fsn;
1996 		}
1997 		created_control = 1;
1998 	}
1999 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
2000 	    chunk_flags, ordered, msg_id, control);
2001 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2002 	    TAILQ_EMPTY(&asoc->resetHead) &&
2003 	    ((ordered == 0) ||
2004 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
2005 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
2006 		/* Candidate for express delivery */
2007 		/*
2008 		 * Its not fragmented, No PD-API is up, Nothing in the
2009 		 * delivery queue, Its un-ordered OR ordered and the next to
2010 		 * deliver AND nothing else is stuck on the stream queue,
2011 		 * And there is room for it in the socket buffer. Lets just
2012 		 * stuff it up the buffer....
2013 		 */
2014 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2015 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2016 			asoc->highest_tsn_inside_nr_map = tsn;
2017 		}
2018 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
2019 		    control, msg_id);
2020 
2021 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2022 		    control, &stcb->sctp_socket->so_rcv,
2023 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2024 
2025 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2026 			/* for ordered, bump what we delivered */
2027 			strm->last_sequence_delivered++;
2028 		}
2029 		SCTP_STAT_INCR(sctps_recvexpress);
2030 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2031 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
2032 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2033 		}
2034 		control = NULL;
2035 		goto finish_express_del;
2036 	}
2037 	/* Now will we need a chunk too? */
2038 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2039 		sctp_alloc_a_chunk(stcb, chk);
2040 		if (chk == NULL) {
2041 			/* No memory so we drop the chunk */
2042 			SCTP_STAT_INCR(sctps_nomem);
2043 			if (last_chunk == 0) {
2044 				/* we copied it, free the copy */
2045 				sctp_m_freem(dmbuf);
2046 			}
2047 			return (0);
2048 		}
2049 		chk->rec.data.TSN_seq = tsn;
2050 		chk->no_fr_allowed = 0;
2051 		chk->rec.data.fsn_num = fsn;
2052 		chk->rec.data.stream_seq = msg_id;
2053 		chk->rec.data.stream_number = strmno;
2054 		chk->rec.data.payloadtype = protocol_id;
2055 		chk->rec.data.context = stcb->asoc.context;
2056 		chk->rec.data.doing_fast_retransmit = 0;
2057 		chk->rec.data.rcv_flags = chunk_flags;
2058 		chk->asoc = asoc;
2059 		chk->send_size = the_len;
2060 		chk->whoTo = net;
2061 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n",
2062 		    chk,
2063 		    control, msg_id);
2064 		atomic_add_int(&net->ref_count, 1);
2065 		chk->data = dmbuf;
2066 	}
2067 	/* Set the appropriate TSN mark */
2068 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2069 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2070 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2071 			asoc->highest_tsn_inside_nr_map = tsn;
2072 		}
2073 	} else {
2074 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2075 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2076 			asoc->highest_tsn_inside_map = tsn;
2077 		}
2078 	}
2079 	/* Now is it complete (i.e. not fragmented)? */
2080 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2081 		/*
2082 		 * Special check for when streams are resetting. We could be
2083 		 * more smart about this and check the actual stream to see
2084 		 * if it is not being reset.. that way we would not create a
2085 		 * HOLB when amongst streams being reset and those not being
2086 		 * reset.
2087 		 *
2088 		 */
2089 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2090 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2091 			/*
2092 			 * yep its past where we need to reset... go ahead
2093 			 * and queue it.
2094 			 */
2095 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2096 				/* first one on */
2097 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2098 			} else {
2099 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2100 				unsigned char inserted = 0;
2101 
2102 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2103 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2104 
2105 						continue;
2106 					} else {
2107 						/* found it */
2108 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2109 						inserted = 1;
2110 						break;
2111 					}
2112 				}
2113 				if (inserted == 0) {
2114 					/*
2115 					 * must be put at end, use prevP
2116 					 * (all setup from loop) to setup
2117 					 * nextP.
2118 					 */
2119 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2120 				}
2121 			}
2122 			goto finish_express_del;
2123 		}
2124 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2125 			/* queue directly into socket buffer */
2126 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2127 			    control, msg_id);
2128 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2129 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2130 			    control,
2131 			    &stcb->sctp_socket->so_rcv, 1,
2132 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2133 
2134 		} else {
2135 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2136 			    msg_id);
2137 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2138 			if (*abort_flag) {
2139 				if (last_chunk) {
2140 					*m = NULL;
2141 				}
2142 				return (0);
2143 			}
2144 		}
2145 		goto finish_express_del;
2146 	}
2147 	/* If we reach here its a reassembly */
2148 	need_reasm_check = 1;
2149 	SCTPDBG(SCTP_DEBUG_XXX,
2150 	    "Queue data to stream for reasm control: %p msg_id: %u\n",
2151 	    control, msg_id);
2152 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2153 	if (*abort_flag) {
2154 		/*
2155 		 * the assoc is now gone and chk was put onto the reasm
2156 		 * queue, which has all been freed.
2157 		 */
2158 		if (last_chunk) {
2159 			*m = NULL;
2160 		}
2161 		return (0);
2162 	}
2163 finish_express_del:
2164 	/* Here we tidy up things */
2165 	if (tsn == (asoc->cumulative_tsn + 1)) {
2166 		/* Update cum-ack */
2167 		asoc->cumulative_tsn = tsn;
2168 	}
2169 	if (last_chunk) {
2170 		*m = NULL;
2171 	}
2172 	if (ordered) {
2173 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2174 	} else {
2175 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2176 	}
2177 	SCTP_STAT_INCR(sctps_recvdata);
2178 	/* Set it present please */
2179 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2180 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2181 	}
2182 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2183 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2184 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2185 	}
2186 	/* check the special flag for stream resets */
2187 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2188 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2189 		/*
2190 		 * we have finished working through the backlogged TSN's now
2191 		 * time to reset streams. 1: call reset function. 2: free
2192 		 * pending_reply space 3: distribute any chunks in
2193 		 * pending_reply_queue.
2194 		 */
2195 		struct sctp_queued_to_read *ctl, *nctl;
2196 
2197 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2198 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2199 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2200 		SCTP_FREE(liste, SCTP_M_STRESET);
2201 		/* sa_ignore FREED_MEMORY */
2202 		liste = TAILQ_FIRST(&asoc->resetHead);
2203 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2204 			/* All can be removed */
2205 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2206 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2207 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2208 				if (*abort_flag) {
2209 					return (0);
2210 				}
2211 			}
2212 		} else {
2213 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2214 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2215 					break;
2216 				}
2217 				/*
2218 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2219 				 * process it which is the NOT of
2220 				 * ctl->sinfo_tsn > liste->tsn
2221 				 */
2222 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2223 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2224 				if (*abort_flag) {
2225 					return (0);
2226 				}
2227 			}
2228 		}
2229 		/*
2230 		 * Now service re-assembly to pick up anything that has been
2231 		 * held on reassembly queue?
2232 		 */
2233 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2234 		need_reasm_check = 0;
2235 	}
2236 	if (need_reasm_check) {
2237 		/* Another one waits ? */
2238 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2239 	}
2240 	return (1);
2241 }
2242 
2243 static const int8_t sctp_map_lookup_tab[256] = {
2244 	0, 1, 0, 2, 0, 1, 0, 3,
2245 	0, 1, 0, 2, 0, 1, 0, 4,
2246 	0, 1, 0, 2, 0, 1, 0, 3,
2247 	0, 1, 0, 2, 0, 1, 0, 5,
2248 	0, 1, 0, 2, 0, 1, 0, 3,
2249 	0, 1, 0, 2, 0, 1, 0, 4,
2250 	0, 1, 0, 2, 0, 1, 0, 3,
2251 	0, 1, 0, 2, 0, 1, 0, 6,
2252 	0, 1, 0, 2, 0, 1, 0, 3,
2253 	0, 1, 0, 2, 0, 1, 0, 4,
2254 	0, 1, 0, 2, 0, 1, 0, 3,
2255 	0, 1, 0, 2, 0, 1, 0, 5,
2256 	0, 1, 0, 2, 0, 1, 0, 3,
2257 	0, 1, 0, 2, 0, 1, 0, 4,
2258 	0, 1, 0, 2, 0, 1, 0, 3,
2259 	0, 1, 0, 2, 0, 1, 0, 7,
2260 	0, 1, 0, 2, 0, 1, 0, 3,
2261 	0, 1, 0, 2, 0, 1, 0, 4,
2262 	0, 1, 0, 2, 0, 1, 0, 3,
2263 	0, 1, 0, 2, 0, 1, 0, 5,
2264 	0, 1, 0, 2, 0, 1, 0, 3,
2265 	0, 1, 0, 2, 0, 1, 0, 4,
2266 	0, 1, 0, 2, 0, 1, 0, 3,
2267 	0, 1, 0, 2, 0, 1, 0, 6,
2268 	0, 1, 0, 2, 0, 1, 0, 3,
2269 	0, 1, 0, 2, 0, 1, 0, 4,
2270 	0, 1, 0, 2, 0, 1, 0, 3,
2271 	0, 1, 0, 2, 0, 1, 0, 5,
2272 	0, 1, 0, 2, 0, 1, 0, 3,
2273 	0, 1, 0, 2, 0, 1, 0, 4,
2274 	0, 1, 0, 2, 0, 1, 0, 3,
2275 	0, 1, 0, 2, 0, 1, 0, 8
2276 };
2277 
2278 
2279 void
2280 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2281 {
2282 	/*
2283 	 * Now we also need to check the mapping array in a couple of ways.
2284 	 * 1) Did we move the cum-ack point?
2285 	 *
2286 	 * When you first glance at this you might think that all entries that
2287 	 * make up the position of the cum-ack would be in the nr-mapping
2288 	 * array only.. i.e. things up to the cum-ack are always
2289 	 * deliverable. Thats true with one exception, when its a fragmented
2290 	 * message we may not deliver the data until some threshold (or all
2291 	 * of it) is in place. So we must OR the nr_mapping_array and
2292 	 * mapping_array to get a true picture of the cum-ack.
2293 	 */
2294 	struct sctp_association *asoc;
2295 	int at;
2296 	uint8_t val;
2297 	int slide_from, slide_end, lgap, distance;
2298 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2299 
2300 	asoc = &stcb->asoc;
2301 
2302 	old_cumack = asoc->cumulative_tsn;
2303 	old_base = asoc->mapping_array_base_tsn;
2304 	old_highest = asoc->highest_tsn_inside_map;
2305 	/*
2306 	 * We could probably improve this a small bit by calculating the
2307 	 * offset of the current cum-ack as the starting point.
2308 	 */
2309 	at = 0;
2310 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2311 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2312 		if (val == 0xff) {
2313 			at += 8;
2314 		} else {
2315 			/* there is a 0 bit */
2316 			at += sctp_map_lookup_tab[val];
2317 			break;
2318 		}
2319 	}
2320 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2321 
2322 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2323 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2324 #ifdef INVARIANTS
2325 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2326 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2327 #else
2328 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2329 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2330 		sctp_print_mapping_array(asoc);
2331 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2332 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2333 		}
2334 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2335 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2336 #endif
2337 	}
2338 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2339 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2340 	} else {
2341 		highest_tsn = asoc->highest_tsn_inside_map;
2342 	}
2343 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2344 		/* The complete array was completed by a single FR */
2345 		/* highest becomes the cum-ack */
2346 		int clr;
2347 
2348 #ifdef INVARIANTS
2349 		unsigned int i;
2350 
2351 #endif
2352 
2353 		/* clear the array */
2354 		clr = ((at + 7) >> 3);
2355 		if (clr > asoc->mapping_array_size) {
2356 			clr = asoc->mapping_array_size;
2357 		}
2358 		memset(asoc->mapping_array, 0, clr);
2359 		memset(asoc->nr_mapping_array, 0, clr);
2360 #ifdef INVARIANTS
2361 		for (i = 0; i < asoc->mapping_array_size; i++) {
2362 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2363 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2364 				sctp_print_mapping_array(asoc);
2365 			}
2366 		}
2367 #endif
2368 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2369 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2370 	} else if (at >= 8) {
2371 		/* we can slide the mapping array down */
2372 		/* slide_from holds where we hit the first NON 0xff byte */
2373 
2374 		/*
2375 		 * now calculate the ceiling of the move using our highest
2376 		 * TSN value
2377 		 */
2378 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2379 		slide_end = (lgap >> 3);
2380 		if (slide_end < slide_from) {
2381 			sctp_print_mapping_array(asoc);
2382 #ifdef INVARIANTS
2383 			panic("impossible slide");
2384 #else
2385 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2386 			    lgap, slide_end, slide_from, at);
2387 			return;
2388 #endif
2389 		}
2390 		if (slide_end > asoc->mapping_array_size) {
2391 #ifdef INVARIANTS
2392 			panic("would overrun buffer");
2393 #else
2394 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2395 			    asoc->mapping_array_size, slide_end);
2396 			slide_end = asoc->mapping_array_size;
2397 #endif
2398 		}
2399 		distance = (slide_end - slide_from) + 1;
2400 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2401 			sctp_log_map(old_base, old_cumack, old_highest,
2402 			    SCTP_MAP_PREPARE_SLIDE);
2403 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2404 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2405 		}
2406 		if (distance + slide_from > asoc->mapping_array_size ||
2407 		    distance < 0) {
2408 			/*
2409 			 * Here we do NOT slide forward the array so that
2410 			 * hopefully when more data comes in to fill it up
2411 			 * we will be able to slide it forward. Really I
2412 			 * don't think this should happen :-0
2413 			 */
2414 
2415 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2416 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2417 				    (uint32_t) asoc->mapping_array_size,
2418 				    SCTP_MAP_SLIDE_NONE);
2419 			}
2420 		} else {
2421 			int ii;
2422 
2423 			for (ii = 0; ii < distance; ii++) {
2424 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2425 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2426 
2427 			}
2428 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2429 				asoc->mapping_array[ii] = 0;
2430 				asoc->nr_mapping_array[ii] = 0;
2431 			}
2432 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2433 				asoc->highest_tsn_inside_map += (slide_from << 3);
2434 			}
2435 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2436 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2437 			}
2438 			asoc->mapping_array_base_tsn += (slide_from << 3);
2439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2440 				sctp_log_map(asoc->mapping_array_base_tsn,
2441 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2442 				    SCTP_MAP_SLIDE_RESULT);
2443 			}
2444 		}
2445 	}
2446 }
2447 
2448 void
2449 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2450 {
2451 	struct sctp_association *asoc;
2452 	uint32_t highest_tsn;
2453 
2454 	asoc = &stcb->asoc;
2455 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2456 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2457 	} else {
2458 		highest_tsn = asoc->highest_tsn_inside_map;
2459 	}
2460 
2461 	/*
2462 	 * Now we need to see if we need to queue a sack or just start the
2463 	 * timer (if allowed).
2464 	 */
2465 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2466 		/*
2467 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2468 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2469 		 * SACK
2470 		 */
2471 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2472 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2473 			    stcb->sctp_ep, stcb, NULL,
2474 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2475 		}
2476 		sctp_send_shutdown(stcb,
2477 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2478 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2479 	} else {
2480 		int is_a_gap;
2481 
2482 		/* is there a gap now ? */
2483 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2484 
2485 		/*
2486 		 * CMT DAC algorithm: increase number of packets received
2487 		 * since last ack
2488 		 */
2489 		stcb->asoc.cmt_dac_pkts_rcvd++;
2490 
2491 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2492 							 * SACK */
2493 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2494 							 * longer is one */
2495 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2496 		    (is_a_gap) ||	/* is still a gap */
2497 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2498 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2499 		    ) {
2500 
2501 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2502 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2503 			    (stcb->asoc.send_sack == 0) &&
2504 			    (stcb->asoc.numduptsns == 0) &&
2505 			    (stcb->asoc.delayed_ack) &&
2506 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2507 
2508 				/*
2509 				 * CMT DAC algorithm: With CMT, delay acks
2510 				 * even in the face of
2511 				 *
2512 				 * reordering. Therefore, if acks that do not
2513 				 * have to be sent because of the above
2514 				 * reasons, will be delayed. That is, acks
2515 				 * that would have been sent due to gap
2516 				 * reports will be delayed with DAC. Start
2517 				 * the delayed ack timer.
2518 				 */
2519 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2520 				    stcb->sctp_ep, stcb, NULL);
2521 			} else {
2522 				/*
2523 				 * Ok we must build a SACK since the timer
2524 				 * is pending, we got our first packet OR
2525 				 * there are gaps or duplicates.
2526 				 */
2527 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2528 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2529 			}
2530 		} else {
2531 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2532 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2533 				    stcb->sctp_ep, stcb, NULL);
2534 			}
2535 		}
2536 	}
2537 }
2538 
2539 int
2540 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2541     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2542     struct sctp_nets *net, uint32_t * high_tsn)
2543 {
2544 	struct sctp_chunkhdr *ch, chunk_buf;
2545 	struct sctp_association *asoc;
2546 	int num_chunks = 0;	/* number of control chunks processed */
2547 	int stop_proc = 0;
2548 	int chk_length, break_flag, last_chunk;
2549 	int abort_flag = 0, was_a_gap;
2550 	struct mbuf *m;
2551 	uint32_t highest_tsn;
2552 
2553 	/* set the rwnd */
2554 	sctp_set_rwnd(stcb, &stcb->asoc);
2555 
2556 	m = *mm;
2557 	SCTP_TCB_LOCK_ASSERT(stcb);
2558 	asoc = &stcb->asoc;
2559 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2560 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2561 	} else {
2562 		highest_tsn = asoc->highest_tsn_inside_map;
2563 	}
2564 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2565 	/*
2566 	 * setup where we got the last DATA packet from for any SACK that
2567 	 * may need to go out. Don't bump the net. This is done ONLY when a
2568 	 * chunk is assigned.
2569 	 */
2570 	asoc->last_data_chunk_from = net;
2571 
2572 	/*-
2573 	 * Now before we proceed we must figure out if this is a wasted
2574 	 * cluster... i.e. it is a small packet sent in and yet the driver
2575 	 * underneath allocated a full cluster for it. If so we must copy it
2576 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2577 	 * with cluster starvation. Note for __Panda__ we don't do this
2578 	 * since it has clusters all the way down to 64 bytes.
2579 	 */
2580 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2581 		/* we only handle mbufs that are singletons.. not chains */
2582 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2583 		if (m) {
2584 			/* ok lets see if we can copy the data up */
2585 			caddr_t *from, *to;
2586 
2587 			/* get the pointers and copy */
2588 			to = mtod(m, caddr_t *);
2589 			from = mtod((*mm), caddr_t *);
2590 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2591 			/* copy the length and free up the old */
2592 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2593 			sctp_m_freem(*mm);
2594 			/* success, back copy */
2595 			*mm = m;
2596 		} else {
2597 			/* We are in trouble in the mbuf world .. yikes */
2598 			m = *mm;
2599 		}
2600 	}
2601 	/* get pointer to the first chunk header */
2602 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2603 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2604 	if (ch == NULL) {
2605 		return (1);
2606 	}
2607 	/*
2608 	 * process all DATA chunks...
2609 	 */
2610 	*high_tsn = asoc->cumulative_tsn;
2611 	break_flag = 0;
2612 	asoc->data_pkts_seen++;
2613 	while (stop_proc == 0) {
2614 		/* validate chunk length */
2615 		chk_length = ntohs(ch->chunk_length);
2616 		if (length - *offset < chk_length) {
2617 			/* all done, mutulated chunk */
2618 			stop_proc = 1;
2619 			continue;
2620 		}
2621 		if ((asoc->idata_supported == 1) &&
2622 		    (ch->chunk_type == SCTP_DATA)) {
2623 			struct mbuf *op_err;
2624 			char msg[SCTP_DIAG_INFO_LEN];
2625 
2626 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2627 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2628 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2629 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2630 			return (2);
2631 		}
2632 		if ((asoc->idata_supported == 0) &&
2633 		    (ch->chunk_type == SCTP_IDATA)) {
2634 			struct mbuf *op_err;
2635 			char msg[SCTP_DIAG_INFO_LEN];
2636 
2637 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2638 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2639 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2640 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2641 			return (2);
2642 		}
2643 		if ((ch->chunk_type == SCTP_DATA) ||
2644 		    (ch->chunk_type == SCTP_IDATA)) {
2645 			int clen;
2646 
2647 			if (ch->chunk_type == SCTP_DATA) {
2648 				clen = sizeof(struct sctp_data_chunk);
2649 			} else {
2650 				clen = sizeof(struct sctp_idata_chunk);
2651 			}
2652 			if (chk_length < clen) {
2653 				/*
2654 				 * Need to send an abort since we had a
2655 				 * invalid data chunk.
2656 				 */
2657 				struct mbuf *op_err;
2658 				char msg[SCTP_DIAG_INFO_LEN];
2659 
2660 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2661 				    chk_length);
2662 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2663 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2664 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2665 				return (2);
2666 			}
2667 #ifdef SCTP_AUDITING_ENABLED
2668 			sctp_audit_log(0xB1, 0);
2669 #endif
2670 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2671 				last_chunk = 1;
2672 			} else {
2673 				last_chunk = 0;
2674 			}
2675 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2676 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2677 			    last_chunk, ch->chunk_type)) {
2678 				num_chunks++;
2679 			}
2680 			if (abort_flag)
2681 				return (2);
2682 
2683 			if (break_flag) {
2684 				/*
2685 				 * Set because of out of rwnd space and no
2686 				 * drop rep space left.
2687 				 */
2688 				stop_proc = 1;
2689 				continue;
2690 			}
2691 		} else {
2692 			/* not a data chunk in the data region */
2693 			switch (ch->chunk_type) {
2694 			case SCTP_INITIATION:
2695 			case SCTP_INITIATION_ACK:
2696 			case SCTP_SELECTIVE_ACK:
2697 			case SCTP_NR_SELECTIVE_ACK:
2698 			case SCTP_HEARTBEAT_REQUEST:
2699 			case SCTP_HEARTBEAT_ACK:
2700 			case SCTP_ABORT_ASSOCIATION:
2701 			case SCTP_SHUTDOWN:
2702 			case SCTP_SHUTDOWN_ACK:
2703 			case SCTP_OPERATION_ERROR:
2704 			case SCTP_COOKIE_ECHO:
2705 			case SCTP_COOKIE_ACK:
2706 			case SCTP_ECN_ECHO:
2707 			case SCTP_ECN_CWR:
2708 			case SCTP_SHUTDOWN_COMPLETE:
2709 			case SCTP_AUTHENTICATION:
2710 			case SCTP_ASCONF_ACK:
2711 			case SCTP_PACKET_DROPPED:
2712 			case SCTP_STREAM_RESET:
2713 			case SCTP_FORWARD_CUM_TSN:
2714 			case SCTP_ASCONF:
2715 				{
2716 					/*
2717 					 * Now, what do we do with KNOWN
2718 					 * chunks that are NOT in the right
2719 					 * place?
2720 					 *
2721 					 * For now, I do nothing but ignore
2722 					 * them. We may later want to add
2723 					 * sysctl stuff to switch out and do
2724 					 * either an ABORT() or possibly
2725 					 * process them.
2726 					 */
2727 					struct mbuf *op_err;
2728 					char msg[SCTP_DIAG_INFO_LEN];
2729 
2730 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2731 					    ch->chunk_type);
2732 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2733 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2734 					return (2);
2735 				}
2736 			default:
2737 				/* unknown chunk type, use bit rules */
2738 				if (ch->chunk_type & 0x40) {
2739 					/* Add a error report to the queue */
2740 					struct mbuf *op_err;
2741 					struct sctp_gen_error_cause *cause;
2742 
2743 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2744 					    0, M_NOWAIT, 1, MT_DATA);
2745 					if (op_err != NULL) {
2746 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2747 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2748 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2749 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2750 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2751 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2752 							sctp_queue_op_err(stcb, op_err);
2753 						} else {
2754 							sctp_m_freem(op_err);
2755 						}
2756 					}
2757 				}
2758 				if ((ch->chunk_type & 0x80) == 0) {
2759 					/* discard the rest of this packet */
2760 					stop_proc = 1;
2761 				}	/* else skip this bad chunk and
2762 					 * continue... */
2763 				break;
2764 			}	/* switch of chunk type */
2765 		}
2766 		*offset += SCTP_SIZE32(chk_length);
2767 		if ((*offset >= length) || stop_proc) {
2768 			/* no more data left in the mbuf chain */
2769 			stop_proc = 1;
2770 			continue;
2771 		}
2772 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2773 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2774 		if (ch == NULL) {
2775 			*offset = length;
2776 			stop_proc = 1;
2777 			continue;
2778 		}
2779 	}
2780 	if (break_flag) {
2781 		/*
2782 		 * we need to report rwnd overrun drops.
2783 		 */
2784 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2785 	}
2786 	if (num_chunks) {
2787 		/*
2788 		 * Did we get data, if so update the time for auto-close and
2789 		 * give peer credit for being alive.
2790 		 */
2791 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2792 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2793 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2794 			    stcb->asoc.overall_error_count,
2795 			    0,
2796 			    SCTP_FROM_SCTP_INDATA,
2797 			    __LINE__);
2798 		}
2799 		stcb->asoc.overall_error_count = 0;
2800 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2801 	}
2802 	/* now service all of the reassm queue if needed */
2803 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2804 		/* Assure that we ack right away */
2805 		stcb->asoc.send_sack = 1;
2806 	}
2807 	/* Start a sack timer or QUEUE a SACK for sending */
2808 	sctp_sack_check(stcb, was_a_gap);
2809 	return (0);
2810 }
2811 
2812 static int
2813 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2814     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2815     int *num_frs,
2816     uint32_t * biggest_newly_acked_tsn,
2817     uint32_t * this_sack_lowest_newack,
2818     int *rto_ok)
2819 {
2820 	struct sctp_tmit_chunk *tp1;
2821 	unsigned int theTSN;
2822 	int j, wake_him = 0, circled = 0;
2823 
2824 	/* Recover the tp1 we last saw */
2825 	tp1 = *p_tp1;
2826 	if (tp1 == NULL) {
2827 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2828 	}
2829 	for (j = frag_strt; j <= frag_end; j++) {
2830 		theTSN = j + last_tsn;
2831 		while (tp1) {
2832 			if (tp1->rec.data.doing_fast_retransmit)
2833 				(*num_frs) += 1;
2834 
2835 			/*-
2836 			 * CMT: CUCv2 algorithm. For each TSN being
2837 			 * processed from the sent queue, track the
2838 			 * next expected pseudo-cumack, or
2839 			 * rtx_pseudo_cumack, if required. Separate
2840 			 * cumack trackers for first transmissions,
2841 			 * and retransmissions.
2842 			 */
2843 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2844 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2845 			    (tp1->snd_count == 1)) {
2846 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2847 				tp1->whoTo->find_pseudo_cumack = 0;
2848 			}
2849 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2850 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2851 			    (tp1->snd_count > 1)) {
2852 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2853 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2854 			}
2855 			if (tp1->rec.data.TSN_seq == theTSN) {
2856 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2857 					/*-
2858 					 * must be held until
2859 					 * cum-ack passes
2860 					 */
2861 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2862 						/*-
2863 						 * If it is less than RESEND, it is
2864 						 * now no-longer in flight.
2865 						 * Higher values may already be set
2866 						 * via previous Gap Ack Blocks...
2867 						 * i.e. ACKED or RESEND.
2868 						 */
2869 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2870 						    *biggest_newly_acked_tsn)) {
2871 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2872 						}
2873 						/*-
2874 						 * CMT: SFR algo (and HTNA) - set
2875 						 * saw_newack to 1 for dest being
2876 						 * newly acked. update
2877 						 * this_sack_highest_newack if
2878 						 * appropriate.
2879 						 */
2880 						if (tp1->rec.data.chunk_was_revoked == 0)
2881 							tp1->whoTo->saw_newack = 1;
2882 
2883 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2884 						    tp1->whoTo->this_sack_highest_newack)) {
2885 							tp1->whoTo->this_sack_highest_newack =
2886 							    tp1->rec.data.TSN_seq;
2887 						}
2888 						/*-
2889 						 * CMT DAC algo: also update
2890 						 * this_sack_lowest_newack
2891 						 */
2892 						if (*this_sack_lowest_newack == 0) {
2893 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2894 								sctp_log_sack(*this_sack_lowest_newack,
2895 								    last_tsn,
2896 								    tp1->rec.data.TSN_seq,
2897 								    0,
2898 								    0,
2899 								    SCTP_LOG_TSN_ACKED);
2900 							}
2901 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2902 						}
2903 						/*-
2904 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2905 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2906 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2907 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2908 						 * Separate pseudo_cumack trackers for first transmissions and
2909 						 * retransmissions.
2910 						 */
2911 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2912 							if (tp1->rec.data.chunk_was_revoked == 0) {
2913 								tp1->whoTo->new_pseudo_cumack = 1;
2914 							}
2915 							tp1->whoTo->find_pseudo_cumack = 1;
2916 						}
2917 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2918 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2919 						}
2920 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2921 							if (tp1->rec.data.chunk_was_revoked == 0) {
2922 								tp1->whoTo->new_pseudo_cumack = 1;
2923 							}
2924 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2925 						}
2926 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2927 							sctp_log_sack(*biggest_newly_acked_tsn,
2928 							    last_tsn,
2929 							    tp1->rec.data.TSN_seq,
2930 							    frag_strt,
2931 							    frag_end,
2932 							    SCTP_LOG_TSN_ACKED);
2933 						}
2934 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2935 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2936 							    tp1->whoTo->flight_size,
2937 							    tp1->book_size,
2938 							    (uint32_t) (uintptr_t) tp1->whoTo,
2939 							    tp1->rec.data.TSN_seq);
2940 						}
2941 						sctp_flight_size_decrease(tp1);
2942 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2943 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2944 							    tp1);
2945 						}
2946 						sctp_total_flight_decrease(stcb, tp1);
2947 
2948 						tp1->whoTo->net_ack += tp1->send_size;
2949 						if (tp1->snd_count < 2) {
2950 							/*-
2951 							 * True non-retransmited chunk
2952 							 */
2953 							tp1->whoTo->net_ack2 += tp1->send_size;
2954 
2955 							/*-
2956 							 * update RTO too ?
2957 							 */
2958 							if (tp1->do_rtt) {
2959 								if (*rto_ok) {
2960 									tp1->whoTo->RTO =
2961 									    sctp_calculate_rto(stcb,
2962 									    &stcb->asoc,
2963 									    tp1->whoTo,
2964 									    &tp1->sent_rcv_time,
2965 									    sctp_align_safe_nocopy,
2966 									    SCTP_RTT_FROM_DATA);
2967 									*rto_ok = 0;
2968 								}
2969 								if (tp1->whoTo->rto_needed == 0) {
2970 									tp1->whoTo->rto_needed = 1;
2971 								}
2972 								tp1->do_rtt = 0;
2973 							}
2974 						}
2975 					}
2976 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2977 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2978 						    stcb->asoc.this_sack_highest_gap)) {
2979 							stcb->asoc.this_sack_highest_gap =
2980 							    tp1->rec.data.TSN_seq;
2981 						}
2982 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2983 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2984 #ifdef SCTP_AUDITING_ENABLED
2985 							sctp_audit_log(0xB2,
2986 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2987 #endif
2988 						}
2989 					}
2990 					/*-
2991 					 * All chunks NOT UNSENT fall through here and are marked
2992 					 * (leave PR-SCTP ones that are to skip alone though)
2993 					 */
2994 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2995 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2996 						tp1->sent = SCTP_DATAGRAM_MARKED;
2997 					}
2998 					if (tp1->rec.data.chunk_was_revoked) {
2999 						/* deflate the cwnd */
3000 						tp1->whoTo->cwnd -= tp1->book_size;
3001 						tp1->rec.data.chunk_was_revoked = 0;
3002 					}
3003 					/* NR Sack code here */
3004 					if (nr_sacking &&
3005 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3006 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3007 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3008 #ifdef INVARIANTS
3009 						} else {
3010 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3011 #endif
3012 						}
3013 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3014 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3015 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
3016 							stcb->asoc.trigger_reset = 1;
3017 						}
3018 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3019 						if (tp1->data) {
3020 							/*
3021 							 * sa_ignore
3022 							 * NO_NULL_CHK
3023 							 */
3024 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3025 							sctp_m_freem(tp1->data);
3026 							tp1->data = NULL;
3027 						}
3028 						wake_him++;
3029 					}
3030 				}
3031 				break;
3032 			}	/* if (tp1->TSN_seq == theTSN) */
3033 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3034 				break;
3035 			}
3036 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3037 			if ((tp1 == NULL) && (circled == 0)) {
3038 				circled++;
3039 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3040 			}
3041 		}		/* end while (tp1) */
3042 		if (tp1 == NULL) {
3043 			circled = 0;
3044 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3045 		}
3046 		/* In case the fragments were not in order we must reset */
3047 	}			/* end for (j = fragStart */
3048 	*p_tp1 = tp1;
3049 	return (wake_him);	/* Return value only used for nr-sack */
3050 }
3051 
3052 
3053 static int
3054 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3055     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3056     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3057     int num_seg, int num_nr_seg, int *rto_ok)
3058 {
3059 	struct sctp_gap_ack_block *frag, block;
3060 	struct sctp_tmit_chunk *tp1;
3061 	int i;
3062 	int num_frs = 0;
3063 	int chunk_freed;
3064 	int non_revocable;
3065 	uint16_t frag_strt, frag_end, prev_frag_end;
3066 
3067 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3068 	prev_frag_end = 0;
3069 	chunk_freed = 0;
3070 
3071 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3072 		if (i == num_seg) {
3073 			prev_frag_end = 0;
3074 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3075 		}
3076 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3077 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3078 		*offset += sizeof(block);
3079 		if (frag == NULL) {
3080 			return (chunk_freed);
3081 		}
3082 		frag_strt = ntohs(frag->start);
3083 		frag_end = ntohs(frag->end);
3084 
3085 		if (frag_strt > frag_end) {
3086 			/* This gap report is malformed, skip it. */
3087 			continue;
3088 		}
3089 		if (frag_strt <= prev_frag_end) {
3090 			/* This gap report is not in order, so restart. */
3091 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3092 		}
3093 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3094 			*biggest_tsn_acked = last_tsn + frag_end;
3095 		}
3096 		if (i < num_seg) {
3097 			non_revocable = 0;
3098 		} else {
3099 			non_revocable = 1;
3100 		}
3101 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3102 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3103 		    this_sack_lowest_newack, rto_ok)) {
3104 			chunk_freed = 1;
3105 		}
3106 		prev_frag_end = frag_end;
3107 	}
3108 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3109 		if (num_frs)
3110 			sctp_log_fr(*biggest_tsn_acked,
3111 			    *biggest_newly_acked_tsn,
3112 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3113 	}
3114 	return (chunk_freed);
3115 }
3116 
3117 static void
3118 sctp_check_for_revoked(struct sctp_tcb *stcb,
3119     struct sctp_association *asoc, uint32_t cumack,
3120     uint32_t biggest_tsn_acked)
3121 {
3122 	struct sctp_tmit_chunk *tp1;
3123 
3124 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3125 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3126 			/*
3127 			 * ok this guy is either ACK or MARKED. If it is
3128 			 * ACKED it has been previously acked but not this
3129 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3130 			 * again.
3131 			 */
3132 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3133 				break;
3134 			}
3135 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3136 				/* it has been revoked */
3137 				tp1->sent = SCTP_DATAGRAM_SENT;
3138 				tp1->rec.data.chunk_was_revoked = 1;
3139 				/*
3140 				 * We must add this stuff back in to assure
3141 				 * timers and such get started.
3142 				 */
3143 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3144 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3145 					    tp1->whoTo->flight_size,
3146 					    tp1->book_size,
3147 					    (uint32_t) (uintptr_t) tp1->whoTo,
3148 					    tp1->rec.data.TSN_seq);
3149 				}
3150 				sctp_flight_size_increase(tp1);
3151 				sctp_total_flight_increase(stcb, tp1);
3152 				/*
3153 				 * We inflate the cwnd to compensate for our
3154 				 * artificial inflation of the flight_size.
3155 				 */
3156 				tp1->whoTo->cwnd += tp1->book_size;
3157 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3158 					sctp_log_sack(asoc->last_acked_seq,
3159 					    cumack,
3160 					    tp1->rec.data.TSN_seq,
3161 					    0,
3162 					    0,
3163 					    SCTP_LOG_TSN_REVOKED);
3164 				}
3165 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3166 				/* it has been re-acked in this SACK */
3167 				tp1->sent = SCTP_DATAGRAM_ACKED;
3168 			}
3169 		}
3170 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3171 			break;
3172 	}
3173 }
3174 
3175 
3176 static void
3177 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3178     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3179 {
3180 	struct sctp_tmit_chunk *tp1;
3181 	int strike_flag = 0;
3182 	struct timeval now;
3183 	int tot_retrans = 0;
3184 	uint32_t sending_seq;
3185 	struct sctp_nets *net;
3186 	int num_dests_sacked = 0;
3187 
3188 	/*
3189 	 * select the sending_seq, this is either the next thing ready to be
3190 	 * sent but not transmitted, OR, the next seq we assign.
3191 	 */
3192 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3193 	if (tp1 == NULL) {
3194 		sending_seq = asoc->sending_seq;
3195 	} else {
3196 		sending_seq = tp1->rec.data.TSN_seq;
3197 	}
3198 
3199 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3200 	if ((asoc->sctp_cmt_on_off > 0) &&
3201 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3202 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3203 			if (net->saw_newack)
3204 				num_dests_sacked++;
3205 		}
3206 	}
3207 	if (stcb->asoc.prsctp_supported) {
3208 		(void)SCTP_GETTIME_TIMEVAL(&now);
3209 	}
3210 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3211 		strike_flag = 0;
3212 		if (tp1->no_fr_allowed) {
3213 			/* this one had a timeout or something */
3214 			continue;
3215 		}
3216 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3217 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3218 				sctp_log_fr(biggest_tsn_newly_acked,
3219 				    tp1->rec.data.TSN_seq,
3220 				    tp1->sent,
3221 				    SCTP_FR_LOG_CHECK_STRIKE);
3222 		}
3223 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3224 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3225 			/* done */
3226 			break;
3227 		}
3228 		if (stcb->asoc.prsctp_supported) {
3229 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3230 				/* Is it expired? */
3231 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3232 					/* Yes so drop it */
3233 					if (tp1->data != NULL) {
3234 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3235 						    SCTP_SO_NOT_LOCKED);
3236 					}
3237 					continue;
3238 				}
3239 			}
3240 		}
3241 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3242 			/* we are beyond the tsn in the sack  */
3243 			break;
3244 		}
3245 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3246 			/* either a RESEND, ACKED, or MARKED */
3247 			/* skip */
3248 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3249 				/* Continue strikin FWD-TSN chunks */
3250 				tp1->rec.data.fwd_tsn_cnt++;
3251 			}
3252 			continue;
3253 		}
3254 		/*
3255 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3256 		 */
3257 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3258 			/*
3259 			 * No new acks were receieved for data sent to this
3260 			 * dest. Therefore, according to the SFR algo for
3261 			 * CMT, no data sent to this dest can be marked for
3262 			 * FR using this SACK.
3263 			 */
3264 			continue;
3265 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3266 		    tp1->whoTo->this_sack_highest_newack)) {
3267 			/*
3268 			 * CMT: New acks were receieved for data sent to
3269 			 * this dest. But no new acks were seen for data
3270 			 * sent after tp1. Therefore, according to the SFR
3271 			 * algo for CMT, tp1 cannot be marked for FR using
3272 			 * this SACK. This step covers part of the DAC algo
3273 			 * and the HTNA algo as well.
3274 			 */
3275 			continue;
3276 		}
3277 		/*
3278 		 * Here we check to see if we were have already done a FR
3279 		 * and if so we see if the biggest TSN we saw in the sack is
3280 		 * smaller than the recovery point. If so we don't strike
3281 		 * the tsn... otherwise we CAN strike the TSN.
3282 		 */
3283 		/*
3284 		 * @@@ JRI: Check for CMT if (accum_moved &&
3285 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3286 		 * 0)) {
3287 		 */
3288 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3289 			/*
3290 			 * Strike the TSN if in fast-recovery and cum-ack
3291 			 * moved.
3292 			 */
3293 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 				sctp_log_fr(biggest_tsn_newly_acked,
3295 				    tp1->rec.data.TSN_seq,
3296 				    tp1->sent,
3297 				    SCTP_FR_LOG_STRIKE_CHUNK);
3298 			}
3299 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3300 				tp1->sent++;
3301 			}
3302 			if ((asoc->sctp_cmt_on_off > 0) &&
3303 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3304 				/*
3305 				 * CMT DAC algorithm: If SACK flag is set to
3306 				 * 0, then lowest_newack test will not pass
3307 				 * because it would have been set to the
3308 				 * cumack earlier. If not already to be
3309 				 * rtx'd, If not a mixed sack and if tp1 is
3310 				 * not between two sacked TSNs, then mark by
3311 				 * one more. NOTE that we are marking by one
3312 				 * additional time since the SACK DAC flag
3313 				 * indicates that two packets have been
3314 				 * received after this missing TSN.
3315 				 */
3316 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3317 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3318 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3319 						sctp_log_fr(16 + num_dests_sacked,
3320 						    tp1->rec.data.TSN_seq,
3321 						    tp1->sent,
3322 						    SCTP_FR_LOG_STRIKE_CHUNK);
3323 					}
3324 					tp1->sent++;
3325 				}
3326 			}
3327 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3328 		    (asoc->sctp_cmt_on_off == 0)) {
3329 			/*
3330 			 * For those that have done a FR we must take
3331 			 * special consideration if we strike. I.e the
3332 			 * biggest_newly_acked must be higher than the
3333 			 * sending_seq at the time we did the FR.
3334 			 */
3335 			if (
3336 #ifdef SCTP_FR_TO_ALTERNATE
3337 			/*
3338 			 * If FR's go to new networks, then we must only do
3339 			 * this for singly homed asoc's. However if the FR's
3340 			 * go to the same network (Armando's work) then its
3341 			 * ok to FR multiple times.
3342 			 */
3343 			    (asoc->numnets < 2)
3344 #else
3345 			    (1)
3346 #endif
3347 			    ) {
3348 
3349 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3350 				    tp1->rec.data.fast_retran_tsn)) {
3351 					/*
3352 					 * Strike the TSN, since this ack is
3353 					 * beyond where things were when we
3354 					 * did a FR.
3355 					 */
3356 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 						sctp_log_fr(biggest_tsn_newly_acked,
3358 						    tp1->rec.data.TSN_seq,
3359 						    tp1->sent,
3360 						    SCTP_FR_LOG_STRIKE_CHUNK);
3361 					}
3362 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3363 						tp1->sent++;
3364 					}
3365 					strike_flag = 1;
3366 					if ((asoc->sctp_cmt_on_off > 0) &&
3367 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3368 						/*
3369 						 * CMT DAC algorithm: If
3370 						 * SACK flag is set to 0,
3371 						 * then lowest_newack test
3372 						 * will not pass because it
3373 						 * would have been set to
3374 						 * the cumack earlier. If
3375 						 * not already to be rtx'd,
3376 						 * If not a mixed sack and
3377 						 * if tp1 is not between two
3378 						 * sacked TSNs, then mark by
3379 						 * one more. NOTE that we
3380 						 * are marking by one
3381 						 * additional time since the
3382 						 * SACK DAC flag indicates
3383 						 * that two packets have
3384 						 * been received after this
3385 						 * missing TSN.
3386 						 */
3387 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3388 						    (num_dests_sacked == 1) &&
3389 						    SCTP_TSN_GT(this_sack_lowest_newack,
3390 						    tp1->rec.data.TSN_seq)) {
3391 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3392 								sctp_log_fr(32 + num_dests_sacked,
3393 								    tp1->rec.data.TSN_seq,
3394 								    tp1->sent,
3395 								    SCTP_FR_LOG_STRIKE_CHUNK);
3396 							}
3397 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3398 								tp1->sent++;
3399 							}
3400 						}
3401 					}
3402 				}
3403 			}
3404 			/*
3405 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3406 			 * algo covers HTNA.
3407 			 */
3408 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3409 		    biggest_tsn_newly_acked)) {
3410 			/*
3411 			 * We don't strike these: This is the  HTNA
3412 			 * algorithm i.e. we don't strike If our TSN is
3413 			 * larger than the Highest TSN Newly Acked.
3414 			 */
3415 			;
3416 		} else {
3417 			/* Strike the TSN */
3418 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 				sctp_log_fr(biggest_tsn_newly_acked,
3420 				    tp1->rec.data.TSN_seq,
3421 				    tp1->sent,
3422 				    SCTP_FR_LOG_STRIKE_CHUNK);
3423 			}
3424 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3425 				tp1->sent++;
3426 			}
3427 			if ((asoc->sctp_cmt_on_off > 0) &&
3428 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3429 				/*
3430 				 * CMT DAC algorithm: If SACK flag is set to
3431 				 * 0, then lowest_newack test will not pass
3432 				 * because it would have been set to the
3433 				 * cumack earlier. If not already to be
3434 				 * rtx'd, If not a mixed sack and if tp1 is
3435 				 * not between two sacked TSNs, then mark by
3436 				 * one more. NOTE that we are marking by one
3437 				 * additional time since the SACK DAC flag
3438 				 * indicates that two packets have been
3439 				 * received after this missing TSN.
3440 				 */
3441 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3442 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3443 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3444 						sctp_log_fr(48 + num_dests_sacked,
3445 						    tp1->rec.data.TSN_seq,
3446 						    tp1->sent,
3447 						    SCTP_FR_LOG_STRIKE_CHUNK);
3448 					}
3449 					tp1->sent++;
3450 				}
3451 			}
3452 		}
3453 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3454 			struct sctp_nets *alt;
3455 
3456 			/* fix counts and things */
3457 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3458 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3459 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3460 				    tp1->book_size,
3461 				    (uint32_t) (uintptr_t) tp1->whoTo,
3462 				    tp1->rec.data.TSN_seq);
3463 			}
3464 			if (tp1->whoTo) {
3465 				tp1->whoTo->net_ack++;
3466 				sctp_flight_size_decrease(tp1);
3467 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3468 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3469 					    tp1);
3470 				}
3471 			}
3472 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3473 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3474 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3475 			}
3476 			/* add back to the rwnd */
3477 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3478 
3479 			/* remove from the total flight */
3480 			sctp_total_flight_decrease(stcb, tp1);
3481 
3482 			if ((stcb->asoc.prsctp_supported) &&
3483 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3484 				/*
3485 				 * Has it been retransmitted tv_sec times? -
3486 				 * we store the retran count there.
3487 				 */
3488 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3489 					/* Yes, so drop it */
3490 					if (tp1->data != NULL) {
3491 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3492 						    SCTP_SO_NOT_LOCKED);
3493 					}
3494 					/* Make sure to flag we had a FR */
3495 					tp1->whoTo->net_ack++;
3496 					continue;
3497 				}
3498 			}
3499 			/*
3500 			 * SCTP_PRINTF("OK, we are now ready to FR this
3501 			 * guy\n");
3502 			 */
3503 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3504 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3505 				    0, SCTP_FR_MARKED);
3506 			}
3507 			if (strike_flag) {
3508 				/* This is a subsequent FR */
3509 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3510 			}
3511 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3512 			if (asoc->sctp_cmt_on_off > 0) {
3513 				/*
3514 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3515 				 * If CMT is being used, then pick dest with
3516 				 * largest ssthresh for any retransmission.
3517 				 */
3518 				tp1->no_fr_allowed = 1;
3519 				alt = tp1->whoTo;
3520 				/* sa_ignore NO_NULL_CHK */
3521 				if (asoc->sctp_cmt_pf > 0) {
3522 					/*
3523 					 * JRS 5/18/07 - If CMT PF is on,
3524 					 * use the PF version of
3525 					 * find_alt_net()
3526 					 */
3527 					alt = sctp_find_alternate_net(stcb, alt, 2);
3528 				} else {
3529 					/*
3530 					 * JRS 5/18/07 - If only CMT is on,
3531 					 * use the CMT version of
3532 					 * find_alt_net()
3533 					 */
3534 					/* sa_ignore NO_NULL_CHK */
3535 					alt = sctp_find_alternate_net(stcb, alt, 1);
3536 				}
3537 				if (alt == NULL) {
3538 					alt = tp1->whoTo;
3539 				}
3540 				/*
3541 				 * CUCv2: If a different dest is picked for
3542 				 * the retransmission, then new
3543 				 * (rtx-)pseudo_cumack needs to be tracked
3544 				 * for orig dest. Let CUCv2 track new (rtx-)
3545 				 * pseudo-cumack always.
3546 				 */
3547 				if (tp1->whoTo) {
3548 					tp1->whoTo->find_pseudo_cumack = 1;
3549 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3550 				}
3551 			} else {/* CMT is OFF */
3552 
3553 #ifdef SCTP_FR_TO_ALTERNATE
3554 				/* Can we find an alternate? */
3555 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3556 #else
3557 				/*
3558 				 * default behavior is to NOT retransmit
3559 				 * FR's to an alternate. Armando Caro's
3560 				 * paper details why.
3561 				 */
3562 				alt = tp1->whoTo;
3563 #endif
3564 			}
3565 
3566 			tp1->rec.data.doing_fast_retransmit = 1;
3567 			tot_retrans++;
3568 			/* mark the sending seq for possible subsequent FR's */
3569 			/*
3570 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3571 			 * (uint32_t)tpi->rec.data.TSN_seq);
3572 			 */
3573 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3574 				/*
3575 				 * If the queue of send is empty then its
3576 				 * the next sequence number that will be
3577 				 * assigned so we subtract one from this to
3578 				 * get the one we last sent.
3579 				 */
3580 				tp1->rec.data.fast_retran_tsn = sending_seq;
3581 			} else {
3582 				/*
3583 				 * If there are chunks on the send queue
3584 				 * (unsent data that has made it from the
3585 				 * stream queues but not out the door, we
3586 				 * take the first one (which will have the
3587 				 * lowest TSN) and subtract one to get the
3588 				 * one we last sent.
3589 				 */
3590 				struct sctp_tmit_chunk *ttt;
3591 
3592 				ttt = TAILQ_FIRST(&asoc->send_queue);
3593 				tp1->rec.data.fast_retran_tsn =
3594 				    ttt->rec.data.TSN_seq;
3595 			}
3596 
3597 			if (tp1->do_rtt) {
3598 				/*
3599 				 * this guy had a RTO calculation pending on
3600 				 * it, cancel it
3601 				 */
3602 				if ((tp1->whoTo != NULL) &&
3603 				    (tp1->whoTo->rto_needed == 0)) {
3604 					tp1->whoTo->rto_needed = 1;
3605 				}
3606 				tp1->do_rtt = 0;
3607 			}
3608 			if (alt != tp1->whoTo) {
3609 				/* yes, there is an alternate. */
3610 				sctp_free_remote_addr(tp1->whoTo);
3611 				/* sa_ignore FREED_MEMORY */
3612 				tp1->whoTo = alt;
3613 				atomic_add_int(&alt->ref_count, 1);
3614 			}
3615 		}
3616 	}
3617 }
3618 
3619 struct sctp_tmit_chunk *
3620 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3621     struct sctp_association *asoc)
3622 {
3623 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3624 	struct timeval now;
3625 	int now_filled = 0;
3626 
3627 	if (asoc->prsctp_supported == 0) {
3628 		return (NULL);
3629 	}
3630 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3631 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3632 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3633 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3634 			/* no chance to advance, out of here */
3635 			break;
3636 		}
3637 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3638 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3639 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3640 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3641 				    asoc->advanced_peer_ack_point,
3642 				    tp1->rec.data.TSN_seq, 0, 0);
3643 			}
3644 		}
3645 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3646 			/*
3647 			 * We can't fwd-tsn past any that are reliable aka
3648 			 * retransmitted until the asoc fails.
3649 			 */
3650 			break;
3651 		}
3652 		if (!now_filled) {
3653 			(void)SCTP_GETTIME_TIMEVAL(&now);
3654 			now_filled = 1;
3655 		}
3656 		/*
3657 		 * now we got a chunk which is marked for another
3658 		 * retransmission to a PR-stream but has run out its chances
3659 		 * already maybe OR has been marked to skip now. Can we skip
3660 		 * it if its a resend?
3661 		 */
3662 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3663 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3664 			/*
3665 			 * Now is this one marked for resend and its time is
3666 			 * now up?
3667 			 */
3668 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3669 				/* Yes so drop it */
3670 				if (tp1->data) {
3671 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3672 					    1, SCTP_SO_NOT_LOCKED);
3673 				}
3674 			} else {
3675 				/*
3676 				 * No, we are done when hit one for resend
3677 				 * whos time as not expired.
3678 				 */
3679 				break;
3680 			}
3681 		}
3682 		/*
3683 		 * Ok now if this chunk is marked to drop it we can clean up
3684 		 * the chunk, advance our peer ack point and we can check
3685 		 * the next chunk.
3686 		 */
3687 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3688 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3689 			/* advance PeerAckPoint goes forward */
3690 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3691 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3692 				a_adv = tp1;
3693 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3694 				/* No update but we do save the chk */
3695 				a_adv = tp1;
3696 			}
3697 		} else {
3698 			/*
3699 			 * If it is still in RESEND we can advance no
3700 			 * further
3701 			 */
3702 			break;
3703 		}
3704 	}
3705 	return (a_adv);
3706 }
3707 
3708 static int
3709 sctp_fs_audit(struct sctp_association *asoc)
3710 {
3711 	struct sctp_tmit_chunk *chk;
3712 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3713 	int ret;
3714 
3715 #ifndef INVARIANTS
3716 	int entry_flight, entry_cnt;
3717 
3718 #endif
3719 
3720 	ret = 0;
3721 #ifndef INVARIANTS
3722 	entry_flight = asoc->total_flight;
3723 	entry_cnt = asoc->total_flight_count;
3724 #endif
3725 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3726 		return (0);
3727 
3728 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3729 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3730 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3731 			    chk->rec.data.TSN_seq,
3732 			    chk->send_size,
3733 			    chk->snd_count);
3734 			inflight++;
3735 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3736 			resend++;
3737 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3738 			inbetween++;
3739 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3740 			above++;
3741 		} else {
3742 			acked++;
3743 		}
3744 	}
3745 
3746 	if ((inflight > 0) || (inbetween > 0)) {
3747 #ifdef INVARIANTS
3748 		panic("Flight size-express incorrect? \n");
3749 #else
3750 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3751 		    entry_flight, entry_cnt);
3752 
3753 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3754 		    inflight, inbetween, resend, above, acked);
3755 		ret = 1;
3756 #endif
3757 	}
3758 	return (ret);
3759 }
3760 
3761 
3762 static void
3763 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3764     struct sctp_association *asoc,
3765     struct sctp_tmit_chunk *tp1)
3766 {
3767 	tp1->window_probe = 0;
3768 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3769 		/* TSN's skipped we do NOT move back. */
3770 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3771 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3772 		    tp1->book_size,
3773 		    (uint32_t) (uintptr_t) tp1->whoTo,
3774 		    tp1->rec.data.TSN_seq);
3775 		return;
3776 	}
3777 	/* First setup this by shrinking flight */
3778 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3779 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3780 		    tp1);
3781 	}
3782 	sctp_flight_size_decrease(tp1);
3783 	sctp_total_flight_decrease(stcb, tp1);
3784 	/* Now mark for resend */
3785 	tp1->sent = SCTP_DATAGRAM_RESEND;
3786 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3787 
3788 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3789 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3790 		    tp1->whoTo->flight_size,
3791 		    tp1->book_size,
3792 		    (uint32_t) (uintptr_t) tp1->whoTo,
3793 		    tp1->rec.data.TSN_seq);
3794 	}
3795 }
3796 
3797 void
3798 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3799     uint32_t rwnd, int *abort_now, int ecne_seen)
3800 {
3801 	struct sctp_nets *net;
3802 	struct sctp_association *asoc;
3803 	struct sctp_tmit_chunk *tp1, *tp2;
3804 	uint32_t old_rwnd;
3805 	int win_probe_recovery = 0;
3806 	int win_probe_recovered = 0;
3807 	int j, done_once = 0;
3808 	int rto_ok = 1;
3809 	uint32_t send_s;
3810 
3811 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3812 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3813 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3814 	}
3815 	SCTP_TCB_LOCK_ASSERT(stcb);
3816 #ifdef SCTP_ASOCLOG_OF_TSNS
3817 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3818 	stcb->asoc.cumack_log_at++;
3819 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3820 		stcb->asoc.cumack_log_at = 0;
3821 	}
3822 #endif
3823 	asoc = &stcb->asoc;
3824 	old_rwnd = asoc->peers_rwnd;
3825 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3826 		/* old ack */
3827 		return;
3828 	} else if (asoc->last_acked_seq == cumack) {
3829 		/* Window update sack */
3830 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3831 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3832 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3833 			/* SWS sender side engages */
3834 			asoc->peers_rwnd = 0;
3835 		}
3836 		if (asoc->peers_rwnd > old_rwnd) {
3837 			goto again;
3838 		}
3839 		return;
3840 	}
3841 	/* First setup for CC stuff */
3842 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3843 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3844 			/* Drag along the window_tsn for cwr's */
3845 			net->cwr_window_tsn = cumack;
3846 		}
3847 		net->prev_cwnd = net->cwnd;
3848 		net->net_ack = 0;
3849 		net->net_ack2 = 0;
3850 
3851 		/*
3852 		 * CMT: Reset CUC and Fast recovery algo variables before
3853 		 * SACK processing
3854 		 */
3855 		net->new_pseudo_cumack = 0;
3856 		net->will_exit_fast_recovery = 0;
3857 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3858 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3859 		}
3860 	}
3861 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3862 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3863 		    sctpchunk_listhead);
3864 		send_s = tp1->rec.data.TSN_seq + 1;
3865 	} else {
3866 		send_s = asoc->sending_seq;
3867 	}
3868 	if (SCTP_TSN_GE(cumack, send_s)) {
3869 		struct mbuf *op_err;
3870 		char msg[SCTP_DIAG_INFO_LEN];
3871 
3872 		*abort_now = 1;
3873 		/* XXX */
3874 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3875 		    cumack, send_s);
3876 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3877 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3878 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3879 		return;
3880 	}
3881 	asoc->this_sack_highest_gap = cumack;
3882 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3883 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3884 		    stcb->asoc.overall_error_count,
3885 		    0,
3886 		    SCTP_FROM_SCTP_INDATA,
3887 		    __LINE__);
3888 	}
3889 	stcb->asoc.overall_error_count = 0;
3890 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3891 		/* process the new consecutive TSN first */
3892 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3893 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3894 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3895 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3896 				}
3897 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3898 					/*
3899 					 * If it is less than ACKED, it is
3900 					 * now no-longer in flight. Higher
3901 					 * values may occur during marking
3902 					 */
3903 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3904 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3905 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3906 							    tp1->whoTo->flight_size,
3907 							    tp1->book_size,
3908 							    (uint32_t) (uintptr_t) tp1->whoTo,
3909 							    tp1->rec.data.TSN_seq);
3910 						}
3911 						sctp_flight_size_decrease(tp1);
3912 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3913 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3914 							    tp1);
3915 						}
3916 						/* sa_ignore NO_NULL_CHK */
3917 						sctp_total_flight_decrease(stcb, tp1);
3918 					}
3919 					tp1->whoTo->net_ack += tp1->send_size;
3920 					if (tp1->snd_count < 2) {
3921 						/*
3922 						 * True non-retransmited
3923 						 * chunk
3924 						 */
3925 						tp1->whoTo->net_ack2 +=
3926 						    tp1->send_size;
3927 
3928 						/* update RTO too? */
3929 						if (tp1->do_rtt) {
3930 							if (rto_ok) {
3931 								tp1->whoTo->RTO =
3932 								/*
3933 								 * sa_ignore
3934 								 * NO_NULL_CH
3935 								 * K
3936 								 */
3937 								    sctp_calculate_rto(stcb,
3938 								    asoc, tp1->whoTo,
3939 								    &tp1->sent_rcv_time,
3940 								    sctp_align_safe_nocopy,
3941 								    SCTP_RTT_FROM_DATA);
3942 								rto_ok = 0;
3943 							}
3944 							if (tp1->whoTo->rto_needed == 0) {
3945 								tp1->whoTo->rto_needed = 1;
3946 							}
3947 							tp1->do_rtt = 0;
3948 						}
3949 					}
3950 					/*
3951 					 * CMT: CUCv2 algorithm. From the
3952 					 * cumack'd TSNs, for each TSN being
3953 					 * acked for the first time, set the
3954 					 * following variables for the
3955 					 * corresp destination.
3956 					 * new_pseudo_cumack will trigger a
3957 					 * cwnd update.
3958 					 * find_(rtx_)pseudo_cumack will
3959 					 * trigger search for the next
3960 					 * expected (rtx-)pseudo-cumack.
3961 					 */
3962 					tp1->whoTo->new_pseudo_cumack = 1;
3963 					tp1->whoTo->find_pseudo_cumack = 1;
3964 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3965 
3966 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3967 						/* sa_ignore NO_NULL_CHK */
3968 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3969 					}
3970 				}
3971 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3972 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3973 				}
3974 				if (tp1->rec.data.chunk_was_revoked) {
3975 					/* deflate the cwnd */
3976 					tp1->whoTo->cwnd -= tp1->book_size;
3977 					tp1->rec.data.chunk_was_revoked = 0;
3978 				}
3979 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3980 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3981 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3982 #ifdef INVARIANTS
3983 					} else {
3984 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3985 #endif
3986 					}
3987 				}
3988 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3989 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3990 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3991 					asoc->trigger_reset = 1;
3992 				}
3993 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3994 				if (tp1->data) {
3995 					/* sa_ignore NO_NULL_CHK */
3996 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3997 					sctp_m_freem(tp1->data);
3998 					tp1->data = NULL;
3999 				}
4000 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4001 					sctp_log_sack(asoc->last_acked_seq,
4002 					    cumack,
4003 					    tp1->rec.data.TSN_seq,
4004 					    0,
4005 					    0,
4006 					    SCTP_LOG_FREE_SENT);
4007 				}
4008 				asoc->sent_queue_cnt--;
4009 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4010 			} else {
4011 				break;
4012 			}
4013 		}
4014 
4015 	}
4016 	/* sa_ignore NO_NULL_CHK */
4017 	if (stcb->sctp_socket) {
4018 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4019 		struct socket *so;
4020 
4021 #endif
4022 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4023 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4024 			/* sa_ignore NO_NULL_CHK */
4025 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4026 		}
4027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4028 		so = SCTP_INP_SO(stcb->sctp_ep);
4029 		atomic_add_int(&stcb->asoc.refcnt, 1);
4030 		SCTP_TCB_UNLOCK(stcb);
4031 		SCTP_SOCKET_LOCK(so, 1);
4032 		SCTP_TCB_LOCK(stcb);
4033 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4034 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4035 			/* assoc was freed while we were unlocked */
4036 			SCTP_SOCKET_UNLOCK(so, 1);
4037 			return;
4038 		}
4039 #endif
4040 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4041 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4042 		SCTP_SOCKET_UNLOCK(so, 1);
4043 #endif
4044 	} else {
4045 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4046 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4047 		}
4048 	}
4049 
4050 	/* JRS - Use the congestion control given in the CC module */
4051 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4052 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4053 			if (net->net_ack2 > 0) {
4054 				/*
4055 				 * Karn's rule applies to clearing error
4056 				 * count, this is optional.
4057 				 */
4058 				net->error_count = 0;
4059 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4060 					/* addr came good */
4061 					net->dest_state |= SCTP_ADDR_REACHABLE;
4062 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4063 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4064 				}
4065 				if (net == stcb->asoc.primary_destination) {
4066 					if (stcb->asoc.alternate) {
4067 						/*
4068 						 * release the alternate,
4069 						 * primary is good
4070 						 */
4071 						sctp_free_remote_addr(stcb->asoc.alternate);
4072 						stcb->asoc.alternate = NULL;
4073 					}
4074 				}
4075 				if (net->dest_state & SCTP_ADDR_PF) {
4076 					net->dest_state &= ~SCTP_ADDR_PF;
4077 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4078 					    stcb->sctp_ep, stcb, net,
4079 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4080 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4081 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4082 					/* Done with this net */
4083 					net->net_ack = 0;
4084 				}
4085 				/* restore any doubled timers */
4086 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4087 				if (net->RTO < stcb->asoc.minrto) {
4088 					net->RTO = stcb->asoc.minrto;
4089 				}
4090 				if (net->RTO > stcb->asoc.maxrto) {
4091 					net->RTO = stcb->asoc.maxrto;
4092 				}
4093 			}
4094 		}
4095 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4096 	}
4097 	asoc->last_acked_seq = cumack;
4098 
4099 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4100 		/* nothing left in-flight */
4101 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4102 			net->flight_size = 0;
4103 			net->partial_bytes_acked = 0;
4104 		}
4105 		asoc->total_flight = 0;
4106 		asoc->total_flight_count = 0;
4107 	}
4108 	/* RWND update */
4109 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4110 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4111 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4112 		/* SWS sender side engages */
4113 		asoc->peers_rwnd = 0;
4114 	}
4115 	if (asoc->peers_rwnd > old_rwnd) {
4116 		win_probe_recovery = 1;
4117 	}
4118 	/* Now assure a timer where data is queued at */
4119 again:
4120 	j = 0;
4121 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4122 		int to_ticks;
4123 
4124 		if (win_probe_recovery && (net->window_probe)) {
4125 			win_probe_recovered = 1;
4126 			/*
4127 			 * Find first chunk that was used with window probe
4128 			 * and clear the sent
4129 			 */
4130 			/* sa_ignore FREED_MEMORY */
4131 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4132 				if (tp1->window_probe) {
4133 					/* move back to data send queue */
4134 					sctp_window_probe_recovery(stcb, asoc, tp1);
4135 					break;
4136 				}
4137 			}
4138 		}
4139 		if (net->RTO == 0) {
4140 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4141 		} else {
4142 			to_ticks = MSEC_TO_TICKS(net->RTO);
4143 		}
4144 		if (net->flight_size) {
4145 			j++;
4146 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4147 			    sctp_timeout_handler, &net->rxt_timer);
4148 			if (net->window_probe) {
4149 				net->window_probe = 0;
4150 			}
4151 		} else {
4152 			if (net->window_probe) {
4153 				/*
4154 				 * In window probes we must assure a timer
4155 				 * is still running there
4156 				 */
4157 				net->window_probe = 0;
4158 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4159 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4160 					    sctp_timeout_handler, &net->rxt_timer);
4161 				}
4162 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4163 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4164 				    stcb, net,
4165 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4166 			}
4167 		}
4168 	}
4169 	if ((j == 0) &&
4170 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4171 	    (asoc->sent_queue_retran_cnt == 0) &&
4172 	    (win_probe_recovered == 0) &&
4173 	    (done_once == 0)) {
4174 		/*
4175 		 * huh, this should not happen unless all packets are
4176 		 * PR-SCTP and marked to skip of course.
4177 		 */
4178 		if (sctp_fs_audit(asoc)) {
4179 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4180 				net->flight_size = 0;
4181 			}
4182 			asoc->total_flight = 0;
4183 			asoc->total_flight_count = 0;
4184 			asoc->sent_queue_retran_cnt = 0;
4185 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4186 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4187 					sctp_flight_size_increase(tp1);
4188 					sctp_total_flight_increase(stcb, tp1);
4189 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4190 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4191 				}
4192 			}
4193 		}
4194 		done_once = 1;
4195 		goto again;
4196 	}
4197 	/**********************************/
4198 	/* Now what about shutdown issues */
4199 	/**********************************/
4200 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4201 		/* nothing left on sendqueue.. consider done */
4202 		/* clean up */
4203 		if ((asoc->stream_queue_cnt == 1) &&
4204 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4205 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4206 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4207 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4208 		}
4209 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4210 		    (asoc->stream_queue_cnt == 0)) {
4211 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4212 				/* Need to abort here */
4213 				struct mbuf *op_err;
4214 
4215 		abort_out_now:
4216 				*abort_now = 1;
4217 				/* XXX */
4218 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4219 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4220 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4221 				return;
4222 			} else {
4223 				struct sctp_nets *netp;
4224 
4225 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4226 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4227 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4228 				}
4229 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4230 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4231 				sctp_stop_timers_for_shutdown(stcb);
4232 				if (asoc->alternate) {
4233 					netp = asoc->alternate;
4234 				} else {
4235 					netp = asoc->primary_destination;
4236 				}
4237 				sctp_send_shutdown(stcb, netp);
4238 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4239 				    stcb->sctp_ep, stcb, netp);
4240 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4241 				    stcb->sctp_ep, stcb, netp);
4242 			}
4243 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4244 		    (asoc->stream_queue_cnt == 0)) {
4245 			struct sctp_nets *netp;
4246 
4247 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4248 				goto abort_out_now;
4249 			}
4250 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4251 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4252 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4253 			sctp_stop_timers_for_shutdown(stcb);
4254 			if (asoc->alternate) {
4255 				netp = asoc->alternate;
4256 			} else {
4257 				netp = asoc->primary_destination;
4258 			}
4259 			sctp_send_shutdown_ack(stcb, netp);
4260 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4261 			    stcb->sctp_ep, stcb, netp);
4262 		}
4263 	}
4264 	/*********************************************/
4265 	/* Here we perform PR-SCTP procedures        */
4266 	/* (section 4.2)                             */
4267 	/*********************************************/
4268 	/* C1. update advancedPeerAckPoint */
4269 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4270 		asoc->advanced_peer_ack_point = cumack;
4271 	}
4272 	/* PR-Sctp issues need to be addressed too */
4273 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4274 		struct sctp_tmit_chunk *lchk;
4275 		uint32_t old_adv_peer_ack_point;
4276 
4277 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4278 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4279 		/* C3. See if we need to send a Fwd-TSN */
4280 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4281 			/*
4282 			 * ISSUE with ECN, see FWD-TSN processing.
4283 			 */
4284 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4285 				send_forward_tsn(stcb, asoc);
4286 			} else if (lchk) {
4287 				/* try to FR fwd-tsn's that get lost too */
4288 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4289 					send_forward_tsn(stcb, asoc);
4290 				}
4291 			}
4292 		}
4293 		if (lchk) {
4294 			/* Assure a timer is up */
4295 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4296 			    stcb->sctp_ep, stcb, lchk->whoTo);
4297 		}
4298 	}
4299 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4300 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4301 		    rwnd,
4302 		    stcb->asoc.peers_rwnd,
4303 		    stcb->asoc.total_flight,
4304 		    stcb->asoc.total_output_queue_size);
4305 	}
4306 }
4307 
4308 void
4309 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4310     struct sctp_tcb *stcb,
4311     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4312     int *abort_now, uint8_t flags,
4313     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4314 {
4315 	struct sctp_association *asoc;
4316 	struct sctp_tmit_chunk *tp1, *tp2;
4317 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4318 	uint16_t wake_him = 0;
4319 	uint32_t send_s = 0;
4320 	long j;
4321 	int accum_moved = 0;
4322 	int will_exit_fast_recovery = 0;
4323 	uint32_t a_rwnd, old_rwnd;
4324 	int win_probe_recovery = 0;
4325 	int win_probe_recovered = 0;
4326 	struct sctp_nets *net = NULL;
4327 	int done_once;
4328 	int rto_ok = 1;
4329 	uint8_t reneged_all = 0;
4330 	uint8_t cmt_dac_flag;
4331 
4332 	/*
4333 	 * we take any chance we can to service our queues since we cannot
4334 	 * get awoken when the socket is read from :<
4335 	 */
4336 	/*
4337 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4338 	 * old sack, if so discard. 2) If there is nothing left in the send
4339 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4340 	 * too, update any rwnd change and verify no timers are running.
4341 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4342 	 * moved process these first and note that it moved. 4) Process any
4343 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4344 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4345 	 * sync up flightsizes and things, stop all timers and also check
4346 	 * for shutdown_pending state. If so then go ahead and send off the
4347 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4348 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4349 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4350 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4351 	 * if in shutdown_recv state.
4352 	 */
4353 	SCTP_TCB_LOCK_ASSERT(stcb);
4354 	/* CMT DAC algo */
4355 	this_sack_lowest_newack = 0;
4356 	SCTP_STAT_INCR(sctps_slowpath_sack);
4357 	last_tsn = cum_ack;
4358 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4359 #ifdef SCTP_ASOCLOG_OF_TSNS
4360 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4361 	stcb->asoc.cumack_log_at++;
4362 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4363 		stcb->asoc.cumack_log_at = 0;
4364 	}
4365 #endif
4366 	a_rwnd = rwnd;
4367 
4368 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4369 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4370 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4371 	}
4372 	old_rwnd = stcb->asoc.peers_rwnd;
4373 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4374 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4375 		    stcb->asoc.overall_error_count,
4376 		    0,
4377 		    SCTP_FROM_SCTP_INDATA,
4378 		    __LINE__);
4379 	}
4380 	stcb->asoc.overall_error_count = 0;
4381 	asoc = &stcb->asoc;
4382 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4383 		sctp_log_sack(asoc->last_acked_seq,
4384 		    cum_ack,
4385 		    0,
4386 		    num_seg,
4387 		    num_dup,
4388 		    SCTP_LOG_NEW_SACK);
4389 	}
4390 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4391 		uint16_t i;
4392 		uint32_t *dupdata, dblock;
4393 
4394 		for (i = 0; i < num_dup; i++) {
4395 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4396 			    sizeof(uint32_t), (uint8_t *) & dblock);
4397 			if (dupdata == NULL) {
4398 				break;
4399 			}
4400 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4401 		}
4402 	}
4403 	/* reality check */
4404 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4405 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4406 		    sctpchunk_listhead);
4407 		send_s = tp1->rec.data.TSN_seq + 1;
4408 	} else {
4409 		tp1 = NULL;
4410 		send_s = asoc->sending_seq;
4411 	}
4412 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4413 		struct mbuf *op_err;
4414 		char msg[SCTP_DIAG_INFO_LEN];
4415 
4416 		/*
4417 		 * no way, we have not even sent this TSN out yet. Peer is
4418 		 * hopelessly messed up with us.
4419 		 */
4420 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4421 		    cum_ack, send_s);
4422 		if (tp1) {
4423 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4424 			    tp1->rec.data.TSN_seq, (void *)tp1);
4425 		}
4426 hopeless_peer:
4427 		*abort_now = 1;
4428 		/* XXX */
4429 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4430 		    cum_ack, send_s);
4431 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4432 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4433 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4434 		return;
4435 	}
4436 	/**********************/
4437 	/* 1) check the range */
4438 	/**********************/
4439 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4440 		/* acking something behind */
4441 		return;
4442 	}
4443 	/* update the Rwnd of the peer */
4444 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4445 	    TAILQ_EMPTY(&asoc->send_queue) &&
4446 	    (asoc->stream_queue_cnt == 0)) {
4447 		/* nothing left on send/sent and strmq */
4448 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4449 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4450 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4451 		}
4452 		asoc->peers_rwnd = a_rwnd;
4453 		if (asoc->sent_queue_retran_cnt) {
4454 			asoc->sent_queue_retran_cnt = 0;
4455 		}
4456 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4457 			/* SWS sender side engages */
4458 			asoc->peers_rwnd = 0;
4459 		}
4460 		/* stop any timers */
4461 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4462 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4463 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4464 			net->partial_bytes_acked = 0;
4465 			net->flight_size = 0;
4466 		}
4467 		asoc->total_flight = 0;
4468 		asoc->total_flight_count = 0;
4469 		return;
4470 	}
4471 	/*
4472 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4473 	 * things. The total byte count acked is tracked in netAckSz AND
4474 	 * netAck2 is used to track the total bytes acked that are un-
4475 	 * amibguious and were never retransmitted. We track these on a per
4476 	 * destination address basis.
4477 	 */
4478 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4479 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4480 			/* Drag along the window_tsn for cwr's */
4481 			net->cwr_window_tsn = cum_ack;
4482 		}
4483 		net->prev_cwnd = net->cwnd;
4484 		net->net_ack = 0;
4485 		net->net_ack2 = 0;
4486 
4487 		/*
4488 		 * CMT: Reset CUC and Fast recovery algo variables before
4489 		 * SACK processing
4490 		 */
4491 		net->new_pseudo_cumack = 0;
4492 		net->will_exit_fast_recovery = 0;
4493 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4494 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4495 		}
4496 	}
4497 	/* process the new consecutive TSN first */
4498 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4499 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4500 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4501 				accum_moved = 1;
4502 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4503 					/*
4504 					 * If it is less than ACKED, it is
4505 					 * now no-longer in flight. Higher
4506 					 * values may occur during marking
4507 					 */
4508 					if ((tp1->whoTo->dest_state &
4509 					    SCTP_ADDR_UNCONFIRMED) &&
4510 					    (tp1->snd_count < 2)) {
4511 						/*
4512 						 * If there was no retran
4513 						 * and the address is
4514 						 * un-confirmed and we sent
4515 						 * there and are now
4516 						 * sacked.. its confirmed,
4517 						 * mark it so.
4518 						 */
4519 						tp1->whoTo->dest_state &=
4520 						    ~SCTP_ADDR_UNCONFIRMED;
4521 					}
4522 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4523 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4524 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4525 							    tp1->whoTo->flight_size,
4526 							    tp1->book_size,
4527 							    (uint32_t) (uintptr_t) tp1->whoTo,
4528 							    tp1->rec.data.TSN_seq);
4529 						}
4530 						sctp_flight_size_decrease(tp1);
4531 						sctp_total_flight_decrease(stcb, tp1);
4532 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4533 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4534 							    tp1);
4535 						}
4536 					}
4537 					tp1->whoTo->net_ack += tp1->send_size;
4538 
4539 					/* CMT SFR and DAC algos */
4540 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4541 					tp1->whoTo->saw_newack = 1;
4542 
4543 					if (tp1->snd_count < 2) {
4544 						/*
4545 						 * True non-retransmited
4546 						 * chunk
4547 						 */
4548 						tp1->whoTo->net_ack2 +=
4549 						    tp1->send_size;
4550 
4551 						/* update RTO too? */
4552 						if (tp1->do_rtt) {
4553 							if (rto_ok) {
4554 								tp1->whoTo->RTO =
4555 								    sctp_calculate_rto(stcb,
4556 								    asoc, tp1->whoTo,
4557 								    &tp1->sent_rcv_time,
4558 								    sctp_align_safe_nocopy,
4559 								    SCTP_RTT_FROM_DATA);
4560 								rto_ok = 0;
4561 							}
4562 							if (tp1->whoTo->rto_needed == 0) {
4563 								tp1->whoTo->rto_needed = 1;
4564 							}
4565 							tp1->do_rtt = 0;
4566 						}
4567 					}
4568 					/*
4569 					 * CMT: CUCv2 algorithm. From the
4570 					 * cumack'd TSNs, for each TSN being
4571 					 * acked for the first time, set the
4572 					 * following variables for the
4573 					 * corresp destination.
4574 					 * new_pseudo_cumack will trigger a
4575 					 * cwnd update.
4576 					 * find_(rtx_)pseudo_cumack will
4577 					 * trigger search for the next
4578 					 * expected (rtx-)pseudo-cumack.
4579 					 */
4580 					tp1->whoTo->new_pseudo_cumack = 1;
4581 					tp1->whoTo->find_pseudo_cumack = 1;
4582 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4583 
4584 
4585 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4586 						sctp_log_sack(asoc->last_acked_seq,
4587 						    cum_ack,
4588 						    tp1->rec.data.TSN_seq,
4589 						    0,
4590 						    0,
4591 						    SCTP_LOG_TSN_ACKED);
4592 					}
4593 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4594 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4595 					}
4596 				}
4597 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4598 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4599 #ifdef SCTP_AUDITING_ENABLED
4600 					sctp_audit_log(0xB3,
4601 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4602 #endif
4603 				}
4604 				if (tp1->rec.data.chunk_was_revoked) {
4605 					/* deflate the cwnd */
4606 					tp1->whoTo->cwnd -= tp1->book_size;
4607 					tp1->rec.data.chunk_was_revoked = 0;
4608 				}
4609 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4610 					tp1->sent = SCTP_DATAGRAM_ACKED;
4611 				}
4612 			}
4613 		} else {
4614 			break;
4615 		}
4616 	}
4617 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4618 	/* always set this up to cum-ack */
4619 	asoc->this_sack_highest_gap = last_tsn;
4620 
4621 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4622 
4623 		/*
4624 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4625 		 * to be greater than the cumack. Also reset saw_newack to 0
4626 		 * for all dests.
4627 		 */
4628 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4629 			net->saw_newack = 0;
4630 			net->this_sack_highest_newack = last_tsn;
4631 		}
4632 
4633 		/*
4634 		 * thisSackHighestGap will increase while handling NEW
4635 		 * segments this_sack_highest_newack will increase while
4636 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4637 		 * used for CMT DAC algo. saw_newack will also change.
4638 		 */
4639 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4640 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4641 		    num_seg, num_nr_seg, &rto_ok)) {
4642 			wake_him++;
4643 		}
4644 		/*
4645 		 * validate the biggest_tsn_acked in the gap acks if strict
4646 		 * adherence is wanted.
4647 		 */
4648 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4649 			/*
4650 			 * peer is either confused or we are under attack.
4651 			 * We must abort.
4652 			 */
4653 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4654 			    biggest_tsn_acked, send_s);
4655 			goto hopeless_peer;
4656 		}
4657 	}
4658 	/*******************************************/
4659 	/* cancel ALL T3-send timer if accum moved */
4660 	/*******************************************/
4661 	if (asoc->sctp_cmt_on_off > 0) {
4662 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4663 			if (net->new_pseudo_cumack)
4664 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4665 				    stcb, net,
4666 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4667 
4668 		}
4669 	} else {
4670 		if (accum_moved) {
4671 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4672 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4673 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4674 			}
4675 		}
4676 	}
4677 	/********************************************/
4678 	/* drop the acked chunks from the sentqueue */
4679 	/********************************************/
4680 	asoc->last_acked_seq = cum_ack;
4681 
4682 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4683 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4684 			break;
4685 		}
4686 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4687 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4688 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4689 #ifdef INVARIANTS
4690 			} else {
4691 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4692 #endif
4693 			}
4694 		}
4695 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4696 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4697 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4698 			asoc->trigger_reset = 1;
4699 		}
4700 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4701 		if (PR_SCTP_ENABLED(tp1->flags)) {
4702 			if (asoc->pr_sctp_cnt != 0)
4703 				asoc->pr_sctp_cnt--;
4704 		}
4705 		asoc->sent_queue_cnt--;
4706 		if (tp1->data) {
4707 			/* sa_ignore NO_NULL_CHK */
4708 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4709 			sctp_m_freem(tp1->data);
4710 			tp1->data = NULL;
4711 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4712 				asoc->sent_queue_cnt_removeable--;
4713 			}
4714 		}
4715 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4716 			sctp_log_sack(asoc->last_acked_seq,
4717 			    cum_ack,
4718 			    tp1->rec.data.TSN_seq,
4719 			    0,
4720 			    0,
4721 			    SCTP_LOG_FREE_SENT);
4722 		}
4723 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4724 		wake_him++;
4725 	}
4726 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4727 #ifdef INVARIANTS
4728 		panic("Warning flight size is positive and should be 0");
4729 #else
4730 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4731 		    asoc->total_flight);
4732 #endif
4733 		asoc->total_flight = 0;
4734 	}
4735 	/* sa_ignore NO_NULL_CHK */
4736 	if ((wake_him) && (stcb->sctp_socket)) {
4737 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4738 		struct socket *so;
4739 
4740 #endif
4741 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4742 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4743 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4744 		}
4745 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4746 		so = SCTP_INP_SO(stcb->sctp_ep);
4747 		atomic_add_int(&stcb->asoc.refcnt, 1);
4748 		SCTP_TCB_UNLOCK(stcb);
4749 		SCTP_SOCKET_LOCK(so, 1);
4750 		SCTP_TCB_LOCK(stcb);
4751 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4752 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4753 			/* assoc was freed while we were unlocked */
4754 			SCTP_SOCKET_UNLOCK(so, 1);
4755 			return;
4756 		}
4757 #endif
4758 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4759 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4760 		SCTP_SOCKET_UNLOCK(so, 1);
4761 #endif
4762 	} else {
4763 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4764 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4765 		}
4766 	}
4767 
4768 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4769 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4770 			/* Setup so we will exit RFC2582 fast recovery */
4771 			will_exit_fast_recovery = 1;
4772 		}
4773 	}
4774 	/*
4775 	 * Check for revoked fragments:
4776 	 *
4777 	 * if Previous sack - Had no frags then we can't have any revoked if
4778 	 * Previous sack - Had frag's then - If we now have frags aka
4779 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4780 	 * some of them. else - The peer revoked all ACKED fragments, since
4781 	 * we had some before and now we have NONE.
4782 	 */
4783 
4784 	if (num_seg) {
4785 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4786 		asoc->saw_sack_with_frags = 1;
4787 	} else if (asoc->saw_sack_with_frags) {
4788 		int cnt_revoked = 0;
4789 
4790 		/* Peer revoked all dg's marked or acked */
4791 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4792 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4793 				tp1->sent = SCTP_DATAGRAM_SENT;
4794 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4795 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4796 					    tp1->whoTo->flight_size,
4797 					    tp1->book_size,
4798 					    (uint32_t) (uintptr_t) tp1->whoTo,
4799 					    tp1->rec.data.TSN_seq);
4800 				}
4801 				sctp_flight_size_increase(tp1);
4802 				sctp_total_flight_increase(stcb, tp1);
4803 				tp1->rec.data.chunk_was_revoked = 1;
4804 				/*
4805 				 * To ensure that this increase in
4806 				 * flightsize, which is artificial, does not
4807 				 * throttle the sender, we also increase the
4808 				 * cwnd artificially.
4809 				 */
4810 				tp1->whoTo->cwnd += tp1->book_size;
4811 				cnt_revoked++;
4812 			}
4813 		}
4814 		if (cnt_revoked) {
4815 			reneged_all = 1;
4816 		}
4817 		asoc->saw_sack_with_frags = 0;
4818 	}
4819 	if (num_nr_seg > 0)
4820 		asoc->saw_sack_with_nr_frags = 1;
4821 	else
4822 		asoc->saw_sack_with_nr_frags = 0;
4823 
4824 	/* JRS - Use the congestion control given in the CC module */
4825 	if (ecne_seen == 0) {
4826 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4827 			if (net->net_ack2 > 0) {
4828 				/*
4829 				 * Karn's rule applies to clearing error
4830 				 * count, this is optional.
4831 				 */
4832 				net->error_count = 0;
4833 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4834 					/* addr came good */
4835 					net->dest_state |= SCTP_ADDR_REACHABLE;
4836 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4837 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4838 				}
4839 				if (net == stcb->asoc.primary_destination) {
4840 					if (stcb->asoc.alternate) {
4841 						/*
4842 						 * release the alternate,
4843 						 * primary is good
4844 						 */
4845 						sctp_free_remote_addr(stcb->asoc.alternate);
4846 						stcb->asoc.alternate = NULL;
4847 					}
4848 				}
4849 				if (net->dest_state & SCTP_ADDR_PF) {
4850 					net->dest_state &= ~SCTP_ADDR_PF;
4851 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4852 					    stcb->sctp_ep, stcb, net,
4853 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4854 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4855 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4856 					/* Done with this net */
4857 					net->net_ack = 0;
4858 				}
4859 				/* restore any doubled timers */
4860 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4861 				if (net->RTO < stcb->asoc.minrto) {
4862 					net->RTO = stcb->asoc.minrto;
4863 				}
4864 				if (net->RTO > stcb->asoc.maxrto) {
4865 					net->RTO = stcb->asoc.maxrto;
4866 				}
4867 			}
4868 		}
4869 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4870 	}
4871 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4872 		/* nothing left in-flight */
4873 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4874 			/* stop all timers */
4875 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4876 			    stcb, net,
4877 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4878 			net->flight_size = 0;
4879 			net->partial_bytes_acked = 0;
4880 		}
4881 		asoc->total_flight = 0;
4882 		asoc->total_flight_count = 0;
4883 	}
4884 	/**********************************/
4885 	/* Now what about shutdown issues */
4886 	/**********************************/
4887 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4888 		/* nothing left on sendqueue.. consider done */
4889 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4890 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4891 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4892 		}
4893 		asoc->peers_rwnd = a_rwnd;
4894 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4895 			/* SWS sender side engages */
4896 			asoc->peers_rwnd = 0;
4897 		}
4898 		/* clean up */
4899 		if ((asoc->stream_queue_cnt == 1) &&
4900 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4901 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4902 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4903 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4904 		}
4905 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4906 		    (asoc->stream_queue_cnt == 0)) {
4907 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4908 				/* Need to abort here */
4909 				struct mbuf *op_err;
4910 
4911 		abort_out_now:
4912 				*abort_now = 1;
4913 				/* XXX */
4914 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4915 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4916 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4917 				return;
4918 			} else {
4919 				struct sctp_nets *netp;
4920 
4921 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4922 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4923 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4924 				}
4925 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4926 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4927 				sctp_stop_timers_for_shutdown(stcb);
4928 				if (asoc->alternate) {
4929 					netp = asoc->alternate;
4930 				} else {
4931 					netp = asoc->primary_destination;
4932 				}
4933 				sctp_send_shutdown(stcb, netp);
4934 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4935 				    stcb->sctp_ep, stcb, netp);
4936 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4937 				    stcb->sctp_ep, stcb, netp);
4938 			}
4939 			return;
4940 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4941 		    (asoc->stream_queue_cnt == 0)) {
4942 			struct sctp_nets *netp;
4943 
4944 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4945 				goto abort_out_now;
4946 			}
4947 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4948 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4949 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4950 			sctp_stop_timers_for_shutdown(stcb);
4951 			if (asoc->alternate) {
4952 				netp = asoc->alternate;
4953 			} else {
4954 				netp = asoc->primary_destination;
4955 			}
4956 			sctp_send_shutdown_ack(stcb, netp);
4957 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4958 			    stcb->sctp_ep, stcb, netp);
4959 			return;
4960 		}
4961 	}
4962 	/*
4963 	 * Now here we are going to recycle net_ack for a different use...
4964 	 * HEADS UP.
4965 	 */
4966 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4967 		net->net_ack = 0;
4968 	}
4969 
4970 	/*
4971 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4972 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4973 	 * automatically ensure that.
4974 	 */
4975 	if ((asoc->sctp_cmt_on_off > 0) &&
4976 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4977 	    (cmt_dac_flag == 0)) {
4978 		this_sack_lowest_newack = cum_ack;
4979 	}
4980 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4981 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4982 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4983 	}
4984 	/* JRS - Use the congestion control given in the CC module */
4985 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4986 
4987 	/* Now are we exiting loss recovery ? */
4988 	if (will_exit_fast_recovery) {
4989 		/* Ok, we must exit fast recovery */
4990 		asoc->fast_retran_loss_recovery = 0;
4991 	}
4992 	if ((asoc->sat_t3_loss_recovery) &&
4993 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4994 		/* end satellite t3 loss recovery */
4995 		asoc->sat_t3_loss_recovery = 0;
4996 	}
4997 	/*
4998 	 * CMT Fast recovery
4999 	 */
5000 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5001 		if (net->will_exit_fast_recovery) {
5002 			/* Ok, we must exit fast recovery */
5003 			net->fast_retran_loss_recovery = 0;
5004 		}
5005 	}
5006 
5007 	/* Adjust and set the new rwnd value */
5008 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5009 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5010 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5011 	}
5012 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5013 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5014 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5015 		/* SWS sender side engages */
5016 		asoc->peers_rwnd = 0;
5017 	}
5018 	if (asoc->peers_rwnd > old_rwnd) {
5019 		win_probe_recovery = 1;
5020 	}
5021 	/*
5022 	 * Now we must setup so we have a timer up for anyone with
5023 	 * outstanding data.
5024 	 */
5025 	done_once = 0;
5026 again:
5027 	j = 0;
5028 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5029 		if (win_probe_recovery && (net->window_probe)) {
5030 			win_probe_recovered = 1;
5031 			/*-
5032 			 * Find first chunk that was used with
5033 			 * window probe and clear the event. Put
5034 			 * it back into the send queue as if has
5035 			 * not been sent.
5036 			 */
5037 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5038 				if (tp1->window_probe) {
5039 					sctp_window_probe_recovery(stcb, asoc, tp1);
5040 					break;
5041 				}
5042 			}
5043 		}
5044 		if (net->flight_size) {
5045 			j++;
5046 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5047 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5048 				    stcb->sctp_ep, stcb, net);
5049 			}
5050 			if (net->window_probe) {
5051 				net->window_probe = 0;
5052 			}
5053 		} else {
5054 			if (net->window_probe) {
5055 				/*
5056 				 * In window probes we must assure a timer
5057 				 * is still running there
5058 				 */
5059 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5060 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5061 					    stcb->sctp_ep, stcb, net);
5062 
5063 				}
5064 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5065 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5066 				    stcb, net,
5067 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5068 			}
5069 		}
5070 	}
5071 	if ((j == 0) &&
5072 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5073 	    (asoc->sent_queue_retran_cnt == 0) &&
5074 	    (win_probe_recovered == 0) &&
5075 	    (done_once == 0)) {
5076 		/*
5077 		 * huh, this should not happen unless all packets are
5078 		 * PR-SCTP and marked to skip of course.
5079 		 */
5080 		if (sctp_fs_audit(asoc)) {
5081 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5082 				net->flight_size = 0;
5083 			}
5084 			asoc->total_flight = 0;
5085 			asoc->total_flight_count = 0;
5086 			asoc->sent_queue_retran_cnt = 0;
5087 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5088 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5089 					sctp_flight_size_increase(tp1);
5090 					sctp_total_flight_increase(stcb, tp1);
5091 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5092 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5093 				}
5094 			}
5095 		}
5096 		done_once = 1;
5097 		goto again;
5098 	}
5099 	/*********************************************/
5100 	/* Here we perform PR-SCTP procedures        */
5101 	/* (section 4.2)                             */
5102 	/*********************************************/
5103 	/* C1. update advancedPeerAckPoint */
5104 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5105 		asoc->advanced_peer_ack_point = cum_ack;
5106 	}
5107 	/* C2. try to further move advancedPeerAckPoint ahead */
5108 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5109 		struct sctp_tmit_chunk *lchk;
5110 		uint32_t old_adv_peer_ack_point;
5111 
5112 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5113 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5114 		/* C3. See if we need to send a Fwd-TSN */
5115 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5116 			/*
5117 			 * ISSUE with ECN, see FWD-TSN processing.
5118 			 */
5119 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5120 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5121 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5122 				    old_adv_peer_ack_point);
5123 			}
5124 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5125 				send_forward_tsn(stcb, asoc);
5126 			} else if (lchk) {
5127 				/* try to FR fwd-tsn's that get lost too */
5128 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5129 					send_forward_tsn(stcb, asoc);
5130 				}
5131 			}
5132 		}
5133 		if (lchk) {
5134 			/* Assure a timer is up */
5135 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5136 			    stcb->sctp_ep, stcb, lchk->whoTo);
5137 		}
5138 	}
5139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5140 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5141 		    a_rwnd,
5142 		    stcb->asoc.peers_rwnd,
5143 		    stcb->asoc.total_flight,
5144 		    stcb->asoc.total_output_queue_size);
5145 	}
5146 }
5147 
5148 void
5149 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5150 {
5151 	/* Copy cum-ack */
5152 	uint32_t cum_ack, a_rwnd;
5153 
5154 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5155 	/* Arrange so a_rwnd does NOT change */
5156 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5157 
5158 	/* Now call the express sack handling */
5159 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5160 }
5161 
5162 static void
5163 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5164     struct sctp_stream_in *strmin)
5165 {
5166 	struct sctp_queued_to_read *ctl, *nctl;
5167 	struct sctp_association *asoc;
5168 	uint32_t tt;
5169 	int need_reasm_check = 0, old;
5170 
5171 	asoc = &stcb->asoc;
5172 	tt = strmin->last_sequence_delivered;
5173 	if (asoc->idata_supported) {
5174 		old = 0;
5175 	} else {
5176 		old = 1;
5177 	}
5178 	/*
5179 	 * First deliver anything prior to and including the stream no that
5180 	 * came in.
5181 	 */
5182 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5183 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5184 			/* this is deliverable now */
5185 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5186 				if (ctl->on_strm_q) {
5187 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5188 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5189 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5190 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5191 #ifdef INVARIANTS
5192 					} else {
5193 						panic("strmin: %p ctl: %p unknown %d",
5194 						    strmin, ctl, ctl->on_strm_q);
5195 #endif
5196 					}
5197 					ctl->on_strm_q = 0;
5198 				}
5199 				/* subtract pending on streams */
5200 				asoc->size_on_all_streams -= ctl->length;
5201 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5202 				/* deliver it to at least the delivery-q */
5203 				if (stcb->sctp_socket) {
5204 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5205 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5206 					    ctl,
5207 					    &stcb->sctp_socket->so_rcv,
5208 					    1, SCTP_READ_LOCK_HELD,
5209 					    SCTP_SO_NOT_LOCKED);
5210 				}
5211 			} else {
5212 				/* Its a fragmented message */
5213 				if (ctl->first_frag_seen) {
5214 					/*
5215 					 * Make it so this is next to
5216 					 * deliver, we restore later
5217 					 */
5218 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5219 					need_reasm_check = 1;
5220 					break;
5221 				}
5222 			}
5223 		} else {
5224 			/* no more delivery now. */
5225 			break;
5226 		}
5227 	}
5228 	if (need_reasm_check) {
5229 		int ret;
5230 
5231 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5232 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5233 			/* Restore the next to deliver unless we are ahead */
5234 			strmin->last_sequence_delivered = tt;
5235 		}
5236 		if (ret == 0) {
5237 			/* Left the front Partial one on */
5238 			return;
5239 		}
5240 		need_reasm_check = 0;
5241 	}
5242 	/*
5243 	 * now we must deliver things in queue the normal way  if any are
5244 	 * now ready.
5245 	 */
5246 	tt = strmin->last_sequence_delivered + 1;
5247 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5248 		if (tt == ctl->sinfo_ssn) {
5249 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5250 				/* this is deliverable now */
5251 				if (ctl->on_strm_q) {
5252 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5253 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5254 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5255 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5256 #ifdef INVARIANTS
5257 					} else {
5258 						panic("strmin: %p ctl: %p unknown %d",
5259 						    strmin, ctl, ctl->on_strm_q);
5260 #endif
5261 					}
5262 					ctl->on_strm_q = 0;
5263 				}
5264 				/* subtract pending on streams */
5265 				asoc->size_on_all_streams -= ctl->length;
5266 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5267 				/* deliver it to at least the delivery-q */
5268 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5269 				if (stcb->sctp_socket) {
5270 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5271 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5272 					    ctl,
5273 					    &stcb->sctp_socket->so_rcv, 1,
5274 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5275 
5276 				}
5277 				tt = strmin->last_sequence_delivered + 1;
5278 			} else {
5279 				/* Its a fragmented message */
5280 				if (ctl->first_frag_seen) {
5281 					/*
5282 					 * Make it so this is next to
5283 					 * deliver
5284 					 */
5285 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5286 					need_reasm_check = 1;
5287 					break;
5288 				}
5289 			}
5290 		} else {
5291 			break;
5292 		}
5293 	}
5294 	if (need_reasm_check) {
5295 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5296 	}
5297 }
5298 
5299 
5300 
5301 static void
5302 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5303     struct sctp_association *asoc,
5304     uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn)
5305 {
5306 	struct sctp_queued_to_read *control;
5307 	struct sctp_stream_in *strm;
5308 	struct sctp_tmit_chunk *chk, *nchk;
5309 	int cnt_removed = 0;
5310 
5311 	/*
5312 	 * For now large messages held on the stream reasm that are complete
5313 	 * will be tossed too. We could in theory do more work to spin
5314 	 * through and stop after dumping one msg aka seeing the start of a
5315 	 * new msg at the head, and call the delivery function... to see if
5316 	 * it can be delivered... But for now we just dump everything on the
5317 	 * queue.
5318 	 */
5319 	strm = &asoc->strmin[stream];
5320 	control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
5321 	if (control == NULL) {
5322 		/* Not found */
5323 		return;
5324 	}
5325 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5326 		/* Purge hanging chunks */
5327 		if (old && (ordered == 0)) {
5328 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
5329 				break;
5330 			}
5331 		}
5332 		cnt_removed++;
5333 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5334 		asoc->size_on_reasm_queue -= chk->send_size;
5335 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5336 		if (chk->data) {
5337 			sctp_m_freem(chk->data);
5338 			chk->data = NULL;
5339 		}
5340 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5341 	}
5342 	if (!TAILQ_EMPTY(&control->reasm)) {
5343 		/* This has to be old data, unordered */
5344 		if (control->data) {
5345 			sctp_m_freem(control->data);
5346 			control->data = NULL;
5347 		}
5348 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5349 		chk = TAILQ_FIRST(&control->reasm);
5350 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5351 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5352 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5353 			    chk, SCTP_READ_LOCK_HELD);
5354 		}
5355 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5356 		return;
5357 	}
5358 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5359 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5360 		control->on_strm_q = 0;
5361 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5362 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5363 		control->on_strm_q = 0;
5364 #ifdef INVARIANTS
5365 	} else if (control->on_strm_q) {
5366 		panic("strm: %p ctl: %p unknown %d",
5367 		    strm, control, control->on_strm_q);
5368 #endif
5369 	}
5370 	control->on_strm_q = 0;
5371 	if (control->on_read_q == 0) {
5372 		sctp_free_remote_addr(control->whoFrom);
5373 		if (control->data) {
5374 			sctp_m_freem(control->data);
5375 			control->data = NULL;
5376 		}
5377 		sctp_free_a_readq(stcb, control);
5378 	}
5379 }
5380 
5381 void
5382 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5383     struct sctp_forward_tsn_chunk *fwd,
5384     int *abort_flag, struct mbuf *m, int offset)
5385 {
5386 	/* The pr-sctp fwd tsn */
5387 	/*
5388 	 * here we will perform all the data receiver side steps for
5389 	 * processing FwdTSN, as required in by pr-sctp draft:
5390 	 *
5391 	 * Assume we get FwdTSN(x):
5392 	 *
5393 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5394 	 * others we have 3) examine and update re-ordering queue on
5395 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5396 	 * report where we are.
5397 	 */
5398 	struct sctp_association *asoc;
5399 	uint32_t new_cum_tsn, gap;
5400 	unsigned int i, fwd_sz, m_size;
5401 	uint32_t str_seq;
5402 	struct sctp_stream_in *strm;
5403 	struct sctp_queued_to_read *ctl, *sv;
5404 
5405 	asoc = &stcb->asoc;
5406 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5407 		SCTPDBG(SCTP_DEBUG_INDATA1,
5408 		    "Bad size too small/big fwd-tsn\n");
5409 		return;
5410 	}
5411 	m_size = (stcb->asoc.mapping_array_size << 3);
5412 	/*************************************************************/
5413 	/* 1. Here we update local cumTSN and shift the bitmap array */
5414 	/*************************************************************/
5415 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5416 
5417 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5418 		/* Already got there ... */
5419 		return;
5420 	}
5421 	/*
5422 	 * now we know the new TSN is more advanced, let's find the actual
5423 	 * gap
5424 	 */
5425 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5426 	asoc->cumulative_tsn = new_cum_tsn;
5427 	if (gap >= m_size) {
5428 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5429 			struct mbuf *op_err;
5430 			char msg[SCTP_DIAG_INFO_LEN];
5431 
5432 			/*
5433 			 * out of range (of single byte chunks in the rwnd I
5434 			 * give out). This must be an attacker.
5435 			 */
5436 			*abort_flag = 1;
5437 			snprintf(msg, sizeof(msg),
5438 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5439 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5440 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5441 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5442 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5443 			return;
5444 		}
5445 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5446 
5447 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5448 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5449 		asoc->highest_tsn_inside_map = new_cum_tsn;
5450 
5451 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5452 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5453 
5454 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5455 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5456 		}
5457 	} else {
5458 		SCTP_TCB_LOCK_ASSERT(stcb);
5459 		for (i = 0; i <= gap; i++) {
5460 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5461 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5462 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5463 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5464 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5465 				}
5466 			}
5467 		}
5468 	}
5469 	/*************************************************************/
5470 	/* 2. Clear up re-assembly queue                             */
5471 	/*************************************************************/
5472 
5473 	/* This is now done as part of clearing up the stream/seq */
5474 	if (asoc->idata_supported == 0) {
5475 		uint16_t sid;
5476 
5477 		/* Flush all the un-ordered data based on cum-tsn */
5478 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5479 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5480 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
5481 		}
5482 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5483 	}
5484 	/*******************************************************/
5485 	/* 3. Update the PR-stream re-ordering queues and fix  */
5486 	/* delivery issues as needed.                       */
5487 	/*******************************************************/
5488 	fwd_sz -= sizeof(*fwd);
5489 	if (m && fwd_sz) {
5490 		/* New method. */
5491 		unsigned int num_str;
5492 		uint32_t sequence;
5493 		uint16_t stream;
5494 		uint16_t ordered, flags;
5495 		int old;
5496 		struct sctp_strseq *stseq, strseqbuf;
5497 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5498 
5499 		offset += sizeof(*fwd);
5500 
5501 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5502 		if (asoc->idata_supported) {
5503 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5504 			old = 0;
5505 		} else {
5506 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5507 			old = 1;
5508 		}
5509 		for (i = 0; i < num_str; i++) {
5510 			if (asoc->idata_supported) {
5511 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5512 				    sizeof(struct sctp_strseq_mid),
5513 				    (uint8_t *) & strseqbuf_m);
5514 				offset += sizeof(struct sctp_strseq_mid);
5515 				if (stseq_m == NULL) {
5516 					break;
5517 				}
5518 				stream = ntohs(stseq_m->stream);
5519 				sequence = ntohl(stseq_m->msg_id);
5520 				flags = ntohs(stseq_m->flags);
5521 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5522 					ordered = 0;
5523 				} else {
5524 					ordered = 1;
5525 				}
5526 			} else {
5527 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5528 				    sizeof(struct sctp_strseq),
5529 				    (uint8_t *) & strseqbuf);
5530 				offset += sizeof(struct sctp_strseq);
5531 				if (stseq == NULL) {
5532 					break;
5533 				}
5534 				stream = ntohs(stseq->stream);
5535 				sequence = (uint32_t) ntohs(stseq->sequence);
5536 				ordered = 1;
5537 			}
5538 			/* Convert */
5539 
5540 			/* now process */
5541 
5542 			/*
5543 			 * Ok we now look for the stream/seq on the read
5544 			 * queue where its not all delivered. If we find it
5545 			 * we transmute the read entry into a PDI_ABORTED.
5546 			 */
5547 			if (stream >= asoc->streamincnt) {
5548 				/* screwed up streams, stop!  */
5549 				break;
5550 			}
5551 			if ((asoc->str_of_pdapi == stream) &&
5552 			    (asoc->ssn_of_pdapi == sequence)) {
5553 				/*
5554 				 * If this is the one we were partially
5555 				 * delivering now then we no longer are.
5556 				 * Note this will change with the reassembly
5557 				 * re-write.
5558 				 */
5559 				asoc->fragmented_delivery_inprogress = 0;
5560 			}
5561 			strm = &asoc->strmin[stream];
5562 			if (asoc->idata_supported == 0) {
5563 				uint16_t strm_at;
5564 
5565 				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
5566 					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5567 				}
5568 			} else {
5569 				uint32_t strm_at;
5570 
5571 				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
5572 					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
5573 				}
5574 			}
5575 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5576 				if ((ctl->sinfo_stream == stream) &&
5577 				    (ctl->sinfo_ssn == sequence)) {
5578 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5579 					ctl->pdapi_aborted = 1;
5580 					sv = stcb->asoc.control_pdapi;
5581 					ctl->end_added = 1;
5582 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5583 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5584 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5585 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5586 #ifdef INVARIANTS
5587 					} else if (ctl->on_strm_q) {
5588 						panic("strm: %p ctl: %p unknown %d",
5589 						    strm, ctl, ctl->on_strm_q);
5590 #endif
5591 					}
5592 					ctl->on_strm_q = 0;
5593 					stcb->asoc.control_pdapi = ctl;
5594 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5595 					    stcb,
5596 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5597 					    (void *)&str_seq,
5598 					    SCTP_SO_NOT_LOCKED);
5599 					stcb->asoc.control_pdapi = sv;
5600 					break;
5601 				} else if ((ctl->sinfo_stream == stream) &&
5602 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5603 					/* We are past our victim SSN */
5604 					break;
5605 				}
5606 			}
5607 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5608 				/* Update the sequence number */
5609 				strm->last_sequence_delivered = sequence;
5610 			}
5611 			/* now kick the stream the new way */
5612 			/* sa_ignore NO_NULL_CHK */
5613 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5614 		}
5615 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5616 	}
5617 	/*
5618 	 * Now slide thing forward.
5619 	 */
5620 	sctp_slide_mapping_arrays(stcb);
5621 }
5622