xref: /freebsd/sys/netinet/sctp_indata.c (revision 41059135ce931c0f1014a999ffabc6bc470ce856)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send, that is, and will be sending it) ... for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk, int lock_held);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-to-many socket, since
84 	 * sb_cc is the count that everyone has put up. When we re-write
85 	 * sctp_soreceive we will fix this so that ONLY this association's
86 	 * data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on the socket queue and that we
102 	 * still hold for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* subtract the overhead of all this pending rwnd control data */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to control overhead, set it to 1,
116 	 * even if it would otherwise be 0 (SWS avoidance engaged).
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
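
/*
 * Illustrative sketch (made-up numbers) of the computation above: with a
 * 64KB receive buffer, 8KB in 4 chunks on the reassembly queue and 4KB
 * in 2 messages on the stream queues, we would report roughly
 *
 *   calc = sbspace(so_rcv)            whatever is still free, e.g. 52KB
 *        - (8KB + 4 * MSIZE)          reassembly data plus mbuf overhead
 *        - (4KB + 2 * MSIZE)          stream data plus mbuf overhead
 *        - my_rwnd_control_len        control/cmsg overhead so far
 *
 * and then pin the result to 1 when what remains is smaller than the
 * control overhead, keeping SWS avoidance engaged.
 */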
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t sid,
134     uint32_t mid, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = sid;
145 	read_queue_e->sinfo_flags = (flags << 8);
146 	read_queue_e->sinfo_ppid = ppid;
147 	read_queue_e->sinfo_context = context;
148 	read_queue_e->sinfo_tsn = tsn;
149 	read_queue_e->sinfo_cumtsn = tsn;
150 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
151 	read_queue_e->mid = mid;
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
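
/*
 * A note on the encoding above: sinfo_flags carries the DATA chunk flags
 * (the unordered/begin/end bits) in its upper byte, which is why readers
 * elsewhere in this file recover them as (sinfo_flags >> 8). top_fsn and
 * fsn_included start at 0xffffffff so that fsn_included + 1 wraps to 0,
 * the FSN of the first fragment.
 */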
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
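
/*
 * Rough layout of the mbuf built above when all three options are on: a
 * run of cmsgs, each padded out to CMSG_SPACE(),
 *
 *   [cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo
 *                                                or sctp_extrcvinfo]
 *
 * The single memset() over len zeroes the whole region up front, so any
 * CMSG_SPACE/CMSG_LEN padding handed to userland is initialized.
 */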
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one map to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
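
/*
 * In effect the function above moves a TSN from the revokable map
 * (mapping_array) to the non-revokable map (nr_mapping_array): once data
 * has been handed toward the reader we may no longer renege on it, so it
 * must be reported in non-renegable gap reports instead. The backward
 * scan only runs when we cleared the current highest bit and must find
 * the new highest_tsn_inside_map.
 */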
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t flags, unordered;
337 
338 	flags = (control->sinfo_flags >> 8);
339 	unordered = flags & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/*
345 				 * Only one control can be queued here in
346 				 * old style -- abort
347 				 */
348 				return (-1);
349 			}
350 			TAILQ_INSERT_TAIL(q, control, next_instrm);
351 			control->on_strm_q = SCTP_ON_UNORDERED;
352 			return (0);
353 		}
354 	} else {
355 		q = &strm->inqueue;
356 	}
357 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 		control->end_added = 1;
359 		control->first_frag_seen = 1;
360 		control->last_frag_seen = 1;
361 	}
362 	if (TAILQ_EMPTY(q)) {
363 		/* Empty queue */
364 		TAILQ_INSERT_HEAD(q, control, next_instrm);
365 		if (unordered) {
366 			control->on_strm_q = SCTP_ON_UNORDERED;
367 		} else {
368 			control->on_strm_q = SCTP_ON_ORDERED;
369 		}
370 		return (0);
371 	} else {
372 		TAILQ_FOREACH(at, q, next_instrm) {
373 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
374 				/*
375 				 * one in queue is bigger than the new one,
376 				 * insert before this one
377 				 */
378 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
379 				if (unordered) {
380 					control->on_strm_q = SCTP_ON_UNORDERED;
381 				} else {
382 					control->on_strm_q = SCTP_ON_ORDERED;
383 				}
384 				break;
385 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
386 				/*
387 				 * Gak, the peer sent me a duplicate msg id
388 				 * number?? return -1 to abort.
389 				 */
390 				return (-1);
391 			} else {
392 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
393 					/*
394 					 * We are at the end, insert it
395 					 * after this one
396 					 */
397 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
398 						sctp_log_strm_del(control, at,
399 						    SCTP_STR_LOG_FROM_INSERT_TL);
400 					}
401 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
402 					if (unordered) {
403 						control->on_strm_q = SCTP_ON_UNORDERED;
404 					} else {
405 						control->on_strm_q = SCTP_ON_ORDERED;
406 					}
407 					break;
408 				}
409 			}
410 		}
411 	}
412 	return (0);
413 }
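
/*
 * Note that the ordering above relies on SCTP_MID_GT/SCTP_MID_EQ, which
 * compare in serial-number arithmetic sized to the message id: 32 bits
 * when I-DATA is supported, 16 bits (the old SSN) otherwise. Returning
 * -1 in either duplicate case tells the caller to abort the association
 * rather than guess at the peer's intent.
 */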
414 
415 static void
416 sctp_abort_in_reasm(struct sctp_tcb *stcb,
417     struct sctp_queued_to_read *control,
418     struct sctp_tmit_chunk *chk,
419     int *abort_flag, int opspot)
420 {
421 	char msg[SCTP_DIAG_INFO_LEN];
422 	struct mbuf *oper;
423 
424 	if (stcb->asoc.idata_supported) {
425 		snprintf(msg, sizeof(msg),
426 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 		    opspot,
428 		    control->fsn_included,
429 		    chk->rec.data.tsn,
430 		    chk->rec.data.sid,
431 		    chk->rec.data.fsn, chk->rec.data.mid);
432 	} else {
433 		snprintf(msg, sizeof(msg),
434 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
435 		    opspot,
436 		    control->fsn_included,
437 		    chk->rec.data.tsn,
438 		    chk->rec.data.sid,
439 		    chk->rec.data.fsn,
440 		    (uint16_t)chk->rec.data.mid);
441 	}
442 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
443 	sctp_m_freem(chk->data);
444 	chk->data = NULL;
445 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
446 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
447 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
448 	*abort_flag = 1;
449 }
450 
451 static void
452 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
453 {
454 	/*
455 	 * The control could not be placed and must be cleaned.
456 	 */
457 	struct sctp_tmit_chunk *chk, *nchk;
458 
459 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
460 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 		if (chk->data)
462 			sctp_m_freem(chk->data);
463 		chk->data = NULL;
464 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 	}
466 	sctp_free_a_readq(stcb, control);
467 }
468 
469 /*
470  * Queue the chunk either right into the socket buffer if it is the next one
471  * to go, OR put it in the correct place in the delivery queue.  If we do
472  * append to the so_buf, keep doing so until we run out of order, as
473  * long as the controls entered are non-fragmented.
474  */
475 static void
476 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
477     struct sctp_association *asoc,
478     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 {
480 	/*
481 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 	 * all the data in one stream this could happen quite rapidly. One
483 	 * could use the TSN to keep track of things, but this scheme breaks
484 	 * down in the other type of stream usage that could occur. Send a
485 	 * single msg to stream 0, send 4 billion messages to stream 1, now
486 	 * send a message to stream 0. You have a situation where the TSN
487 	 * has wrapped but not in the stream. Is this worth worrying about,
488 	 * or should we just change our queue sort at the bottom to be by
489 	 * TSN?
490 	 *
491 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
492 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 	 * assignment this could happen... and I don't see how this would be
494 	 * a violation. So for now I am undecided and will leave the sort by
495 	 * SSN alone. Maybe a hybrid approach is the answer.
496 	 *
497 	 */
498 	struct sctp_queued_to_read *at;
499 	int queue_needed;
500 	uint32_t nxt_todel;
501 	struct mbuf *op_err;
502 	struct sctp_stream_in *strm;
503 	char msg[SCTP_DIAG_INFO_LEN];
504 
505 	strm = &asoc->strmin[control->sinfo_stream];
506 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
507 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
508 	}
509 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
510 		/* The incoming sseq is behind where we last delivered? */
511 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
512 		    strm->last_mid_delivered, control->mid);
513 		/*
514 		 * throw it in the stream so it gets cleaned up in
515 		 * association destruction
516 		 */
517 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
518 		if (asoc->idata_supported) {
519 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
520 			    strm->last_mid_delivered, control->sinfo_tsn,
521 			    control->sinfo_stream, control->mid);
522 		} else {
523 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
524 			    (uint16_t)strm->last_mid_delivered,
525 			    control->sinfo_tsn,
526 			    control->sinfo_stream,
527 			    (uint16_t)control->mid);
528 		}
529 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
530 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
531 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
532 		*abort_flag = 1;
533 		return;
534 
535 	}
536 	queue_needed = 1;
537 	asoc->size_on_all_streams += control->length;
538 	sctp_ucount_incr(asoc->cnt_on_all_streams);
539 	nxt_todel = strm->last_mid_delivered + 1;
540 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
541 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
542 		struct socket *so;
543 
544 		so = SCTP_INP_SO(stcb->sctp_ep);
545 		atomic_add_int(&stcb->asoc.refcnt, 1);
546 		SCTP_TCB_UNLOCK(stcb);
547 		SCTP_SOCKET_LOCK(so, 1);
548 		SCTP_TCB_LOCK(stcb);
549 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
550 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
551 			SCTP_SOCKET_UNLOCK(so, 1);
552 			return;
553 		}
554 #endif
555 		/* can be delivered right away? */
556 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
557 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
558 		}
559 		/* EY it won't be queued if it can be delivered directly */
560 		queue_needed = 0;
561 		asoc->size_on_all_streams -= control->length;
562 		sctp_ucount_decr(asoc->cnt_on_all_streams);
563 		strm->last_mid_delivered++;
564 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
565 		sctp_add_to_readq(stcb->sctp_ep, stcb,
566 		    control,
567 		    &stcb->sctp_socket->so_rcv, 1,
568 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
569 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
570 			/* all delivered */
571 			nxt_todel = strm->last_mid_delivered + 1;
572 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
573 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
574 				asoc->size_on_all_streams -= control->length;
575 				sctp_ucount_decr(asoc->cnt_on_all_streams);
576 				if (control->on_strm_q == SCTP_ON_ORDERED) {
577 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
578 #ifdef INVARIANTS
579 				} else {
580 					panic("Huh control: %p is on_strm_q: %d",
581 					    control, control->on_strm_q);
582 #endif
583 				}
584 				control->on_strm_q = 0;
585 				strm->last_mid_delivered++;
586 				/*
587 				 * We ignore the return of deliver_data here
588 				 * since we always can hold the chunk on the
589 				 * d-queue. And we have a finite number that
590 				 * can be delivered from the strq.
591 				 */
592 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
593 					sctp_log_strm_del(control, NULL,
594 					    SCTP_STR_LOG_FROM_IMMED_DEL);
595 				}
596 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
597 				sctp_add_to_readq(stcb->sctp_ep, stcb,
598 				    control,
599 				    &stcb->sctp_socket->so_rcv, 1,
600 				    SCTP_READ_LOCK_NOT_HELD,
601 				    SCTP_SO_LOCKED);
602 				continue;
603 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
604 				*need_reasm = 1;
605 			}
606 			break;
607 		}
608 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
609 		SCTP_SOCKET_UNLOCK(so, 1);
610 #endif
611 	}
612 	if (queue_needed) {
613 		/*
614 		 * Ok, we did not deliver this guy, find the correct place
615 		 * to put it on the queue.
616 		 */
617 		if (sctp_place_control_in_stream(strm, asoc, control)) {
618 			snprintf(msg, sizeof(msg),
619 			    "Queue to str MID: %u duplicate",
620 			    control->mid);
621 			sctp_clean_up_control(stcb, control);
622 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
623 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
624 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
625 			*abort_flag = 1;
626 		}
627 	}
628 }
629 
630 
631 static void
632 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
633 {
634 	struct mbuf *m, *prev = NULL;
635 	struct sctp_tcb *stcb;
636 
637 	stcb = control->stcb;
638 	control->held_length = 0;
639 	control->length = 0;
640 	m = control->data;
641 	while (m) {
642 		if (SCTP_BUF_LEN(m) == 0) {
643 			/* Skip mbufs with NO length */
644 			if (prev == NULL) {
645 				/* First one */
646 				control->data = sctp_m_free(m);
647 				m = control->data;
648 			} else {
649 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
650 				m = SCTP_BUF_NEXT(prev);
651 			}
652 			if (m == NULL) {
653 				control->tail_mbuf = prev;
654 			}
655 			continue;
656 		}
657 		prev = m;
658 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
659 		if (control->on_read_q) {
660 			/*
661 			 * On the read queue, so we must increment the SB
662 			 * counters; we assume the caller holds any SB locks.
663 			 */
664 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
665 		}
666 		m = SCTP_BUF_NEXT(m);
667 	}
668 	if (prev) {
669 		control->tail_mbuf = prev;
670 	}
671 }
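
/*
 * A small invariant worth stating: the walk above both prunes
 * zero-length mbufs and recomputes control->length, so on return
 * control->tail_mbuf points at the last non-empty mbuf of the chain, or
 * is NULL if every mbuf in it was empty.
 */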
672 
673 static void
674 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
675 {
676 	struct mbuf *prev = NULL;
677 	struct sctp_tcb *stcb;
678 
679 	stcb = control->stcb;
680 	if (stcb == NULL) {
681 #ifdef INVARIANTS
682 		panic("Control broken");
683 #else
684 		return;
685 #endif
686 	}
687 	if (control->tail_mbuf == NULL) {
688 		/* TSNH: this should not happen */
689 		control->data = m;
690 		sctp_setup_tail_pointer(control);
691 		return;
692 	}
693 	control->tail_mbuf->m_next = m;
694 	while (m) {
695 		if (SCTP_BUF_LEN(m) == 0) {
696 			/* Skip mbufs with NO length */
697 			if (prev == NULL) {
698 				/* First one */
699 				control->tail_mbuf->m_next = sctp_m_free(m);
700 				m = control->tail_mbuf->m_next;
701 			} else {
702 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
703 				m = SCTP_BUF_NEXT(prev);
704 			}
705 			if (m == NULL) {
706 				control->tail_mbuf = prev;
707 			}
708 			continue;
709 		}
710 		prev = m;
711 		if (control->on_read_q) {
712 			/*
713 			 * On the read queue, so we must increment the SB
714 			 * counters; we assume the caller holds any SB locks.
715 			 */
716 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
717 		}
718 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
719 		m = SCTP_BUF_NEXT(m);
720 	}
721 	if (prev) {
722 		control->tail_mbuf = prev;
723 	}
724 }
725 
726 static void
727 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
728 {
729 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
730 	nc->sinfo_stream = control->sinfo_stream;
731 	nc->mid = control->mid;
732 	TAILQ_INIT(&nc->reasm);
733 	nc->top_fsn = control->top_fsn;
734 	nc->mid = control->mid;
735 	nc->sinfo_flags = control->sinfo_flags;
736 	nc->sinfo_ppid = control->sinfo_ppid;
737 	nc->sinfo_context = control->sinfo_context;
738 	nc->fsn_included = 0xffffffff;
739 	nc->sinfo_tsn = control->sinfo_tsn;
740 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
741 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
742 	nc->whoFrom = control->whoFrom;
743 	atomic_add_int(&nc->whoFrom->ref_count, 1);
744 	nc->stcb = control->stcb;
745 	nc->port_from = control->port_from;
746 }
747 
748 static void
749 sctp_reset_a_control(struct sctp_queued_to_read *control,
750     struct sctp_inpcb *inp, uint32_t tsn)
751 {
752 	control->fsn_included = tsn;
753 	if (control->on_read_q) {
754 		/*
755 		 * We have to purge it from there, hopefully this will work
756 		 * :-)
757 		 */
758 		TAILQ_REMOVE(&inp->read_queue, control, next);
759 		control->on_read_q = 0;
760 	}
761 }
762 
763 static int
764 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
765     struct sctp_association *asoc,
766     struct sctp_stream_in *strm,
767     struct sctp_queued_to_read *control,
768     uint32_t pd_point,
769     int inp_read_lock_held)
770 {
771 	/*
772 	 * Special handling for the old un-ordered data chunk. All the
773 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
774 	 * to see if we have it all. If we return 1, no other control
775 	 * entries on the un-ordered queue will be looked at. In theory
776 	 * there should be no other entries, unless the peer is
777 	 * sending both unordered I-DATA and unordered DATA...
778 	 */
779 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
780 	uint32_t fsn;
781 	struct sctp_queued_to_read *nc;
782 	int cnt_added;
783 
784 	if (control->first_frag_seen == 0) {
785 		/* Nothing we can do, we have not seen the first piece yet */
786 		return (1);
787 	}
788 	/* Collapse any we can */
789 	cnt_added = 0;
790 restart:
791 	fsn = control->fsn_included + 1;
792 	/* Now what can we add? */
793 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
794 		if (chk->rec.data.fsn == fsn) {
795 			/* Ok lets add it */
796 			sctp_alloc_a_readq(stcb, nc);
797 			if (nc == NULL) {
798 				break;
799 			}
800 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
801 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
802 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
803 			fsn++;
804 			cnt_added++;
805 			chk = NULL;
806 			if (control->end_added) {
807 				/* We are done */
808 				if (!TAILQ_EMPTY(&control->reasm)) {
809 					/*
810 					 * Ok we have to move anything left
811 					 * on the control queue to a new
812 					 * control.
813 					 */
814 					sctp_build_readq_entry_from_ctl(nc, control);
815 					tchk = TAILQ_FIRST(&control->reasm);
816 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
817 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
818 						asoc->size_on_reasm_queue -= tchk->send_size;
819 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
820 						nc->first_frag_seen = 1;
821 						nc->fsn_included = tchk->rec.data.fsn;
822 						nc->data = tchk->data;
823 						nc->sinfo_ppid = tchk->rec.data.ppid;
824 						nc->sinfo_tsn = tchk->rec.data.tsn;
825 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
826 						tchk->data = NULL;
827 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
828 						sctp_setup_tail_pointer(nc);
829 						tchk = TAILQ_FIRST(&control->reasm);
830 					}
831 					/* Spin the rest onto the queue */
832 					while (tchk) {
833 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
834 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
835 						tchk = TAILQ_FIRST(&control->reasm);
836 					}
837 					/*
838 					 * Now lets add it to the queue
839 					 * after removing control
840 					 */
841 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
842 					nc->on_strm_q = SCTP_ON_UNORDERED;
843 					if (control->on_strm_q) {
844 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
845 						control->on_strm_q = 0;
846 					}
847 				}
848 				if (control->pdapi_started) {
849 					strm->pd_api_started = 0;
850 					control->pdapi_started = 0;
851 				}
852 				if (control->on_strm_q) {
853 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
854 					control->on_strm_q = 0;
855 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
856 				}
857 				if (control->on_read_q == 0) {
858 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
859 					    &stcb->sctp_socket->so_rcv, control->end_added,
860 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
861 				}
862 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
863 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
864 					/*
865 					 * Switch to the new guy and
866 					 * continue
867 					 */
868 					control = nc;
869 					goto restart;
870 				} else {
871 					if (nc->on_strm_q == 0) {
872 						sctp_free_a_readq(stcb, nc);
873 					}
874 				}
875 				return (1);
876 			} else {
877 				sctp_free_a_readq(stcb, nc);
878 			}
879 		} else {
880 			/* Can't add more */
881 			break;
882 		}
883 	}
884 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
885 		strm->pd_api_started = 1;
886 		control->pdapi_started = 1;
887 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
888 		    &stcb->sctp_socket->so_rcv, control->end_added,
889 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
890 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
891 		return (0);
892 	} else {
893 		return (1);
894 	}
895 }
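
/*
 * A concrete example of the "old unordered" collapse handled above:
 * without I-DATA there is no FSN on the wire, so the TSN doubles as the
 * FSN and every unordered fragment lands on the single control at mid 0.
 * Fragments of consecutive messages can therefore share one reasm list;
 * once an E-bit fragment completes the first message, whatever is left
 * over is spun off onto a fresh control (nc) and we restart on that.
 */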
896 
897 static void
898 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
899     struct sctp_association *asoc,
900     struct sctp_queued_to_read *control,
901     struct sctp_tmit_chunk *chk,
902     int *abort_flag)
903 {
904 	struct sctp_tmit_chunk *at;
905 	int inserted;
906 
907 	/*
908 	 * Here we need to place the chunk into the control structure sorted
909 	 * in the correct order.
910 	 */
911 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
912 		/* It's the very first one. */
913 		SCTPDBG(SCTP_DEBUG_XXX,
914 		    "chunk is a first fsn: %u becomes fsn_included\n",
915 		    chk->rec.data.fsn);
916 		if (control->first_frag_seen) {
917 			/*
918 			 * In old un-ordered mode we can reassemble multiple
919 			 * messages on one control, as long as the next
920 			 * FIRST is greater than the old first (TSN-, i.e.
921 			 * FSN-, wise).
922 			 */
923 			struct mbuf *tdata;
924 			uint32_t tmp;
925 
926 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
927 				/*
928 				 * Easy case: the start of a new message
929 				 * beyond the lowest
930 				 */
931 				goto place_chunk;
932 			}
933 			if ((chk->rec.data.fsn == control->fsn_included) ||
934 			    (control->pdapi_started)) {
935 				/*
936 				 * Ok, this should not happen; if it does we
937 				 * started the pd-api on the higher TSN
938 				 * (since the equals part is a TSN failure
939 				 * it must be that).
940 				 *
941 				 * We are completely hosed in that case since
942 				 * I have no way to recover. This really
943 				 * will only happen if we can get more TSN's
944 				 * higher before the pd-api-point.
945 				 */
946 				sctp_abort_in_reasm(stcb, control, chk,
947 				    abort_flag,
948 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
949 
950 				return;
951 			}
952 			/*
953 			 * Ok we have two firsts and the one we just got is
954 			 * smaller than the one we previously placed... yuck!
955 			 * We must swap them out.
956 			 */
957 			/* swap the mbufs */
958 			tdata = control->data;
959 			control->data = chk->data;
960 			chk->data = tdata;
961 			/* Save the lengths */
962 			chk->send_size = control->length;
963 			/* Recompute length of control and tail pointer */
964 			sctp_setup_tail_pointer(control);
965 			/* Fix the FSN included */
966 			tmp = control->fsn_included;
967 			control->fsn_included = chk->rec.data.fsn;
968 			chk->rec.data.fsn = tmp;
969 			/* Fix the TSN included */
970 			tmp = control->sinfo_tsn;
971 			control->sinfo_tsn = chk->rec.data.tsn;
972 			chk->rec.data.tsn = tmp;
973 			/* Fix the PPID included */
974 			tmp = control->sinfo_ppid;
975 			control->sinfo_ppid = chk->rec.data.ppid;
976 			chk->rec.data.ppid = tmp;
977 			/* Fix tail pointer */
978 			goto place_chunk;
979 		}
980 		control->first_frag_seen = 1;
981 		control->fsn_included = chk->rec.data.fsn;
982 		control->top_fsn = chk->rec.data.fsn;
983 		control->sinfo_tsn = chk->rec.data.tsn;
984 		control->sinfo_ppid = chk->rec.data.ppid;
985 		control->data = chk->data;
986 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
987 		chk->data = NULL;
988 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
989 		sctp_setup_tail_pointer(control);
990 		return;
991 	}
992 place_chunk:
993 	inserted = 0;
994 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
995 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
996 			/*
997 			 * This one in queue is bigger than the new one,
998 			 * insert the new one before at.
999 			 */
1000 			asoc->size_on_reasm_queue += chk->send_size;
1001 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1002 			inserted = 1;
1003 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1004 			break;
1005 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1006 			/*
1007 			 * They sent a duplicate fsn number. This really
1008 			 * should not happen since the FSN is a TSN and it
1009 			 * should have been dropped earlier.
1010 			 */
1011 			sctp_abort_in_reasm(stcb, control, chk,
1012 			    abort_flag,
1013 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1014 			return;
1015 		}
1016 	}
1017 	if (inserted == 0) {
1018 		/* It's at the end */
1019 		asoc->size_on_reasm_queue += chk->send_size;
1020 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1021 		control->top_fsn = chk->rec.data.fsn;
1022 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1023 	}
1024 }
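
/*
 * The swap above keeps the control holding the lowest FIRST fragment:
 * when a new FIRST arrives with a smaller FSN/TSN than the one already
 * in place, the mbuf chains, FSN, TSN and PPID are exchanged, and the
 * old first is then re-queued as an ordinary fragment via place_chunk.
 */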
1025 
1026 static int
1027 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1028     struct sctp_stream_in *strm, int inp_read_lock_held)
1029 {
1030 	/*
1031 	 * Given a stream, strm, see if any of the SSN's on it that are
1032 	 * fragmented are ready to deliver. If so, go ahead and place them on
1033 	 * the read queue. In so placing, if we have hit the end, then we
1034 	 * need to remove them from the stream's queue.
1035 	 */
1036 	struct sctp_queued_to_read *control, *nctl = NULL;
1037 	uint32_t next_to_del;
1038 	uint32_t pd_point;
1039 	int ret = 0;
1040 
1041 	if (stcb->sctp_socket) {
1042 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1043 		    stcb->sctp_ep->partial_delivery_point);
1044 	} else {
1045 		pd_point = stcb->sctp_ep->partial_delivery_point;
1046 	}
1047 	control = TAILQ_FIRST(&strm->uno_inqueue);
1048 
1049 	if ((control != NULL) &&
1050 	    (asoc->idata_supported == 0)) {
1051 		/* Special handling needed for "old" data format */
1052 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1053 			goto done_un;
1054 		}
1055 	}
1056 	if (strm->pd_api_started) {
1057 		/* Can't add more */
1058 		return (0);
1059 	}
1060 	while (control) {
1061 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1062 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1063 		nctl = TAILQ_NEXT(control, next_instrm);
1064 		if (control->end_added) {
1065 			/* We just put the last bit on */
1066 			if (control->on_strm_q) {
1067 #ifdef INVARIANTS
1068 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1069 					panic("Huh control: %p on_q: %d -- not unordered?",
1070 					    control, control->on_strm_q);
1071 				}
1072 #endif
1073 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1074 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1075 				control->on_strm_q = 0;
1076 			}
1077 			if (control->on_read_q == 0) {
1078 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1079 				    control,
1080 				    &stcb->sctp_socket->so_rcv, control->end_added,
1081 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1082 			}
1083 		} else {
1084 			/* Can we do a PD-API for this un-ordered guy? */
1085 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1086 				strm->pd_api_started = 1;
1087 				control->pdapi_started = 1;
1088 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1089 				    control,
1090 				    &stcb->sctp_socket->so_rcv, control->end_added,
1091 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1092 
1093 				break;
1094 			}
1095 		}
1096 		control = nctl;
1097 	}
1098 done_un:
1099 	control = TAILQ_FIRST(&strm->inqueue);
1100 	if (strm->pd_api_started) {
1101 		/* Can't add more */
1102 		return (0);
1103 	}
1104 	if (control == NULL) {
1105 		return (ret);
1106 	}
1107 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1108 		/*
1109 		 * Ok, the entry at the top was being partially delivered
1110 		 * and has completed, so we remove it. Note the pd_api flag
1111 		 * was taken off when the chunk was merged in by
1112 		 * sctp_queue_data_for_reasm below.
1113 		 */
1114 		nctl = TAILQ_NEXT(control, next_instrm);
1115 		SCTPDBG(SCTP_DEBUG_XXX,
1116 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1117 		    control, control->end_added, control->mid,
1118 		    control->top_fsn, control->fsn_included,
1119 		    strm->last_mid_delivered);
1120 		if (control->end_added) {
1121 			if (control->on_strm_q) {
1122 #ifdef INVARIANTS
1123 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1124 					panic("Huh control: %p on_q: %d -- not ordered?",
1125 					    control, control->on_strm_q);
1126 				}
1127 #endif
1128 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1129 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1130 				control->on_strm_q = 0;
1131 			}
1132 			if (strm->pd_api_started && control->pdapi_started) {
1133 				control->pdapi_started = 0;
1134 				strm->pd_api_started = 0;
1135 			}
1136 			if (control->on_read_q == 0) {
1137 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1138 				    control,
1139 				    &stcb->sctp_socket->so_rcv, control->end_added,
1140 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1141 			}
1142 			control = nctl;
1143 		}
1144 	}
1145 	if (strm->pd_api_started) {
1146 		/*
1147 		 * Can't add more; we must have gotten an un-ordered above being
1148 		 * partially delivered.
1149 		 */
1150 		return (0);
1151 	}
1152 deliver_more:
1153 	next_to_del = strm->last_mid_delivered + 1;
1154 	if (control) {
1155 		SCTPDBG(SCTP_DEBUG_XXX,
1156 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1157 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1158 		    next_to_del);
1159 		nctl = TAILQ_NEXT(control, next_instrm);
1160 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1161 		    (control->first_frag_seen)) {
1162 			int done;
1163 
1164 			/* Ok we can deliver it onto the stream. */
1165 			if (control->end_added) {
1166 				/* We are done with it afterwards */
1167 				if (control->on_strm_q) {
1168 #ifdef INVARIANTS
1169 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1170 						panic("Huh control: %p on_q: %d -- not ordered?",
1171 						    control, control->on_strm_q);
1172 					}
1173 #endif
1174 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1175 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1176 					control->on_strm_q = 0;
1177 				}
1178 				ret++;
1179 			}
1180 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1181 				/*
1182 				 * A singleton now slipping through - mark
1183 				 * it non-revokable too
1184 				 */
1185 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1186 			} else if (control->end_added == 0) {
1187 				/*
1188 				 * Check if we can defer adding until it's
1189 				 * all there
1190 				 */
1191 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1192 					/*
1193 					 * Don't need it or cannot add more
1194 					 * (one being delivered that way)
1195 					 */
1196 					goto out;
1197 				}
1198 			}
1199 			done = (control->end_added) && (control->last_frag_seen);
1200 			if (control->on_read_q == 0) {
1201 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1202 				    control,
1203 				    &stcb->sctp_socket->so_rcv, control->end_added,
1204 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1205 			}
1206 			strm->last_mid_delivered = next_to_del;
1207 			if (done) {
1208 				control = nctl;
1209 				goto deliver_more;
1210 			} else {
1211 				/* We are now doing PD API */
1212 				strm->pd_api_started = 1;
1213 				control->pdapi_started = 1;
1214 			}
1215 		}
1216 	}
1217 out:
1218 	return (ret);
1219 }
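
/*
 * pd_point above governs the partial delivery API: a still-incomplete
 * message is pushed to the socket once it holds at least pd_point bytes,
 * where pd_point is the smaller of the endpoint's configured
 * partial_delivery_point and the receive buffer limit shifted down by
 * SCTP_PARTIAL_DELIVERY_SHIFT. Only one PD-API delivery may be in
 * progress per stream at a time, hence the pd_api_started gating.
 */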
1220 
1221 
1222 void
1223 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1224     struct sctp_stream_in *strm,
1225     struct sctp_tcb *stcb, struct sctp_association *asoc,
1226     struct sctp_tmit_chunk *chk, int hold_rlock)
1227 {
1228 	/*
1229 	 * Given a control and a chunk, merge the data from the chk onto the
1230 	 * control and free up the chunk resources.
1231 	 */
1232 	int i_locked = 0;
1233 
1234 	if (control->on_read_q && (hold_rlock == 0)) {
1235 		/*
1236 		 * It's being pd-api'd, so we must take some locks.
1237 		 */
1238 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1239 		i_locked = 1;
1240 	}
1241 	if (control->data == NULL) {
1242 		control->data = chk->data;
1243 		sctp_setup_tail_pointer(control);
1244 	} else {
1245 		sctp_add_to_tail_pointer(control, chk->data);
1246 	}
1247 	control->fsn_included = chk->rec.data.fsn;
1248 	asoc->size_on_reasm_queue -= chk->send_size;
1249 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1250 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1251 	chk->data = NULL;
1252 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1253 		control->first_frag_seen = 1;
1254 		control->sinfo_tsn = chk->rec.data.tsn;
1255 		control->sinfo_ppid = chk->rec.data.ppid;
1256 	}
1257 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1258 		/* It's complete */
1259 		if ((control->on_strm_q) && (control->on_read_q)) {
1260 			if (control->pdapi_started) {
1261 				control->pdapi_started = 0;
1262 				strm->pd_api_started = 0;
1263 			}
1264 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1265 				/* Unordered */
1266 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1267 				control->on_strm_q = 0;
1268 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1269 				/* Ordered */
1270 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1271 				control->on_strm_q = 0;
1272 #ifdef INVARIANTS
1273 			} else if (control->on_strm_q) {
1274 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1275 				    control->on_strm_q);
1276 #endif
1277 			}
1278 		}
1279 		control->end_added = 1;
1280 		control->last_frag_seen = 1;
1281 	}
1282 	if (i_locked) {
1283 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1284 	}
1285 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1286 }
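
/*
 * A note on locking in the merge above: when the control is already on
 * the read queue a reader may be consuming it concurrently via the
 * PD-API, so the data append is done under the inp read lock unless the
 * caller signalled through hold_rlock that it already holds it.
 */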
1287 
1288 /*
1289  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1290  * queue, see if anything can be delivered. If so, pull it off (or as much as
1291  * we can). If we run out of space then we must dump what we can and set the
1292  * appropriate flag to say we queued what we could.
1293  */
1294 static void
1295 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1296     struct sctp_queued_to_read *control,
1297     struct sctp_tmit_chunk *chk,
1298     int created_control,
1299     int *abort_flag, uint32_t tsn)
1300 {
1301 	uint32_t next_fsn;
1302 	struct sctp_tmit_chunk *at, *nat;
1303 	struct sctp_stream_in *strm;
1304 	int do_wakeup, unordered;
1305 
1306 	strm = &asoc->strmin[control->sinfo_stream];
1307 	/*
1308 	 * For old un-ordered data chunks.
1309 	 */
1310 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1311 		unordered = 1;
1312 	} else {
1313 		unordered = 0;
1314 	}
1315 	/* Must be added to the stream-in queue */
1316 	if (created_control) {
1317 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1318 			/* Duplicate SSN? */
1319 			sctp_clean_up_control(stcb, control);
1320 			sctp_abort_in_reasm(stcb, control, chk,
1321 			    abort_flag,
1322 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1323 			return;
1324 		}
1325 		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
1326 			/*
1327 			 * Ok, we created this control; now let's validate
1328 			 * that it's legal, i.e. there is a B bit set. If not,
1329 			 * and we have received up to the cum-ack, it's invalid.
1330 			 */
1331 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1332 				sctp_abort_in_reasm(stcb, control, chk,
1333 				    abort_flag,
1334 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1335 				return;
1336 			}
1337 		}
1338 	}
1339 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1340 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1341 		return;
1342 	}
1343 	/*
1344 	 * Ok, we must queue the chunk into the reassembly portion:
1345 	 *  o if it's the first, it goes onto the control mbuf;
1346 	 *  o if it's not first but the next in sequence, it goes onto the
1347 	 *    control, and each succeeding one in order also goes;
1348 	 *  o if it's not in order, we place it on the list in its place.
1349 	 */
1350 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1351 		/* Its the very first one. */
1352 		/* It's the very first one. */
1353 		    "chunk is a first fsn: %u becomes fsn_included\n",
1354 		    chk->rec.data.fsn);
1355 		if (control->first_frag_seen) {
1356 			/*
1357 			 * Error on senders part, they either sent us two
1358 			 * Error on the sender's part: they either sent us two
1359 			 * un-ordered chunks that were fragmented at the
1360 			 * same time in the same stream.
1361 			 */
1362 			sctp_abort_in_reasm(stcb, control, chk,
1363 			    abort_flag,
1364 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1365 			return;
1366 		}
1367 		control->first_frag_seen = 1;
1368 		control->sinfo_ppid = chk->rec.data.ppid;
1369 		control->sinfo_tsn = chk->rec.data.tsn;
1370 		control->fsn_included = chk->rec.data.fsn;
1371 		control->data = chk->data;
1372 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1373 		chk->data = NULL;
1374 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1375 		sctp_setup_tail_pointer(control);
1376 	} else {
1377 		/* Place the chunk in our list */
1378 		int inserted = 0;
1379 
1380 		if (control->last_frag_seen == 0) {
1381 			/* Still willing to raise highest FSN seen */
1382 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1383 				SCTPDBG(SCTP_DEBUG_XXX,
1384 				    "We have a new top_fsn: %u\n",
1385 				    chk->rec.data.fsn);
1386 				control->top_fsn = chk->rec.data.fsn;
1387 			}
1388 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1389 				SCTPDBG(SCTP_DEBUG_XXX,
1390 				    "The last fsn is now in place fsn: %u\n",
1391 				    chk->rec.data.fsn);
1392 				control->last_frag_seen = 1;
1393 			}
1394 			if (asoc->idata_supported || control->first_frag_seen) {
1395 				/*
1396 				 * For IDATA we always check since we know
1397 				 * that the first fragment is 0. For old
1398 				 * DATA we have to receive the first before
1399 				 * we know the first FSN (which is the TSN).
1400 				 */
1401 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1402 					/*
1403 					 * We have already delivered up to
1404 					 * this so its a dup
1405 					 */
1406 					sctp_abort_in_reasm(stcb, control, chk,
1407 					    abort_flag,
1408 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1409 					return;
1410 				}
1411 			}
1412 		} else {
1413 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1414 				/* A second LAST fragment? huh? */
1415 				SCTPDBG(SCTP_DEBUG_XXX,
1416 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1417 				    chk->rec.data.fsn, control->top_fsn);
1418 				sctp_abort_in_reasm(stcb, control,
1419 				    chk, abort_flag,
1420 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1421 				return;
1422 			}
1423 			if (asoc->idata_supported || control->first_frag_seen) {
1424 				/*
1425 				 * For IDATA we always check since we know
1426 				 * that the first fragment is 0. For old
1427 				 * DATA we have to receive the first before
1428 				 * we know the first FSN (which is the TSN).
1429 				 */
1430 
1431 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1432 					/*
1433 					 * We have already delivered up to
1434 					 * this so its a dup
1435 					 */
1436 					SCTPDBG(SCTP_DEBUG_XXX,
1437 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1438 					    chk->rec.data.fsn, control->fsn_included);
1439 					sctp_abort_in_reasm(stcb, control, chk,
1440 					    abort_flag,
1441 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1442 					return;
1443 				}
1444 			}
1445 			/*
1446 			 * validate not beyond top FSN if we have seen last
1447 			 * one
1448 			 */
1449 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1450 				SCTPDBG(SCTP_DEBUG_XXX,
1451 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1452 				    chk->rec.data.fsn,
1453 				    control->top_fsn);
1454 				sctp_abort_in_reasm(stcb, control, chk,
1455 				    abort_flag,
1456 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1457 				return;
1458 			}
1459 		}
1460 		/*
1461 		 * If we reach here, we need to place the new chunk in the
1462 		 * reassembly for this control.
1463 		 */
1464 		SCTPDBG(SCTP_DEBUG_XXX,
1465 		    "chunk is a not first fsn: %u needs to be inserted\n",
1466 		    chk->rec.data.fsn);
1467 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1468 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1469 				/*
1470 				 * This one in queue is bigger than the new
1471 				 * one, insert the new one before at.
1472 				 */
1473 				SCTPDBG(SCTP_DEBUG_XXX,
1474 				    "Insert it before fsn: %u\n",
1475 				    at->rec.data.fsn);
1476 				asoc->size_on_reasm_queue += chk->send_size;
1477 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1478 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1479 				inserted = 1;
1480 				break;
1481 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1482 				/*
1483 				 * Gak, the peer sent me a duplicate str seq
1484 				 * number.
1485 				 *
1486 				 * It COULD be that the SSN's have wrapped
1487 				 * (FIX ME MAYBE? compare to the TSN
1488 				 * somehow), but for now we treat it as a
1489 				 * protocol violation and abort.
1490 				 */
1494 				SCTPDBG(SCTP_DEBUG_XXX,
1495 				    "Duplicate to fsn: %u -- abort\n",
1496 				    at->rec.data.fsn);
1497 				sctp_abort_in_reasm(stcb, control,
1498 				    chk, abort_flag,
1499 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1500 				return;
1501 			}
1502 		}
1503 		if (inserted == 0) {
1504 			/* Goes on the end */
1505 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1506 			    chk->rec.data.fsn);
1507 			asoc->size_on_reasm_queue += chk->send_size;
1508 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1509 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1510 		}
1511 	}
1512 	/*
1513 	 * Ok, let's see if we can pull any in-sequence fragments up into
1514 	 * the control structure, if it makes sense.
1515 	 */
1516 	do_wakeup = 0;
1517 	/*
1518 	 * If the first fragment has not been seen there is no sense in
1519 	 * looking.
1520 	 */
1521 	if (control->first_frag_seen) {
1522 		next_fsn = control->fsn_included + 1;
1523 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1524 			if (at->rec.data.fsn == next_fsn) {
1525 				/* We can add this one now to the control */
1526 				SCTPDBG(SCTP_DEBUG_XXX,
1527 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1528 				    control, at,
1529 				    at->rec.data.fsn,
1530 				    next_fsn, control->fsn_included);
1531 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1532 				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1533 				if (control->on_read_q) {
1534 					do_wakeup = 1;
1535 				}
1536 				next_fsn++;
1537 				if (control->end_added && control->pdapi_started) {
1538 					if (strm->pd_api_started) {
1539 						strm->pd_api_started = 0;
1540 						control->pdapi_started = 0;
1541 					}
1542 					if (control->on_read_q == 0) {
1543 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1544 						    control,
1545 						    &stcb->sctp_socket->so_rcv, control->end_added,
1546 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1547 						do_wakeup = 1;
1548 					}
1549 					break;
1550 				}
1551 			} else {
1552 				break;
1553 			}
1554 		}
1555 	}
1556 	if (do_wakeup) {
1557 		/* Need to wakeup the reader */
1558 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1559 	}
1560 }
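
/*
 * The final loop of the function above is the common fast path for
 * in-order arrival: every queued fragment whose FSN is exactly
 * fsn_included + 1 is merged straight into the control, so the reasm
 * list only ever holds the out-of-order tail of the message.
 */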
1561 
1562 static struct sctp_queued_to_read *
1563 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1564 {
1565 	struct sctp_queued_to_read *control;
1566 
1567 	if (ordered) {
1568 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1569 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1570 				break;
1571 			}
1572 		}
1573 	} else {
1574 		if (idata_supported) {
1575 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1576 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1577 					break;
1578 				}
1579 			}
1580 		} else {
1581 			control = TAILQ_FIRST(&strm->uno_inqueue);
1582 		}
1583 	}
1584 	return (control);
1585 }
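
/*
 * Note the asymmetry above: without I-DATA the unordered lookup ignores
 * the mid entirely and just returns the first (and normally only)
 * control on uno_inqueue, matching the single-control collapse used by
 * sctp_handle_old_unordered_data().
 */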
1586 
1587 static int
1588 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1589     struct mbuf **m, int offset, int chk_length,
1590     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1591     int *break_flag, int last_chunk, uint8_t chk_type)
1592 {
1593 	/* Process a data chunk */
1595 	struct sctp_tmit_chunk *chk;
1596 	uint32_t tsn, fsn, gap, mid;
1597 	struct mbuf *dmbuf;
1598 	int the_len;
1599 	int need_reasm_check = 0;
1600 	uint16_t sid;
1601 	struct mbuf *op_err;
1602 	char msg[SCTP_DIAG_INFO_LEN];
1603 	struct sctp_queued_to_read *control = NULL;
1604 	uint32_t ppid;
1605 	uint8_t chk_flags;
1606 	struct sctp_stream_reset_list *liste;
1607 	int ordered;
1608 	size_t clen;
1609 	int created_control = 0;
1610 
1611 	if (chk_type == SCTP_IDATA) {
1612 		struct sctp_idata_chunk *chunk, chunk_buf;
1613 
1614 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1615 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1616 		chk_flags = chunk->ch.chunk_flags;
1617 		clen = sizeof(struct sctp_idata_chunk);
1618 		tsn = ntohl(chunk->dp.tsn);
1619 		sid = ntohs(chunk->dp.sid);
1620 		mid = ntohl(chunk->dp.mid);
1621 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1622 			fsn = 0;
1623 			ppid = chunk->dp.ppid_fsn.ppid;
1624 		} else {
1625 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1626 			ppid = 0xffffffff;	/* Use as an invalid value. */
1627 		}
1628 	} else {
1629 		struct sctp_data_chunk *chunk, chunk_buf;
1630 
1631 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1632 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1633 		chk_flags = chunk->ch.chunk_flags;
1634 		clen = sizeof(struct sctp_data_chunk);
1635 		tsn = ntohl(chunk->dp.tsn);
1636 		sid = ntohs(chunk->dp.sid);
1637 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1638 		fsn = tsn;
1639 		ppid = chunk->dp.ppid;
1640 	}
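	/*
	 * Quick reference for the two wire formats parsed above (a
	 * summary, not authoritative): DATA carries a 16-bit SSN, which
	 * is widened into the 32-bit mid, and has no FSN field, so the
	 * TSN doubles as the fragment sequence number. I-DATA carries a
	 * 32-bit MID and overlays PPID/FSN in one field: the first
	 * fragment carries the PPID (its FSN is implicitly 0), and all
	 * later fragments carry the FSN and no PPID.
	 */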
1641 	if ((size_t)chk_length == clen) {
1642 	/*
1643 	 * Need to send an abort since we had an empty data chunk.
1644 	 */
1645 		op_err = sctp_generate_no_user_data_cause(tsn);
1646 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1647 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1648 		*abort_flag = 1;
1649 		return (0);
1650 	}
1651 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1652 		asoc->send_sack = 1;
1653 	}
1654 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1655 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1656 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1657 	}
1658 	if (stcb == NULL) {
1659 		return (0);
1660 	}
1661 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1662 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1663 		/* It is a duplicate */
1664 		SCTP_STAT_INCR(sctps_recvdupdata);
1665 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1666 			/* Record a dup for the next outbound sack */
1667 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1668 			asoc->numduptsns++;
1669 		}
1670 		asoc->send_sack = 1;
1671 		return (0);
1672 	}
1673 	/* Calculate the number of TSNs between the base and this TSN */
1674 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1675 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1676 		/* Can't hold the bit in the mapping array at its max size, toss it */
1677 		return (0);
1678 	}
1679 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1680 		SCTP_TCB_LOCK_ASSERT(stcb);
1681 		if (sctp_expand_mapping_array(asoc, gap)) {
1682 			/* Can't expand, drop it */
1683 			return (0);
1684 		}
1685 	}
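	/*
	 * Note (an annotation, not from the original source):
	 * SCTP_CALC_TSN_TO_GAP computes the distance from the base TSN
	 * using serial (mod 2^32) arithmetic, so wrap-around is handled;
	 * for example, base 0xfffffffe and tsn 0x00000001 give gap 3.
	 * Each mapping array byte tracks 8 TSNs, hence the << 3 shifts in
	 * the bounds checks above.
	 */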
1686 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1687 		*high_tsn = tsn;
1688 	}
1689 	/* See if we have received this one already */
1690 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1691 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1692 		SCTP_STAT_INCR(sctps_recvdupdata);
1693 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1694 			/* Record a dup for the next outbound sack */
1695 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1696 			asoc->numduptsns++;
1697 		}
1698 		asoc->send_sack = 1;
1699 		return (0);
1700 	}
1701 	/*
1702 	 * Check the GONE flag; duplicates would have caused a sack to be
1703 	 * sent up above.
1704 	 */
1705 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1706 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1707 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1708 		/*
1709 		 * wait a minute, this guy is gone, there is no longer a
1710 		 * receiver. Send peer an ABORT!
1711 		 */
1712 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1713 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1714 		*abort_flag = 1;
1715 		return (0);
1716 	}
1717 	/*
1718 	 * Now before going further we see if there is room. If NOT then we
1719 	 * MAY let one through only IF this TSN is the one we are waiting
1720 	 * for on a partial delivery API.
1721 	 */
1722 
1723 	/* Is the stream valid? */
1724 	if (sid >= asoc->streamincnt) {
1725 		struct sctp_error_invalid_stream *cause;
1726 
1727 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1728 		    0, M_NOWAIT, 1, MT_DATA);
1729 		if (op_err != NULL) {
1730 			/* add some space up front so prepend will work well */
1731 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1732 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1733 			/*
1734 			 * Error causes are just parameters and this one has
1735 			 * two back-to-back parameter headers: one with the
1736 			 * error type and size, the other with the stream id
1737 			 * and a reserved field.
1738 			 */
1738 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1739 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1740 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1741 			cause->stream_id = htons(sid);
1742 			cause->reserved = htons(0);
1743 			sctp_queue_op_err(stcb, op_err);
1744 		}
1745 		SCTP_STAT_INCR(sctps_badsid);
1746 		SCTP_TCB_LOCK_ASSERT(stcb);
1747 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1748 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1749 			asoc->highest_tsn_inside_nr_map = tsn;
1750 		}
1751 		if (tsn == (asoc->cumulative_tsn + 1)) {
1752 			/* Update cum-ack */
1753 			asoc->cumulative_tsn = tsn;
1754 		}
1755 		return (0);
1756 	}
1757 	/*
1758 	 * If it's a fragmented message, let's see if we can find the
1759 	 * control on the reassembly queues.
1760 	 */
1761 	if ((chk_type == SCTP_IDATA) &&
1762 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1763 	    (fsn == 0)) {
1764 		/*
1765 		 * The first fragment *must* have fsn 0, and the other
1766 		 * (middle/end) pieces can *not* have fsn 0. XXX: This can
1767 		 * happen in case of a wrap-around. Ignore it for now.
1768 		 */
1769 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1770 		    mid, chk_flags);
1771 		goto err_out;
1772 	}
1773 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1774 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1775 	    chk_flags, control);
1776 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1777 		/* See if we can find the re-assembly entity */
1778 		if (control != NULL) {
1779 			/* We found something, does it belong? */
1780 			if (ordered && (mid != control->mid)) {
1781 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1782 		err_out:
1783 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1784 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1785 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1786 				*abort_flag = 1;
1787 				return (0);
1788 			}
1789 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1790 				/*
1791 				 * We can't have a switched order with an
1792 				 * unordered chunk
1793 				 */
1794 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1795 				    tsn);
1796 				goto err_out;
1797 			}
1798 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1799 				/*
1800 				 * We can't have a switched unordered with an
1801 				 * ordered chunk
1802 				 */
1803 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1804 				    tsn);
1805 				goto err_out;
1806 			}
1807 		}
1808 	} else {
1809 		/*
1810 		 * It's a complete segment. Let's validate that we don't have
1811 		 * a re-assembly going on with the same Stream/Seq (for
1812 		 * ordered) or in the same Stream for unordered.
1813 		 */
1814 		if (control != NULL) {
1815 			if (ordered || asoc->idata_supported) {
1816 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1817 				    chk_flags, mid);
1818 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1819 				goto err_out;
1820 			} else {
1821 				if ((tsn == control->fsn_included + 1) &&
1822 				    (control->end_added == 0)) {
1823 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1824 					goto err_out;
1825 				} else {
1826 					control = NULL;
1827 				}
1828 			}
1829 		}
1830 	}
1831 	/* now do the tests */
1832 	if (((asoc->cnt_on_all_streams +
1833 	    asoc->cnt_on_reasm_queue +
1834 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1835 	    (((int)asoc->my_rwnd) <= 0)) {
1836 		/*
1837 		 * When we have NO room in the rwnd we check to make sure
1838 		 * the reader is doing its job...
1839 		 */
1840 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1841 			/* some to read, wake-up */
1842 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1843 			struct socket *so;
1844 
1845 			so = SCTP_INP_SO(stcb->sctp_ep);
1846 			atomic_add_int(&stcb->asoc.refcnt, 1);
1847 			SCTP_TCB_UNLOCK(stcb);
1848 			SCTP_SOCKET_LOCK(so, 1);
1849 			SCTP_TCB_LOCK(stcb);
1850 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1851 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1852 				/* assoc was freed while we were unlocked */
1853 				SCTP_SOCKET_UNLOCK(so, 1);
1854 				return (0);
1855 			}
1856 #endif
1857 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1858 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1859 			SCTP_SOCKET_UNLOCK(so, 1);
1860 #endif
1861 		}
1862 		/* now is it in the mapping array of what we have accepted? */
1863 		if (chk_type == SCTP_DATA) {
1864 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1865 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1866 				/* Nope, not in the valid range; dump it */
1867 		dump_packet:
1868 				sctp_set_rwnd(stcb, asoc);
1869 				if ((asoc->cnt_on_all_streams +
1870 				    asoc->cnt_on_reasm_queue +
1871 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1872 					SCTP_STAT_INCR(sctps_datadropchklmt);
1873 				} else {
1874 					SCTP_STAT_INCR(sctps_datadroprwnd);
1875 				}
1876 				*break_flag = 1;
1877 				return (0);
1878 			}
1879 		} else {
1880 			if (control == NULL) {
1881 				goto dump_packet;
1882 			}
1883 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1884 				goto dump_packet;
1885 			}
1886 		}
1887 	}
1888 #ifdef SCTP_ASOCLOG_OF_TSNS
1889 	SCTP_TCB_LOCK_ASSERT(stcb);
1890 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1891 		asoc->tsn_in_at = 0;
1892 		asoc->tsn_in_wrapped = 1;
1893 	}
1894 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1895 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1896 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1897 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1898 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
1899 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1900 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1901 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1902 	asoc->tsn_in_at++;
1903 #endif
1904 	/*
1905 	 * Before we continue, let's validate that we are not being fooled
1906 	 * by an evil attacker. We can only have Nk chunks based on our TSN
1907 	 * spread allowed by the mapping array N * 8 bits, so there is no
1908 	 * way our stream sequence numbers could have wrapped. We of course
1909 	 * only validate the FIRST fragment, so the bit must be set.
1910 	 */
1911 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1912 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1913 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1914 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1915 		/* The incoming sseq is behind where we last delivered? */
1916 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1917 		    mid, asoc->strmin[sid].last_mid_delivered);
1918 
1919 		if (asoc->idata_supported) {
1920 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1921 			    asoc->strmin[sid].last_mid_delivered,
1922 			    tsn,
1923 			    sid,
1924 			    mid);
1925 		} else {
1926 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1927 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
1928 			    tsn,
1929 			    sid,
1930 			    (uint16_t)mid);
1931 		}
1932 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1933 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1934 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1935 		*abort_flag = 1;
1936 		return (0);
1937 	}
1938 	if (chk_type == SCTP_IDATA) {
1939 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1940 	} else {
1941 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1942 	}
1943 	if (last_chunk == 0) {
1944 		if (chk_type == SCTP_IDATA) {
1945 			dmbuf = SCTP_M_COPYM(*m,
1946 			    (offset + sizeof(struct sctp_idata_chunk)),
1947 			    the_len, M_NOWAIT);
1948 		} else {
1949 			dmbuf = SCTP_M_COPYM(*m,
1950 			    (offset + sizeof(struct sctp_data_chunk)),
1951 			    the_len, M_NOWAIT);
1952 		}
1953 #ifdef SCTP_MBUF_LOGGING
1954 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1955 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1956 		}
1957 #endif
1958 	} else {
1959 		/* We can steal the last chunk */
1960 		int l_len;
1961 
1962 		dmbuf = *m;
1963 		/* lop off the top part */
1964 		if (chk_type == SCTP_IDATA) {
1965 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1966 		} else {
1967 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1968 		}
1969 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1970 			l_len = SCTP_BUF_LEN(dmbuf);
1971 		} else {
1972 			/*
1973 			 * need to count up the size; hopefully we do not hit
1974 			 * this too often :-0
1975 			 */
1976 			struct mbuf *lat;
1977 
1978 			l_len = 0;
1979 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1980 				l_len += SCTP_BUF_LEN(lat);
1981 			}
1982 		}
1983 		if (l_len > the_len) {
1984 			/* Trim the extra bytes off the end too */
1985 			m_adj(dmbuf, -(l_len - the_len));
1986 		}
1987 	}
1988 	if (dmbuf == NULL) {
1989 		SCTP_STAT_INCR(sctps_nomem);
1990 		return (0);
1991 	}
1992 	/*
1993 	 * Now, no matter what, we need a control; get one if we don't have
1994 	 * one (we may have gotten it above when we found the message was
1995 	 * fragmented).
1996 	 */
1997 	if (control == NULL) {
1998 		sctp_alloc_a_readq(stcb, control);
1999 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2000 		    ppid,
2001 		    sid,
2002 		    chk_flags,
2003 		    NULL, fsn, mid);
2004 		if (control == NULL) {
2005 			SCTP_STAT_INCR(sctps_nomem);
2006 			return (0);
2007 		}
2008 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2009 			control->data = dmbuf;
2010 			control->tail_mbuf = NULL;
2011 			control->end_added = 1;
2012 			control->last_frag_seen = 1;
2013 			control->first_frag_seen = 1;
2014 			control->fsn_included = fsn;
2015 			control->top_fsn = fsn;
2016 		}
2017 		created_control = 1;
2018 	}
2019 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2020 	    chk_flags, ordered, mid, control);
2021 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2022 	    TAILQ_EMPTY(&asoc->resetHead) &&
2023 	    ((ordered == 0) ||
2024 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2025 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2026 		/* Candidate for express delivery */
2027 		/*
2028 		 * It's not fragmented, no PD-API is up, nothing is in the
2029 		 * delivery queue, it's un-ordered OR ordered and the next to
2030 		 * deliver AND nothing else is stuck on the stream queue,
2031 		 * and there is room for it in the socket buffer. Let's just
2032 		 * stuff it up the buffer....
2033 		 */
2034 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2035 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2036 			asoc->highest_tsn_inside_nr_map = tsn;
2037 		}
2038 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2039 		    control, mid);
2040 
2041 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2042 		    control, &stcb->sctp_socket->so_rcv,
2043 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2044 
2045 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2046 			/* for ordered, bump what we delivered */
2047 			asoc->strmin[sid].last_mid_delivered++;
2048 		}
2049 		SCTP_STAT_INCR(sctps_recvexpress);
2050 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2051 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2052 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2053 		}
2054 		control = NULL;
2055 		goto finish_express_del;
2056 	}
2057 	/* Now will we need a chunk too? */
2058 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2059 		sctp_alloc_a_chunk(stcb, chk);
2060 		if (chk == NULL) {
2061 			/* No memory so we drop the chunk */
2062 			SCTP_STAT_INCR(sctps_nomem);
2063 			if (last_chunk == 0) {
2064 				/* we copied it, free the copy */
2065 				sctp_m_freem(dmbuf);
2066 			}
2067 			return (0);
2068 		}
2069 		chk->rec.data.tsn = tsn;
2070 		chk->no_fr_allowed = 0;
2071 		chk->rec.data.fsn = fsn;
2072 		chk->rec.data.mid = mid;
2073 		chk->rec.data.sid = sid;
2074 		chk->rec.data.ppid = ppid;
2075 		chk->rec.data.context = stcb->asoc.context;
2076 		chk->rec.data.doing_fast_retransmit = 0;
2077 		chk->rec.data.rcv_flags = chk_flags;
2078 		chk->asoc = asoc;
2079 		chk->send_size = the_len;
2080 		chk->whoTo = net;
2081 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2082 		    chk,
2083 		    control, mid);
2084 		atomic_add_int(&net->ref_count, 1);
2085 		chk->data = dmbuf;
2086 	}
2087 	/* Set the appropriate TSN mark */
2088 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2089 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2090 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2091 			asoc->highest_tsn_inside_nr_map = tsn;
2092 		}
2093 	} else {
2094 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2095 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2096 			asoc->highest_tsn_inside_map = tsn;
2097 		}
2098 	}
2099 	/* Now is it complete (i.e. not fragmented)? */
2100 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2101 		/*
2102 		 * Special check for when streams are resetting. We could be
2103 		 * smarter about this and check the actual stream to see if
2104 		 * it is not being reset; that way we would not create
2105 		 * head-of-line blocking (HOLB) amongst streams being reset
2106 		 * and those not being reset.
2107 		 */
2109 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2110 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2111 			/*
2112 			 * yep, it's past where we need to reset... go ahead
2113 			 * and queue it.
2114 			 */
2115 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2116 				/* first one on */
2117 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2118 			} else {
2119 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2120 				unsigned char inserted = 0;
2121 
2122 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2123 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2125 						continue;
2126 					} else {
2127 						/* found it */
2128 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2129 						inserted = 1;
2130 						break;
2131 					}
2132 				}
2133 				if (inserted == 0) {
2134 					/*
2135 					 * No entry with a larger TSN was
2136 					 * found, so append at the tail to
2137 					 * keep the queue in TSN order.
2138 					 */
2139 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2140 				}
2141 			}
2142 			goto finish_express_del;
2143 		}
2144 		if (chk_flags & SCTP_DATA_UNORDERED) {
2145 			/* queue directly into socket buffer */
2146 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2147 			    control, mid);
2148 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2149 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2150 			    control,
2151 			    &stcb->sctp_socket->so_rcv, 1,
2152 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2153 
2154 		} else {
2155 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2156 			    mid);
2157 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2158 			if (*abort_flag) {
2159 				if (last_chunk) {
2160 					*m = NULL;
2161 				}
2162 				return (0);
2163 			}
2164 		}
2165 		goto finish_express_del;
2166 	}
2167 	/* If we reach here it's a reassembly */
2168 	need_reasm_check = 1;
2169 	SCTPDBG(SCTP_DEBUG_XXX,
2170 	    "Queue data to stream for reasm control: %p MID: %u\n",
2171 	    control, mid);
2172 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2173 	if (*abort_flag) {
2174 		/*
2175 		 * the assoc is now gone and chk was put onto the reasm
2176 		 * queue, which has all been freed.
2177 		 */
2178 		if (last_chunk) {
2179 			*m = NULL;
2180 		}
2181 		return (0);
2182 	}
2183 finish_express_del:
2184 	/* Here we tidy up things */
2185 	if (tsn == (asoc->cumulative_tsn + 1)) {
2186 		/* Update cum-ack */
2187 		asoc->cumulative_tsn = tsn;
2188 	}
2189 	if (last_chunk) {
2190 		*m = NULL;
2191 	}
2192 	if (ordered) {
2193 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2194 	} else {
2195 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2196 	}
2197 	SCTP_STAT_INCR(sctps_recvdata);
2198 	/* Set it present please */
2199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2200 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2201 	}
2202 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2203 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2204 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2205 	}
2206 	if (need_reasm_check) {
2207 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2208 		need_reasm_check = 0;
2209 	}
2210 	/* check the special flag for stream resets */
2211 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2212 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2213 		/*
2214 		 * We have finished working through the backlogged TSNs; now
2215 		 * it is time to reset streams. 1: call the reset function.
2216 		 * 2: free the pending_reply space. 3: distribute any chunks
2217 		 * in the pending_reply_queue.
2218 		 */
2219 		struct sctp_queued_to_read *ctl, *nctl;
2220 
2221 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2222 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2223 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2224 		SCTP_FREE(liste, SCTP_M_STRESET);
2225 		/* sa_ignore FREED_MEMORY */
2226 		liste = TAILQ_FIRST(&asoc->resetHead);
2227 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2228 			/* All can be removed */
2229 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2230 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2231 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2232 				if (*abort_flag) {
2233 					return (0);
2234 				}
2235 				if (need_reasm_check) {
2236 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2237 					need_reasm_check = 0;
2238 				}
2239 			}
2240 		} else {
2241 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2242 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2243 					break;
2244 				}
2245 				/*
2246 				 * If ctl->sinfo_tsn is <= liste->tsn we can
2247 				 * process it, which is the negation of the
2248 				 * break condition (ctl->sinfo_tsn > liste->tsn)
2249 				 * above.
2250 				 */
2250 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2251 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
2252 				if (*abort_flag) {
2253 					return (0);
2254 				}
2255 				if (need_reasm_check) {
2256 					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2257 					need_reasm_check = 0;
2258 				}
2259 			}
2260 		}
2261 	}
2262 	return (1);
2263 }
2264 
2265 static const int8_t sctp_map_lookup_tab[256] = {
2266 	0, 1, 0, 2, 0, 1, 0, 3,
2267 	0, 1, 0, 2, 0, 1, 0, 4,
2268 	0, 1, 0, 2, 0, 1, 0, 3,
2269 	0, 1, 0, 2, 0, 1, 0, 5,
2270 	0, 1, 0, 2, 0, 1, 0, 3,
2271 	0, 1, 0, 2, 0, 1, 0, 4,
2272 	0, 1, 0, 2, 0, 1, 0, 3,
2273 	0, 1, 0, 2, 0, 1, 0, 6,
2274 	0, 1, 0, 2, 0, 1, 0, 3,
2275 	0, 1, 0, 2, 0, 1, 0, 4,
2276 	0, 1, 0, 2, 0, 1, 0, 3,
2277 	0, 1, 0, 2, 0, 1, 0, 5,
2278 	0, 1, 0, 2, 0, 1, 0, 3,
2279 	0, 1, 0, 2, 0, 1, 0, 4,
2280 	0, 1, 0, 2, 0, 1, 0, 3,
2281 	0, 1, 0, 2, 0, 1, 0, 7,
2282 	0, 1, 0, 2, 0, 1, 0, 3,
2283 	0, 1, 0, 2, 0, 1, 0, 4,
2284 	0, 1, 0, 2, 0, 1, 0, 3,
2285 	0, 1, 0, 2, 0, 1, 0, 5,
2286 	0, 1, 0, 2, 0, 1, 0, 3,
2287 	0, 1, 0, 2, 0, 1, 0, 4,
2288 	0, 1, 0, 2, 0, 1, 0, 3,
2289 	0, 1, 0, 2, 0, 1, 0, 6,
2290 	0, 1, 0, 2, 0, 1, 0, 3,
2291 	0, 1, 0, 2, 0, 1, 0, 4,
2292 	0, 1, 0, 2, 0, 1, 0, 3,
2293 	0, 1, 0, 2, 0, 1, 0, 5,
2294 	0, 1, 0, 2, 0, 1, 0, 3,
2295 	0, 1, 0, 2, 0, 1, 0, 4,
2296 	0, 1, 0, 2, 0, 1, 0, 3,
2297 	0, 1, 0, 2, 0, 1, 0, 8
2298 };
2299 
2300 
2301 void
2302 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2303 {
2304 	/*
2305 	 * Now we also need to check the mapping array in a couple of ways.
2306 	 * 1) Did we move the cum-ack point?
2307 	 *
2308 	 * When you first glance at this you might think that all entries
2309 	 * that make up the position of the cum-ack would be in the
2310 	 * nr-mapping array only, i.e. things up to the cum-ack are always
2311 	 * deliverable. That's true with one exception: when it's a
2312 	 * fragmented message, we may not deliver the data until some
2313 	 * threshold (or all of it) is in place. So we must OR the
2314 	 * nr_mapping_array and mapping_array to get a true picture of the
2315 	 * cum-ack.
2316 	 */
2316 	struct sctp_association *asoc;
2317 	int at;
2318 	uint8_t val;
2319 	int slide_from, slide_end, lgap, distance;
2320 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2321 
2322 	asoc = &stcb->asoc;
2323 
2324 	old_cumack = asoc->cumulative_tsn;
2325 	old_base = asoc->mapping_array_base_tsn;
2326 	old_highest = asoc->highest_tsn_inside_map;
2327 	/*
2328 	 * We could probably improve this a small bit by calculating the
2329 	 * offset of the current cum-ack as the starting point.
2330 	 */
2331 	at = 0;
2332 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2333 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2334 		if (val == 0xff) {
2335 			at += 8;
2336 		} else {
2337 			/* there is a 0 bit */
2338 			at += sctp_map_lookup_tab[val];
2339 			break;
2340 		}
2341 	}
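	/*
	 * A worked example (an annotation, not from the original source):
	 * if the OR'd bytes are 0xff, 0x1f, ... then the loop above leaves
	 * at = 8 + 5 = 13, meaning the first 13 TSNs starting at
	 * mapping_array_base_tsn have been seen, and the cum-ack computed
	 * below becomes base_tsn + 12.
	 */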
2342 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2343 
2344 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2345 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2346 #ifdef INVARIANTS
2347 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2348 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2349 #else
2350 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2351 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2352 		sctp_print_mapping_array(asoc);
2353 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2354 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2355 		}
2356 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2357 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2358 #endif
2359 	}
2360 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2361 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2362 	} else {
2363 		highest_tsn = asoc->highest_tsn_inside_map;
2364 	}
2365 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2366 		/* The complete array was completed by a single FR */
2367 		/* highest becomes the cum-ack */
2368 		int clr;
2369 #ifdef INVARIANTS
2370 		unsigned int i;
2371 #endif
2372 
2373 		/* clear the array */
2374 		clr = ((at + 7) >> 3);
2375 		if (clr > asoc->mapping_array_size) {
2376 			clr = asoc->mapping_array_size;
2377 		}
2378 		memset(asoc->mapping_array, 0, clr);
2379 		memset(asoc->nr_mapping_array, 0, clr);
2380 #ifdef INVARIANTS
2381 		for (i = 0; i < asoc->mapping_array_size; i++) {
2382 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2383 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2384 				sctp_print_mapping_array(asoc);
2385 			}
2386 		}
2387 #endif
2388 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2389 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2390 	} else if (at >= 8) {
2391 		/* we can slide the mapping array down */
2392 		/* slide_from holds where we hit the first NON 0xff byte */
2393 
2394 		/*
2395 		 * now calculate the ceiling of the move using our highest
2396 		 * TSN value
2397 		 */
2398 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2399 		slide_end = (lgap >> 3);
2400 		if (slide_end < slide_from) {
2401 			sctp_print_mapping_array(asoc);
2402 #ifdef INVARIANTS
2403 			panic("impossible slide");
2404 #else
2405 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2406 			    lgap, slide_end, slide_from, at);
2407 			return;
2408 #endif
2409 		}
2410 		if (slide_end > asoc->mapping_array_size) {
2411 #ifdef INVARIANTS
2412 			panic("would overrun buffer");
2413 #else
2414 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2415 			    asoc->mapping_array_size, slide_end);
2416 			slide_end = asoc->mapping_array_size;
2417 #endif
2418 		}
2419 		distance = (slide_end - slide_from) + 1;
2420 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2421 			sctp_log_map(old_base, old_cumack, old_highest,
2422 			    SCTP_MAP_PREPARE_SLIDE);
2423 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2424 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2425 		}
2426 		if (distance + slide_from > asoc->mapping_array_size ||
2427 		    distance < 0) {
2428 			/*
2429 			 * Here we do NOT slide forward the array, so that
2430 			 * hopefully when more data comes in to fill it up,
2431 			 * we will be able to slide it forward. Really, I
2432 			 * don't think this should happen :-0
2433 			 */
2434 
2435 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2436 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2437 				    (uint32_t)asoc->mapping_array_size,
2438 				    SCTP_MAP_SLIDE_NONE);
2439 			}
2440 		} else {
2441 			int ii;
2442 
2443 			for (ii = 0; ii < distance; ii++) {
2444 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2445 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2447 			}
2448 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2449 				asoc->mapping_array[ii] = 0;
2450 				asoc->nr_mapping_array[ii] = 0;
2451 			}
2452 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2453 				asoc->highest_tsn_inside_map += (slide_from << 3);
2454 			}
2455 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2456 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2457 			}
2458 			asoc->mapping_array_base_tsn += (slide_from << 3);
2459 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2460 				sctp_log_map(asoc->mapping_array_base_tsn,
2461 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2462 				    SCTP_MAP_SLIDE_RESULT);
2463 			}
2464 		}
2465 	}
2466 }
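/*
 * Illustrative sketch of the slide above (an annotation, not from the
 * original source): with slide_from = 2 (the first byte containing a 0
 * bit) and highest_tsn falling in byte 5, slide_end = 5 and
 * distance = 4, so bytes 2..5 of both arrays are copied down to bytes
 * 0..3, the remainder is zeroed, and mapping_array_base_tsn advances
 * by 2 * 8 = 16 TSNs.
 */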
2467 
2468 void
2469 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2470 {
2471 	struct sctp_association *asoc;
2472 	uint32_t highest_tsn;
2473 	int is_a_gap;
2474 
2475 	sctp_slide_mapping_arrays(stcb);
2476 	asoc = &stcb->asoc;
2477 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2478 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2479 	} else {
2480 		highest_tsn = asoc->highest_tsn_inside_map;
2481 	}
2482 	/* Is there a gap now? */
2483 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2484 
2485 	/*
2486 	 * Now we need to see if we need to queue a sack or just start the
2487 	 * timer (if allowed).
2488 	 */
2489 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2490 		/*
2491 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2492 		 * sure the SACK timer is off and instead send a SHUTDOWN
2493 		 * and a SACK.
2494 		 */
2495 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2496 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2497 			    stcb->sctp_ep, stcb, NULL,
2498 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2499 		}
2500 		sctp_send_shutdown(stcb,
2501 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2502 		if (is_a_gap) {
2503 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2504 		}
2505 	} else {
2506 		/*
2507 		 * CMT DAC algorithm: increase number of packets received
2508 		 * since last ack
2509 		 */
2510 		stcb->asoc.cmt_dac_pkts_rcvd++;
2511 
2512 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2513 							 * SACK */
2514 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2515 							 * longer is one */
2516 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2517 		    (is_a_gap) ||	/* is still a gap */
2518 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2519 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2520 		    ) {
2522 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2523 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2524 			    (stcb->asoc.send_sack == 0) &&
2525 			    (stcb->asoc.numduptsns == 0) &&
2526 			    (stcb->asoc.delayed_ack) &&
2527 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2529 				/*
2530 				 * CMT DAC algorithm: With CMT, delay acks
2531 				 * even in the face of reordering.
2532 				 * Therefore, acks that do not have to be
2533 				 * sent because of the above reasons will
2534 				 * be delayed. That is, acks that would
2535 				 * have been sent due to gap reports will
2536 				 * be delayed with DAC. Start the delayed
2537 				 * ack timer.
2538 				 */
2540 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2541 				    stcb->sctp_ep, stcb, NULL);
2542 			} else {
2543 				/*
2544 				 * Ok, we must build a SACK since the timer
2545 				 * is pending, we got our first packet, OR
2546 				 * there are gaps or duplicates.
2547 				 */
2548 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2549 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2550 			}
2551 		} else {
2552 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2553 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2554 				    stcb->sctp_ep, stcb, NULL);
2555 			}
2556 		}
2557 	}
2558 }
2559 
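/*
 * Summary of the return values below (derived from the function body,
 * stated here for orientation): 0 means the packet was processed and a
 * SACK decision was made, 1 means the first chunk header could not be
 * read, and 2 means the association was aborted while processing.
 */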
2560 int
2561 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2562     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2563     struct sctp_nets *net, uint32_t *high_tsn)
2564 {
2565 	struct sctp_chunkhdr *ch, chunk_buf;
2566 	struct sctp_association *asoc;
2567 	int num_chunks = 0;	/* number of control chunks processed */
2568 	int stop_proc = 0;
2569 	int chk_length, break_flag, last_chunk;
2570 	int abort_flag = 0, was_a_gap;
2571 	struct mbuf *m;
2572 	uint32_t highest_tsn;
2573 
2574 	/* set the rwnd */
2575 	sctp_set_rwnd(stcb, &stcb->asoc);
2576 
2577 	m = *mm;
2578 	SCTP_TCB_LOCK_ASSERT(stcb);
2579 	asoc = &stcb->asoc;
2580 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2581 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2582 	} else {
2583 		highest_tsn = asoc->highest_tsn_inside_map;
2584 	}
2585 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2586 	/*
2587 	 * setup where we got the last DATA packet from for any SACK that
2588 	 * may need to go out. Don't bump the net. This is done ONLY when a
2589 	 * chunk is assigned.
2590 	 */
2591 	asoc->last_data_chunk_from = net;
2592 
2593 	/*-
2594 	 * Now before we proceed we must figure out if this is a wasted
2595 	 * cluster... i.e. it is a small packet sent in and yet the driver
2596 	 * underneath allocated a full cluster for it. If so we must copy it
2597 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2598 	 * with cluster starvation. Note for __Panda__ we don't do this
2599 	 * since it has clusters all the way down to 64 bytes.
2600 	 */
2601 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2602 		/* we only handle mbufs that are singletons.. not chains */
2603 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2604 		if (m) {
2605 			/* ok lets see if we can copy the data up */
2606 			caddr_t *from, *to;
2607 
2608 			/* get the pointers and copy */
2609 			to = mtod(m, caddr_t *);
2610 			from = mtod((*mm), caddr_t *);
2611 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2612 			/* copy the length and free up the old */
2613 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2614 			sctp_m_freem(*mm);
2615 			/* success, back copy */
2616 			*mm = m;
2617 		} else {
2618 			/* We are in trouble in the mbuf world .. yikes */
2619 			m = *mm;
2620 		}
2621 	}
2622 	/* get pointer to the first chunk header */
2623 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2624 	    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2625 	if (ch == NULL) {
2626 		return (1);
2627 	}
2628 	/*
2629 	 * process all DATA chunks...
2630 	 */
2631 	*high_tsn = asoc->cumulative_tsn;
2632 	break_flag = 0;
2633 	asoc->data_pkts_seen++;
2634 	while (stop_proc == 0) {
2635 		/* validate chunk length */
2636 		chk_length = ntohs(ch->chunk_length);
2637 		if (length - *offset < chk_length) {
2638 			/* all done, mutilated chunk */
2639 			stop_proc = 1;
2640 			continue;
2641 		}
2642 		if ((asoc->idata_supported == 1) &&
2643 		    (ch->chunk_type == SCTP_DATA)) {
2644 			struct mbuf *op_err;
2645 			char msg[SCTP_DIAG_INFO_LEN];
2646 
2647 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2648 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2649 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2650 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2651 			return (2);
2652 		}
2653 		if ((asoc->idata_supported == 0) &&
2654 		    (ch->chunk_type == SCTP_IDATA)) {
2655 			struct mbuf *op_err;
2656 			char msg[SCTP_DIAG_INFO_LEN];
2657 
2658 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2659 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2660 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2661 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2662 			return (2);
2663 		}
2664 		if ((ch->chunk_type == SCTP_DATA) ||
2665 		    (ch->chunk_type == SCTP_IDATA)) {
2666 			int clen;
2667 
2668 			if (ch->chunk_type == SCTP_DATA) {
2669 				clen = sizeof(struct sctp_data_chunk);
2670 			} else {
2671 				clen = sizeof(struct sctp_idata_chunk);
2672 			}
2673 			if (chk_length < clen) {
2674 				/*
2675 				 * Need to send an abort since we had an
2676 				 * invalid data chunk.
2677 				 */
2678 				struct mbuf *op_err;
2679 				char msg[SCTP_DIAG_INFO_LEN];
2680 
2681 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2682 				    chk_length);
2683 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2684 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2685 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2686 				return (2);
2687 			}
2688 #ifdef SCTP_AUDITING_ENABLED
2689 			sctp_audit_log(0xB1, 0);
2690 #endif
2691 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2692 				last_chunk = 1;
2693 			} else {
2694 				last_chunk = 0;
2695 			}
2696 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2697 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2698 			    last_chunk, ch->chunk_type)) {
2699 				num_chunks++;
2700 			}
2701 			if (abort_flag)
2702 				return (2);
2703 
2704 			if (break_flag) {
2705 				/*
2706 				 * Set because we ran out of rwnd space and
2707 				 * have no drop report space left.
2708 				 */
2709 				stop_proc = 1;
2710 				continue;
2711 			}
2712 		} else {
2713 			/* not a data chunk in the data region */
2714 			switch (ch->chunk_type) {
2715 			case SCTP_INITIATION:
2716 			case SCTP_INITIATION_ACK:
2717 			case SCTP_SELECTIVE_ACK:
2718 			case SCTP_NR_SELECTIVE_ACK:
2719 			case SCTP_HEARTBEAT_REQUEST:
2720 			case SCTP_HEARTBEAT_ACK:
2721 			case SCTP_ABORT_ASSOCIATION:
2722 			case SCTP_SHUTDOWN:
2723 			case SCTP_SHUTDOWN_ACK:
2724 			case SCTP_OPERATION_ERROR:
2725 			case SCTP_COOKIE_ECHO:
2726 			case SCTP_COOKIE_ACK:
2727 			case SCTP_ECN_ECHO:
2728 			case SCTP_ECN_CWR:
2729 			case SCTP_SHUTDOWN_COMPLETE:
2730 			case SCTP_AUTHENTICATION:
2731 			case SCTP_ASCONF_ACK:
2732 			case SCTP_PACKET_DROPPED:
2733 			case SCTP_STREAM_RESET:
2734 			case SCTP_FORWARD_CUM_TSN:
2735 			case SCTP_ASCONF:
2736 				{
2737 					/*
2738 					 * Now, what do we do with KNOWN
2739 					 * chunks that are NOT in the right
2740 					 * place?
2741 					 *
2742 					 * For now, I do nothing but ignore
2743 					 * them. We may later want to add
2744 					 * sysctl stuff to switch out and do
2745 					 * either an ABORT() or possibly
2746 					 * process them.
2747 					 */
2748 					struct mbuf *op_err;
2749 					char msg[SCTP_DIAG_INFO_LEN];
2750 
2751 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2752 					    ch->chunk_type);
2753 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2754 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2755 					return (2);
2756 				}
2757 			default:
2758 				/* unknown chunk type, use bit rules */
2759 				if (ch->chunk_type & 0x40) {
2760 					/* Add an error report to the queue */
2761 					struct mbuf *op_err;
2762 					struct sctp_gen_error_cause *cause;
2763 
2764 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2765 					    0, M_NOWAIT, 1, MT_DATA);
2766 					if (op_err != NULL) {
2767 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2768 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2769 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2770 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2771 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2772 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2773 							sctp_queue_op_err(stcb, op_err);
2774 						} else {
2775 							sctp_m_freem(op_err);
2776 						}
2777 					}
2778 				}
2779 				if ((ch->chunk_type & 0x80) == 0) {
2780 					/* discard the rest of this packet */
2781 					stop_proc = 1;
2782 				}	/* else skip this bad chunk and
2783 					 * continue... */
2784 				break;
2785 			}	/* switch of chunk type */
2786 		}
2787 		*offset += SCTP_SIZE32(chk_length);
2788 		if ((*offset >= length) || stop_proc) {
2789 			/* no more data left in the mbuf chain */
2790 			stop_proc = 1;
2791 			continue;
2792 		}
2793 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2794 		    sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
2795 		if (ch == NULL) {
2796 			*offset = length;
2797 			stop_proc = 1;
2798 			continue;
2799 		}
2800 	}
2801 	if (break_flag) {
2802 		/*
2803 		 * we need to report rwnd overrun drops.
2804 		 */
2805 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2806 	}
2807 	if (num_chunks) {
2808 		/*
2809 		 * Did we get data? If so, update the time for auto-close and
2810 		 * give the peer credit for being alive.
2811 		 */
2812 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2813 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2814 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2815 			    stcb->asoc.overall_error_count,
2816 			    0,
2817 			    SCTP_FROM_SCTP_INDATA,
2818 			    __LINE__);
2819 		}
2820 		stcb->asoc.overall_error_count = 0;
2821 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2822 	}
2823 	/* now service all of the reassm queue if needed */
2824 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2825 		/* Assure that we ack right away */
2826 		stcb->asoc.send_sack = 1;
2827 	}
2828 	/* Start a sack timer or QUEUE a SACK for sending */
2829 	sctp_sack_check(stcb, was_a_gap);
2830 	return (0);
2831 }
2832 
2833 static int
2834 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2835     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2836     int *num_frs,
2837     uint32_t *biggest_newly_acked_tsn,
2838     uint32_t *this_sack_lowest_newack,
2839     int *rto_ok)
2840 {
2841 	struct sctp_tmit_chunk *tp1;
2842 	unsigned int theTSN;
2843 	int j, wake_him = 0, circled = 0;
2844 
2845 	/* Recover the tp1 we last saw */
2846 	tp1 = *p_tp1;
2847 	if (tp1 == NULL) {
2848 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2849 	}
2850 	for (j = frag_strt; j <= frag_end; j++) {
2851 		theTSN = j + last_tsn;
2852 		while (tp1) {
2853 			if (tp1->rec.data.doing_fast_retransmit)
2854 				(*num_frs) += 1;
2855 
2856 			/*-
2857 			 * CMT: CUCv2 algorithm. For each TSN being
2858 			 * processed from the sent queue, track the
2859 			 * next expected pseudo-cumack, or
2860 			 * rtx_pseudo_cumack, if required. Separate
2861 			 * cumack trackers for first transmissions,
2862 			 * and retransmissions.
2863 			 */
2864 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2865 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2866 			    (tp1->snd_count == 1)) {
2867 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2868 				tp1->whoTo->find_pseudo_cumack = 0;
2869 			}
2870 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2871 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2872 			    (tp1->snd_count > 1)) {
2873 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2874 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2875 			}
2876 			if (tp1->rec.data.tsn == theTSN) {
2877 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2878 					/*-
2879 					 * must be held until
2880 					 * cum-ack passes
2881 					 */
2882 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2883 						/*-
2884 						 * If it is less than RESEND, it is
2885 						 * now no-longer in flight.
2886 						 * Higher values may already be set
2887 						 * via previous Gap Ack Blocks...
2888 						 * i.e. ACKED or RESEND.
2889 						 */
2890 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2891 						    *biggest_newly_acked_tsn)) {
2892 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2893 						}
2894 						/*-
2895 						 * CMT: SFR algo (and HTNA) - set
2896 						 * saw_newack to 1 for dest being
2897 						 * newly acked. update
2898 						 * this_sack_highest_newack if
2899 						 * appropriate.
2900 						 */
2901 						if (tp1->rec.data.chunk_was_revoked == 0)
2902 							tp1->whoTo->saw_newack = 1;
2903 
2904 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2905 						    tp1->whoTo->this_sack_highest_newack)) {
2906 							tp1->whoTo->this_sack_highest_newack =
2907 							    tp1->rec.data.tsn;
2908 						}
2909 						/*-
2910 						 * CMT DAC algo: also update
2911 						 * this_sack_lowest_newack
2912 						 */
2913 						if (*this_sack_lowest_newack == 0) {
2914 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2915 								sctp_log_sack(*this_sack_lowest_newack,
2916 								    last_tsn,
2917 								    tp1->rec.data.tsn,
2918 								    0,
2919 								    0,
2920 								    SCTP_LOG_TSN_ACKED);
2921 							}
2922 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2923 						}
2924 						/*-
2925 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2926 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2927 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2928 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2929 						 * Separate pseudo_cumack trackers for first transmissions and
2930 						 * retransmissions.
2931 						 */
2932 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2933 							if (tp1->rec.data.chunk_was_revoked == 0) {
2934 								tp1->whoTo->new_pseudo_cumack = 1;
2935 							}
2936 							tp1->whoTo->find_pseudo_cumack = 1;
2937 						}
2938 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2939 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2940 						}
2941 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2942 							if (tp1->rec.data.chunk_was_revoked == 0) {
2943 								tp1->whoTo->new_pseudo_cumack = 1;
2944 							}
2945 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2946 						}
2947 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2948 							sctp_log_sack(*biggest_newly_acked_tsn,
2949 							    last_tsn,
2950 							    tp1->rec.data.tsn,
2951 							    frag_strt,
2952 							    frag_end,
2953 							    SCTP_LOG_TSN_ACKED);
2954 						}
2955 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2956 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2957 							    tp1->whoTo->flight_size,
2958 							    tp1->book_size,
2959 							    (uint32_t)(uintptr_t)tp1->whoTo,
2960 							    tp1->rec.data.tsn);
2961 						}
2962 						sctp_flight_size_decrease(tp1);
2963 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2964 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2965 							    tp1);
2966 						}
2967 						sctp_total_flight_decrease(stcb, tp1);
2968 
2969 						tp1->whoTo->net_ack += tp1->send_size;
2970 						if (tp1->snd_count < 2) {
2971 							/*-
2972 							 * True non-retransmitted chunk
2973 							 */
2974 							tp1->whoTo->net_ack2 += tp1->send_size;
2975 
2976 							/*-
2977 							 * update RTO too ?
2978 							 */
2979 							if (tp1->do_rtt) {
2980 								if (*rto_ok) {
2981 									tp1->whoTo->RTO =
2982 									    sctp_calculate_rto(stcb,
2983 									    &stcb->asoc,
2984 									    tp1->whoTo,
2985 									    &tp1->sent_rcv_time,
2986 									    sctp_align_safe_nocopy,
2987 									    SCTP_RTT_FROM_DATA);
2988 									*rto_ok = 0;
2989 								}
2990 								if (tp1->whoTo->rto_needed == 0) {
2991 									tp1->whoTo->rto_needed = 1;
2992 								}
2993 								tp1->do_rtt = 0;
2994 							}
2995 						}
2996 					}
2997 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2998 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2999 						    stcb->asoc.this_sack_highest_gap)) {
3000 							stcb->asoc.this_sack_highest_gap =
3001 							    tp1->rec.data.tsn;
3002 						}
3003 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3004 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3005 #ifdef SCTP_AUDITING_ENABLED
3006 							sctp_audit_log(0xB2,
3007 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3008 #endif
3009 						}
3010 					}
3011 					/*-
3012 					 * All chunks NOT UNSENT fall through here and are marked
3013 					 * (leave PR-SCTP ones that are to skip alone though)
3014 					 */
3015 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3016 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3017 						tp1->sent = SCTP_DATAGRAM_MARKED;
3018 					}
3019 					if (tp1->rec.data.chunk_was_revoked) {
3020 						/* deflate the cwnd */
3021 						tp1->whoTo->cwnd -= tp1->book_size;
3022 						tp1->rec.data.chunk_was_revoked = 0;
3023 					}
3024 					/* NR Sack code here */
3025 					if (nr_sacking &&
3026 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3027 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3028 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3029 #ifdef INVARIANTS
3030 						} else {
3031 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3032 #endif
3033 						}
3034 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3035 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3036 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3037 							stcb->asoc.trigger_reset = 1;
3038 						}
3039 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3040 						if (tp1->data) {
3041 							/*
3042 							 * sa_ignore
3043 							 * NO_NULL_CHK
3044 							 */
3045 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3046 							sctp_m_freem(tp1->data);
3047 							tp1->data = NULL;
3048 						}
3049 						wake_him++;
3050 					}
3051 				}
3052 				break;
3053 			}	/* if (tp1->tsn == theTSN) */
3054 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3055 				break;
3056 			}
3057 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3058 			if ((tp1 == NULL) && (circled == 0)) {
3059 				circled++;
3060 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3061 			}
3062 		}		/* end while (tp1) */
3063 		if (tp1 == NULL) {
3064 			circled = 0;
3065 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3066 		}
3067 		/* In case the fragments were not in order we must reset */
3068 	}			/* end for (j = fragStart */
3069 	*p_tp1 = tp1;
3070 	return (wake_him);	/* Return value only used for nr-sack */
3071 }
3072 
3073 
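/*
 * Note on gap ack blocks (per RFC 4960, stated here for orientation):
 * the start/end values in each block are offsets relative to the
 * SACK's cumulative TSN ack. With last_tsn = 100, a block with start 3
 * and end 5 reports TSNs 103 through 105 as received, which is why
 * sctp_process_segment_range() above walks theTSN = j + last_tsn for
 * j in [frag_strt, frag_end].
 */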
3074 static int
3075 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3076     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3077     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3078     int num_seg, int num_nr_seg, int *rto_ok)
3079 {
3080 	struct sctp_gap_ack_block *frag, block;
3081 	struct sctp_tmit_chunk *tp1;
3082 	int i;
3083 	int num_frs = 0;
3084 	int chunk_freed;
3085 	int non_revocable;
3086 	uint16_t frag_strt, frag_end, prev_frag_end;
3087 
3088 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3089 	prev_frag_end = 0;
3090 	chunk_freed = 0;
3091 
3092 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3093 		if (i == num_seg) {
3094 			prev_frag_end = 0;
3095 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3096 		}
3097 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3098 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3099 		*offset += sizeof(block);
3100 		if (frag == NULL) {
3101 			return (chunk_freed);
3102 		}
3103 		frag_strt = ntohs(frag->start);
3104 		frag_end = ntohs(frag->end);
3105 
3106 		if (frag_strt > frag_end) {
3107 			/* This gap report is malformed, skip it. */
3108 			continue;
3109 		}
3110 		if (frag_strt <= prev_frag_end) {
3111 			/* This gap report is not in order, so restart. */
3112 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3113 		}
3114 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3115 			*biggest_tsn_acked = last_tsn + frag_end;
3116 		}
3117 		if (i < num_seg) {
3118 			non_revocable = 0;
3119 		} else {
3120 			non_revocable = 1;
3121 		}
3122 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3123 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3124 		    this_sack_lowest_newack, rto_ok)) {
3125 			chunk_freed = 1;
3126 		}
3127 		prev_frag_end = frag_end;
3128 	}
3129 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3130 		if (num_frs)
3131 			sctp_log_fr(*biggest_tsn_acked,
3132 			    *biggest_newly_acked_tsn,
3133 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3134 	}
3135 	return (chunk_freed);
3136 }
3137 
3138 static void
3139 sctp_check_for_revoked(struct sctp_tcb *stcb,
3140     struct sctp_association *asoc, uint32_t cumack,
3141     uint32_t biggest_tsn_acked)
3142 {
3143 	struct sctp_tmit_chunk *tp1;
3144 
3145 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3146 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3147 			/*
3148 			 * ok, this guy is either ACKED or MARKED. If it is
3149 			 * ACKED it has been previously acked but not this
3150 			 * time, i.e. revoked. If it is MARKED it was ACKed
3151 			 * again.
3152 			 */
3153 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3154 				break;
3155 			}
3156 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3157 				/* it has been revoked */
3158 				tp1->sent = SCTP_DATAGRAM_SENT;
3159 				tp1->rec.data.chunk_was_revoked = 1;
3160 				/*
3161 				 * We must add this stuff back in to assure
3162 				 * timers and such get started.
3163 				 */
3164 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3165 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3166 					    tp1->whoTo->flight_size,
3167 					    tp1->book_size,
3168 					    (uint32_t)(uintptr_t)tp1->whoTo,
3169 					    tp1->rec.data.tsn);
3170 				}
3171 				sctp_flight_size_increase(tp1);
3172 				sctp_total_flight_increase(stcb, tp1);
3173 				/*
3174 				 * We inflate the cwnd to compensate for our
3175 				 * artificial inflation of the flight_size.
3176 				 */
3177 				tp1->whoTo->cwnd += tp1->book_size;
3178 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3179 					sctp_log_sack(asoc->last_acked_seq,
3180 					    cumack,
3181 					    tp1->rec.data.tsn,
3182 					    0,
3183 					    0,
3184 					    SCTP_LOG_TSN_REVOKED);
3185 				}
3186 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3187 				/* it has been re-acked in this SACK */
3188 				tp1->sent = SCTP_DATAGRAM_ACKED;
3189 			}
3190 		}
3191 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3192 			break;
3193 	}
3194 }
3195 
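/*
 * A note on revocation (illustrative): suppose TSN 5 was covered by a
 * gap-ack block in the previous SACK and was moved to SCTP_DATAGRAM_ACKED.
 * If a later SACK has a cum-ack below 5 and no block covering 5, the peer
 * has revoked (reneged on) that report, so sctp_check_for_revoked() above
 * moves the chunk back to SCTP_DATAGRAM_SENT, restores its flight size,
 * and inflates the cwnd by book_size so the artificial flight increase
 * does not throttle the sender.
 */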
3196 
3197 static void
3198 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3199     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3200 {
3201 	struct sctp_tmit_chunk *tp1;
3202 	int strike_flag = 0;
3203 	struct timeval now;
3204 	int tot_retrans = 0;
3205 	uint32_t sending_seq;
3206 	struct sctp_nets *net;
3207 	int num_dests_sacked = 0;
3208 
3209 	/*
3210 	 * Select the sending_seq: this is either the next thing ready to be
3211 	 * sent but not yet transmitted, OR the next seq we will assign.
3212 	 */
3213 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3214 	if (tp1 == NULL) {
3215 		sending_seq = asoc->sending_seq;
3216 	} else {
3217 		sending_seq = tp1->rec.data.tsn;
3218 	}
3219 
3220 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3221 	if ((asoc->sctp_cmt_on_off > 0) &&
3222 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3223 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3224 			if (net->saw_newack)
3225 				num_dests_sacked++;
3226 		}
3227 	}
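	/*
	 * Illustrative example: with CMT over two destinations, a SACK that
	 * reports new acks for only one of them leaves num_dests_sacked at
	 * 1; the DAC checks below apply their extra strike only in that
	 * non-mixed case.
	 */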
3228 	if (stcb->asoc.prsctp_supported) {
3229 		(void)SCTP_GETTIME_TIMEVAL(&now);
3230 	}
3231 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3232 		strike_flag = 0;
3233 		if (tp1->no_fr_allowed) {
3234 			/* this one had a timeout or something */
3235 			continue;
3236 		}
3237 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3238 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3239 				sctp_log_fr(biggest_tsn_newly_acked,
3240 				    tp1->rec.data.tsn,
3241 				    tp1->sent,
3242 				    SCTP_FR_LOG_CHECK_STRIKE);
3243 		}
3244 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3245 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3246 			/* done */
3247 			break;
3248 		}
3249 		if (stcb->asoc.prsctp_supported) {
3250 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3251 				/* Is it expired? */
3252 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3253 					/* Yes so drop it */
3254 					if (tp1->data != NULL) {
3255 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3256 						    SCTP_SO_NOT_LOCKED);
3257 					}
3258 					continue;
3259 				}
3260 			}
3261 		}
3262 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3263 			/* we are beyond the tsn in the sack  */
3264 			break;
3265 		}
3266 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3267 			/* either a RESEND, ACKED, or MARKED */
3268 			/* skip */
3269 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3270 				/* Continue striking FWD-TSN chunks */
3271 				tp1->rec.data.fwd_tsn_cnt++;
3272 			}
3273 			continue;
3274 		}
3275 		/*
3276 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3277 		 */
3278 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3279 			/*
3280 			 * No new acks were receieved for data sent to this
3281 			 * No new acks were received for data sent to this
3282 			 * CMT, no data sent to this dest can be marked for
3283 			 * FR using this SACK.
3284 			 */
3285 			continue;
3286 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3287 		    tp1->whoTo->this_sack_highest_newack)) {
3288 			/*
3289 			 * CMT: New acks were received for data sent to
3290 			 * this dest. But no new acks were seen for data
3291 			 * sent after tp1. Therefore, according to the SFR
3292 			 * algo for CMT, tp1 cannot be marked for FR using
3293 			 * this SACK. This step covers part of the DAC algo
3294 			 * and the HTNA algo as well.
3295 			 */
3296 			continue;
3297 		}
3298 		/*
3299 		 * Here we check to see if we were have already done a FR
3300 		 * Here we check to see if we have already done a FR
3301 		 * smaller than the recovery point. If so we don't strike
3302 		 * the tsn... otherwise we CAN strike the TSN.
3303 		 */
3304 		/*
3305 		 * @@@ JRI: Check for CMT if (accum_moved &&
3306 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3307 		 * 0)) {
3308 		 */
3309 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3310 			/*
3311 			 * Strike the TSN if in fast-recovery and cum-ack
3312 			 * moved.
3313 			 */
3314 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3315 				sctp_log_fr(biggest_tsn_newly_acked,
3316 				    tp1->rec.data.tsn,
3317 				    tp1->sent,
3318 				    SCTP_FR_LOG_STRIKE_CHUNK);
3319 			}
3320 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3321 				tp1->sent++;
3322 			}
3323 			if ((asoc->sctp_cmt_on_off > 0) &&
3324 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3325 				/*
3326 				 * CMT DAC algorithm: If SACK flag is set to
3327 				 * 0, then lowest_newack test will not pass
3328 				 * because it would have been set to the
3329 				 * cumack earlier. If it is not already to be
3330 				 * rtx'd, this is not a mixed sack, and tp1 is
3331 				 * not between two sacked TSNs, then mark it by
3332 				 * one more. NOTE that we are marking by one
3333 				 * additional time since the SACK DAC flag
3334 				 * indicates that two packets have been
3335 				 * received after this missing TSN.
3336 				 */
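				/*
				 * Worked example (illustrative): cum-ack is
				 * 1000, tp1 holds missing TSN 1001, and
				 * this_sack_lowest_newack is 1003. Since
				 * 1001 < 1003 and num_dests_sacked is 1, the
				 * DAC flag lets this SACK count two strikes
				 * against 1001 instead of one.
				 */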
3337 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3338 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3339 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3340 						sctp_log_fr(16 + num_dests_sacked,
3341 						    tp1->rec.data.tsn,
3342 						    tp1->sent,
3343 						    SCTP_FR_LOG_STRIKE_CHUNK);
3344 					}
3345 					tp1->sent++;
3346 				}
3347 			}
3348 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3349 		    (asoc->sctp_cmt_on_off == 0)) {
3350 			/*
3351 			 * For those that have done a FR we must take
3352 			 * special consideration if we strike. I.e the
3353 			 * biggest_newly_acked must be higher than the
3354 			 * sending_seq at the time we did the FR.
3355 			 */
3356 			if (
3357 #ifdef SCTP_FR_TO_ALTERNATE
3358 			/*
3359 			 * If FR's go to new networks, then we must only do
3360 			 * this for singly homed asoc's. However if the FR's
3361 			 * go to the same network (Armando's work) then its
3362 			 * go to the same network (Armando's work) then it's
3363 			 */
3364 			    (asoc->numnets < 2)
3365 #else
3366 			    (1)
3367 #endif
3368 			    ) {
3369 
3370 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3371 				    tp1->rec.data.fast_retran_tsn)) {
3372 					/*
3373 					 * Strike the TSN, since this ack is
3374 					 * beyond where things were when we
3375 					 * did a FR.
3376 					 */
3377 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3378 						sctp_log_fr(biggest_tsn_newly_acked,
3379 						    tp1->rec.data.tsn,
3380 						    tp1->sent,
3381 						    SCTP_FR_LOG_STRIKE_CHUNK);
3382 					}
3383 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3384 						tp1->sent++;
3385 					}
3386 					strike_flag = 1;
3387 					if ((asoc->sctp_cmt_on_off > 0) &&
3388 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3389 						/*
3390 						 * CMT DAC algorithm: If
3391 						 * SACK flag is set to 0,
3392 						 * then lowest_newack test
3393 						 * will not pass because it
3394 						 * would have been set to
3395 						 * the cumack earlier. If it
3396 						 * is not already to be rtx'd,
3397 						 * this is not a mixed sack,
3398 						 * and tp1 is not between two
3399 						 * sacked TSNs, then mark it by
3400 						 * one more. NOTE that we
3401 						 * are marking by one
3402 						 * additional time since the
3403 						 * SACK DAC flag indicates
3404 						 * that two packets have
3405 						 * been received after this
3406 						 * missing TSN.
3407 						 */
3408 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3409 						    (num_dests_sacked == 1) &&
3410 						    SCTP_TSN_GT(this_sack_lowest_newack,
3411 						    tp1->rec.data.tsn)) {
3412 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3413 								sctp_log_fr(32 + num_dests_sacked,
3414 								    tp1->rec.data.tsn,
3415 								    tp1->sent,
3416 								    SCTP_FR_LOG_STRIKE_CHUNK);
3417 							}
3418 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3419 								tp1->sent++;
3420 							}
3421 						}
3422 					}
3423 				}
3424 			}
3425 			/*
3426 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3427 			 * algo covers HTNA.
3428 			 */
3429 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3430 		    biggest_tsn_newly_acked)) {
3431 			/*
3432 			 * We don't strike these: this is the HTNA
3433 			 * algorithm, i.e. we don't strike if our TSN is
3434 			 * larger than the Highest TSN Newly Acked.
3435 			 */
3436 			;
3437 		} else {
3438 			/* Strike the TSN */
3439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3440 				sctp_log_fr(biggest_tsn_newly_acked,
3441 				    tp1->rec.data.tsn,
3442 				    tp1->sent,
3443 				    SCTP_FR_LOG_STRIKE_CHUNK);
3444 			}
3445 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3446 				tp1->sent++;
3447 			}
3448 			if ((asoc->sctp_cmt_on_off > 0) &&
3449 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3450 				/*
3451 				 * CMT DAC algorithm: If SACK flag is set to
3452 				 * 0, then lowest_newack test will not pass
3453 				 * because it would have been set to the
3454 				 * cumack earlier. If it is not already to be
3455 				 * rtx'd, this is not a mixed sack, and tp1 is
3456 				 * not between two sacked TSNs, then mark it by
3457 				 * one more. NOTE that we are marking by one
3458 				 * additional time since the SACK DAC flag
3459 				 * indicates that two packets have been
3460 				 * received after this missing TSN.
3461 				 */
3462 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3463 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3464 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3465 						sctp_log_fr(48 + num_dests_sacked,
3466 						    tp1->rec.data.tsn,
3467 						    tp1->sent,
3468 						    SCTP_FR_LOG_STRIKE_CHUNK);
3469 					}
3470 					tp1->sent++;
3471 				}
3472 			}
3473 		}
3474 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3475 			struct sctp_nets *alt;
3476 
3477 			/* fix counts and things */
3478 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3479 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3480 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3481 				    tp1->book_size,
3482 				    (uint32_t)(uintptr_t)tp1->whoTo,
3483 				    tp1->rec.data.tsn);
3484 			}
3485 			if (tp1->whoTo) {
3486 				tp1->whoTo->net_ack++;
3487 				sctp_flight_size_decrease(tp1);
3488 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3489 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3490 					    tp1);
3491 				}
3492 			}
3493 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3494 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3495 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3496 			}
3497 			/* add back to the rwnd */
3498 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3499 
3500 			/* remove from the total flight */
3501 			sctp_total_flight_decrease(stcb, tp1);
3502 
3503 			if ((stcb->asoc.prsctp_supported) &&
3504 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3505 				/*
3506 				 * Has it been retransmitted tv_sec times? -
3507 				 * we store the retran count there.
3508 				 */
3509 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3510 					/* Yes, so drop it */
3511 					if (tp1->data != NULL) {
3512 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3513 						    SCTP_SO_NOT_LOCKED);
3514 					}
3515 					/* Make sure to flag we had a FR */
3516 					tp1->whoTo->net_ack++;
3517 					continue;
3518 				}
3519 			}
3520 			/*
3521 			 * SCTP_PRINTF("OK, we are now ready to FR this
3522 			 * guy\n");
3523 			 */
3524 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3525 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3526 				    0, SCTP_FR_MARKED);
3527 			}
3528 			if (strike_flag) {
3529 				/* This is a subsequent FR */
3530 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3531 			}
3532 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3533 			if (asoc->sctp_cmt_on_off > 0) {
3534 				/*
3535 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3536 				 * If CMT is being used, then pick dest with
3537 				 * largest ssthresh for any retransmission.
3538 				 */
3539 				tp1->no_fr_allowed = 1;
3540 				alt = tp1->whoTo;
3541 				/* sa_ignore NO_NULL_CHK */
3542 				if (asoc->sctp_cmt_pf > 0) {
3543 					/*
3544 					 * JRS 5/18/07 - If CMT PF is on,
3545 					 * use the PF version of
3546 					 * find_alt_net()
3547 					 */
3548 					alt = sctp_find_alternate_net(stcb, alt, 2);
3549 				} else {
3550 					/*
3551 					 * JRS 5/18/07 - If only CMT is on,
3552 					 * use the CMT version of
3553 					 * find_alt_net()
3554 					 */
3555 					/* sa_ignore NO_NULL_CHK */
3556 					alt = sctp_find_alternate_net(stcb, alt, 1);
3557 				}
3558 				if (alt == NULL) {
3559 					alt = tp1->whoTo;
3560 				}
3561 				/*
3562 				 * CUCv2: If a different dest is picked for
3563 				 * the retransmission, then new
3564 				 * (rtx-)pseudo_cumack needs to be tracked
3565 				 * for orig dest. Let CUCv2 track new (rtx-)
3566 				 * pseudo-cumack always.
3567 				 */
3568 				if (tp1->whoTo) {
3569 					tp1->whoTo->find_pseudo_cumack = 1;
3570 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3571 				}
3572 			} else {/* CMT is OFF */
3573 
3574 #ifdef SCTP_FR_TO_ALTERNATE
3575 				/* Can we find an alternate? */
3576 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3577 #else
3578 				/*
3579 				 * default behavior is to NOT retransmit
3580 				 * FR's to an alternate. Armando Caro's
3581 				 * paper details why.
3582 				 */
3583 				alt = tp1->whoTo;
3584 #endif
3585 			}
3586 
3587 			tp1->rec.data.doing_fast_retransmit = 1;
3588 			tot_retrans++;
3589 			/* mark the sending seq for possible subsequent FR's */
3590 			/*
3591 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3592 			 * (uint32_t)tpi->rec.data.tsn);
3593 			 */
3594 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3595 				/*
3596 				 * If the send queue is empty then it's
3597 				 * the next sequence number that will be
3598 				 * assigned, so we subtract one from this to
3599 				 * get the one we last sent.
3600 				 */
3601 				tp1->rec.data.fast_retran_tsn = sending_seq;
3602 			} else {
3603 				/*
3604 				 * If there are chunks on the send queue
3605 				 * (unsent data that has made it from the
3606 				 * stream queues but not out the door), we
3607 				 * take the first one (which will have the
3608 				 * lowest TSN) and subtract one to get the
3609 				 * one we last sent.
3610 				 */
3611 				struct sctp_tmit_chunk *ttt;
3612 
3613 				ttt = TAILQ_FIRST(&asoc->send_queue);
3614 				tp1->rec.data.fast_retran_tsn =
3615 				    ttt->rec.data.tsn;
3616 			}
3617 
3618 			if (tp1->do_rtt) {
3619 				/*
3620 				 * this guy had a RTO calculation pending on
3621 				 * it, cancel it
3622 				 */
3623 				if ((tp1->whoTo != NULL) &&
3624 				    (tp1->whoTo->rto_needed == 0)) {
3625 					tp1->whoTo->rto_needed = 1;
3626 				}
3627 				tp1->do_rtt = 0;
3628 			}
3629 			if (alt != tp1->whoTo) {
3630 				/* yes, there is an alternate. */
3631 				sctp_free_remote_addr(tp1->whoTo);
3632 				/* sa_ignore FREED_MEMORY */
3633 				tp1->whoTo = alt;
3634 				atomic_add_int(&alt->ref_count, 1);
3635 			}
3636 		}
3637 	}
3638 }
3639 
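/*
 * Informal summary of the strike logic above: each qualifying SACK
 * increments tp1->sent for a missing chunk; once it reaches
 * SCTP_DATAGRAM_RESEND the chunk is removed from the flight counters,
 * marked for fast retransmit, and an alternate destination may be chosen
 * (via sctp_find_alternate_net() under CMT, or only when
 * SCTP_FR_TO_ALTERNATE is defined otherwise).
 */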
3640 struct sctp_tmit_chunk *
3641 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3642     struct sctp_association *asoc)
3643 {
3644 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3645 	struct timeval now;
3646 	int now_filled = 0;
3647 
3648 	if (asoc->prsctp_supported == 0) {
3649 		return (NULL);
3650 	}
3651 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3652 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3653 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3654 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3655 			/* no chance to advance, out of here */
3656 			break;
3657 		}
3658 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3659 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3660 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3661 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3662 				    asoc->advanced_peer_ack_point,
3663 				    tp1->rec.data.tsn, 0, 0);
3664 			}
3665 		}
3666 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3667 			/*
3668 			 * We can't fwd-tsn past any that are reliable aka
3669 			 * retransmitted until the asoc fails.
3670 			 */
3671 			break;
3672 		}
3673 		if (!now_filled) {
3674 			(void)SCTP_GETTIME_TIMEVAL(&now);
3675 			now_filled = 1;
3676 		}
3677 		/*
3678 		 * Now we have a chunk which is marked for another
3679 		 * retransmission to a PR-stream but has run out of its chances
3680 		 * already, OR has been marked to skip now. Can we skip
3681 		 * it if it's a resend?
3682 		 */
3683 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3684 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3685 			/*
3686 			 * Now is this one marked for resend and its time is
3687 			 * now up?
3688 			 */
3689 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3690 				/* Yes so drop it */
3691 				if (tp1->data) {
3692 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3693 					    1, SCTP_SO_NOT_LOCKED);
3694 				}
3695 			} else {
3696 				/*
3697 				 * No, we are done when we hit one for resend
3698 				 * whose time has not expired.
3699 				 */
3700 				break;
3701 			}
3702 		}
3703 		/*
3704 		 * Ok now if this chunk is marked to drop it we can clean up
3705 		 * the chunk, advance our peer ack point and we can check
3706 		 * the next chunk.
3707 		 */
3708 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3709 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3710 			/* the advanced PeerAckPoint goes forward */
3711 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3712 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3713 				a_adv = tp1;
3714 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3715 				/* No update but we do save the chk */
3716 				a_adv = tp1;
3717 			}
3718 		} else {
3719 			/*
3720 			 * If it is still in RESEND we can advance no
3721 			 * further
3722 			 */
3723 			break;
3724 		}
3725 	}
3726 	return (a_adv);
3727 }
3728 
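/*
 * PR-SCTP example (illustrative, per RFC 3758): with cum-ack 100 and
 * abandoned chunks 101-103 sitting at SCTP_FORWARD_TSN_SKIP, the loop
 * above advances advanced_peer_ack_point to 103; the caller can then send
 * a FORWARD-TSN telling the peer to move its cumulative ack past the
 * skipped TSNs.
 */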
3729 static int
3730 sctp_fs_audit(struct sctp_association *asoc)
3731 {
3732 	struct sctp_tmit_chunk *chk;
3733 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3734 	int ret;
3735 #ifndef INVARIANTS
3736 	int entry_flight, entry_cnt;
3737 #endif
3738 
3739 	ret = 0;
3740 #ifndef INVARIANTS
3741 	entry_flight = asoc->total_flight;
3742 	entry_cnt = asoc->total_flight_count;
3743 #endif
3744 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3745 		return (0);
3746 
3747 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3748 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3749 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3750 			    chk->rec.data.tsn,
3751 			    chk->send_size,
3752 			    chk->snd_count);
3753 			inflight++;
3754 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3755 			resend++;
3756 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3757 			inbetween++;
3758 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3759 			above++;
3760 		} else {
3761 			acked++;
3762 		}
3763 	}
3764 
3765 	if ((inflight > 0) || (inbetween > 0)) {
3766 #ifdef INVARIANTS
3767 		panic("Flight size-express incorrect? \n");
3768 #else
3769 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3770 		    entry_flight, entry_cnt);
3771 
3772 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3773 		    inflight, inbetween, resend, above, acked);
3774 		ret = 1;
3775 #endif
3776 	}
3777 	return (ret);
3778 }
3779 
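/*
 * The audit above checks the flight-size book-keeping used throughout
 * this file; informally, assuming only the fields used here, the
 * invariant is:
 *
 *	asoc->total_flight == sum of chk->book_size over chunks with
 *	                      chk->sent < SCTP_DATAGRAM_RESEND
 *
 * Under INVARIANTS a violation panics; otherwise it is logged and the
 * caller rebuilds the counters.
 */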
3780 
3781 static void
3782 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3783     struct sctp_association *asoc,
3784     struct sctp_tmit_chunk *tp1)
3785 {
3786 	tp1->window_probe = 0;
3787 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3788 		/* TSN's skipped; we do NOT move back. */
3789 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3790 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3791 		    tp1->book_size,
3792 		    (uint32_t)(uintptr_t)tp1->whoTo,
3793 		    tp1->rec.data.tsn);
3794 		return;
3795 	}
3796 	/* First setup this by shrinking flight */
3797 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3798 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3799 		    tp1);
3800 	}
3801 	sctp_flight_size_decrease(tp1);
3802 	sctp_total_flight_decrease(stcb, tp1);
3803 	/* Now mark for resend */
3804 	tp1->sent = SCTP_DATAGRAM_RESEND;
3805 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3806 
3807 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3808 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3809 		    tp1->whoTo->flight_size,
3810 		    tp1->book_size,
3811 		    (uint32_t)(uintptr_t)tp1->whoTo,
3812 		    tp1->rec.data.tsn);
3813 	}
3814 }
3815 
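/*
 * Window-probe recovery (informal): while the peer's rwnd is zero, one
 * chunk is kept outstanding as a probe outside the normal flight rules;
 * once the window reopens, the helper above pulls that chunk out of the
 * flight counters and re-marks it SCTP_DATAGRAM_RESEND so it is resent
 * through the normal retransmission machinery.
 */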
3816 void
3817 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3818     uint32_t rwnd, int *abort_now, int ecne_seen)
3819 {
3820 	struct sctp_nets *net;
3821 	struct sctp_association *asoc;
3822 	struct sctp_tmit_chunk *tp1, *tp2;
3823 	uint32_t old_rwnd;
3824 	int win_probe_recovery = 0;
3825 	int win_probe_recovered = 0;
3826 	int j, done_once = 0;
3827 	int rto_ok = 1;
3828 	uint32_t send_s;
3829 
3830 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3831 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3832 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3833 	}
3834 	SCTP_TCB_LOCK_ASSERT(stcb);
3835 #ifdef SCTP_ASOCLOG_OF_TSNS
3836 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3837 	stcb->asoc.cumack_log_at++;
3838 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3839 		stcb->asoc.cumack_log_at = 0;
3840 	}
3841 #endif
3842 	asoc = &stcb->asoc;
3843 	old_rwnd = asoc->peers_rwnd;
3844 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3845 		/* old ack */
3846 		return;
3847 	} else if (asoc->last_acked_seq == cumack) {
3848 		/* Window update sack */
3849 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3850 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3851 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3852 			/* SWS sender side engages */
3853 			asoc->peers_rwnd = 0;
3854 		}
3855 		if (asoc->peers_rwnd > old_rwnd) {
3856 			goto again;
3857 		}
3858 		return;
3859 	}
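	/*
	 * Illustrative arithmetic for the rwnd update above: with an
	 * advertised rwnd of 8000 bytes, 3000 bytes of flight spread over
	 * two chunks, and sctp_peer_chunk_oh of 256, the usable peer window
	 * is 8000 - (3000 + 2 * 256) = 4488 bytes; results below the SWS
	 * threshold are clamped to 0.
	 */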
3860 	/* First setup for CC stuff */
3861 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3862 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3863 			/* Drag along the window_tsn for cwr's */
3864 			net->cwr_window_tsn = cumack;
3865 		}
3866 		net->prev_cwnd = net->cwnd;
3867 		net->net_ack = 0;
3868 		net->net_ack2 = 0;
3869 
3870 		/*
3871 		 * CMT: Reset CUC and Fast recovery algo variables before
3872 		 * SACK processing
3873 		 */
3874 		net->new_pseudo_cumack = 0;
3875 		net->will_exit_fast_recovery = 0;
3876 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3877 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3878 		}
3879 	}
3880 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3881 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3882 		    sctpchunk_listhead);
3883 		send_s = tp1->rec.data.tsn + 1;
3884 	} else {
3885 		send_s = asoc->sending_seq;
3886 	}
3887 	if (SCTP_TSN_GE(cumack, send_s)) {
3888 		struct mbuf *op_err;
3889 		char msg[SCTP_DIAG_INFO_LEN];
3890 
3891 		*abort_now = 1;
3892 		/* XXX */
3893 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3894 		    cumack, send_s);
3895 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3896 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3897 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3898 		return;
3899 	}
3900 	asoc->this_sack_highest_gap = cumack;
3901 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3902 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3903 		    stcb->asoc.overall_error_count,
3904 		    0,
3905 		    SCTP_FROM_SCTP_INDATA,
3906 		    __LINE__);
3907 	}
3908 	stcb->asoc.overall_error_count = 0;
3909 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3910 		/* process the new consecutive TSN first */
3911 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3912 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3913 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3914 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3915 				}
3916 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3917 					/*
3918 					 * If it is less than ACKED, it is
3919 					 * now no longer in flight. Higher
3920 					 * values may occur during marking
3921 					 */
3922 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3923 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3924 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3925 							    tp1->whoTo->flight_size,
3926 							    tp1->book_size,
3927 							    (uint32_t)(uintptr_t)tp1->whoTo,
3928 							    tp1->rec.data.tsn);
3929 						}
3930 						sctp_flight_size_decrease(tp1);
3931 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3932 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3933 							    tp1);
3934 						}
3935 						/* sa_ignore NO_NULL_CHK */
3936 						sctp_total_flight_decrease(stcb, tp1);
3937 					}
3938 					tp1->whoTo->net_ack += tp1->send_size;
3939 					if (tp1->snd_count < 2) {
3940 						/*
3941 						 * True non-retransmitted
3942 						 * chunk
3943 						 */
3944 						tp1->whoTo->net_ack2 +=
3945 						    tp1->send_size;
3946 
3947 						/* update RTO too? */
3948 						if (tp1->do_rtt) {
3949 							if (rto_ok) {
3950 								tp1->whoTo->RTO =
3951 								/*
3952 								 * sa_ignore
3953 								 * NO_NULL_CHK
3954 								 */
3955 								    sctp_calculate_rto(stcb,
3956 								    asoc, tp1->whoTo,
3957 								    &tp1->sent_rcv_time,
3958 								    sctp_align_safe_nocopy,
3959 								    SCTP_RTT_FROM_DATA);
3960 								rto_ok = 0;
3961 							}
3962 							if (tp1->whoTo->rto_needed == 0) {
3963 								tp1->whoTo->rto_needed = 1;
3964 							}
3965 							tp1->do_rtt = 0;
3966 						}
3967 					}
3968 					/*
3969 					 * CMT: CUCv2 algorithm. From the
3970 					 * cumack'd TSNs, for each TSN being
3971 					 * acked for the first time, set the
3972 					 * following variables for the
3973 					 * corresp destination.
3974 					 * new_pseudo_cumack will trigger a
3975 					 * cwnd update.
3976 					 * find_(rtx_)pseudo_cumack will
3977 					 * trigger search for the next
3978 					 * expected (rtx-)pseudo-cumack.
3979 					 */
3980 					tp1->whoTo->new_pseudo_cumack = 1;
3981 					tp1->whoTo->find_pseudo_cumack = 1;
3982 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3983 
3984 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3985 						/* sa_ignore NO_NULL_CHK */
3986 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3987 					}
3988 				}
3989 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3990 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3991 				}
3992 				if (tp1->rec.data.chunk_was_revoked) {
3993 					/* deflate the cwnd */
3994 					tp1->whoTo->cwnd -= tp1->book_size;
3995 					tp1->rec.data.chunk_was_revoked = 0;
3996 				}
3997 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3998 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3999 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4000 #ifdef INVARIANTS
4001 					} else {
4002 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4003 #endif
4004 					}
4005 				}
4006 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4007 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4008 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4009 					asoc->trigger_reset = 1;
4010 				}
4011 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4012 				if (tp1->data) {
4013 					/* sa_ignore NO_NULL_CHK */
4014 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4015 					sctp_m_freem(tp1->data);
4016 					tp1->data = NULL;
4017 				}
4018 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4019 					sctp_log_sack(asoc->last_acked_seq,
4020 					    cumack,
4021 					    tp1->rec.data.tsn,
4022 					    0,
4023 					    0,
4024 					    SCTP_LOG_FREE_SENT);
4025 				}
4026 				asoc->sent_queue_cnt--;
4027 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4028 			} else {
4029 				break;
4030 			}
4031 		}
4032 
4033 	}
4034 	/* sa_ignore NO_NULL_CHK */
4035 	if (stcb->sctp_socket) {
4036 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4037 		struct socket *so;
4038 
4039 #endif
4040 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4041 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4042 			/* sa_ignore NO_NULL_CHK */
4043 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4044 		}
4045 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4046 		so = SCTP_INP_SO(stcb->sctp_ep);
4047 		atomic_add_int(&stcb->asoc.refcnt, 1);
4048 		SCTP_TCB_UNLOCK(stcb);
4049 		SCTP_SOCKET_LOCK(so, 1);
4050 		SCTP_TCB_LOCK(stcb);
4051 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4052 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4053 			/* assoc was freed while we were unlocked */
4054 			SCTP_SOCKET_UNLOCK(so, 1);
4055 			return;
4056 		}
4057 #endif
4058 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4059 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4060 		SCTP_SOCKET_UNLOCK(so, 1);
4061 #endif
4062 	} else {
4063 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4064 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4065 		}
4066 	}
4067 
4068 	/* JRS - Use the congestion control given in the CC module */
4069 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4070 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4071 			if (net->net_ack2 > 0) {
4072 				/*
4073 				 * Karn's rule applies to clearing error
4074 				 * count, this is optional.
4075 				 */
4076 				net->error_count = 0;
4077 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4078 					/* addr came good */
4079 					net->dest_state |= SCTP_ADDR_REACHABLE;
4080 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4081 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4082 				}
4083 				if (net == stcb->asoc.primary_destination) {
4084 					if (stcb->asoc.alternate) {
4085 						/*
4086 						 * release the alternate,
4087 						 * primary is good
4088 						 */
4089 						sctp_free_remote_addr(stcb->asoc.alternate);
4090 						stcb->asoc.alternate = NULL;
4091 					}
4092 				}
4093 				if (net->dest_state & SCTP_ADDR_PF) {
4094 					net->dest_state &= ~SCTP_ADDR_PF;
4095 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4096 					    stcb->sctp_ep, stcb, net,
4097 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4098 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4099 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4100 					/* Done with this net */
4101 					net->net_ack = 0;
4102 				}
4103 				/* restore any doubled timers */
4104 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4105 				if (net->RTO < stcb->asoc.minrto) {
4106 					net->RTO = stcb->asoc.minrto;
4107 				}
4108 				if (net->RTO > stcb->asoc.maxrto) {
4109 					net->RTO = stcb->asoc.maxrto;
4110 				}
4111 			}
4112 		}
4113 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4114 	}
4115 	asoc->last_acked_seq = cumack;
4116 
4117 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4118 		/* nothing left in-flight */
4119 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4120 			net->flight_size = 0;
4121 			net->partial_bytes_acked = 0;
4122 		}
4123 		asoc->total_flight = 0;
4124 		asoc->total_flight_count = 0;
4125 	}
4126 	/* RWND update */
4127 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4128 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4129 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4130 		/* SWS sender side engages */
4131 		asoc->peers_rwnd = 0;
4132 	}
4133 	if (asoc->peers_rwnd > old_rwnd) {
4134 		win_probe_recovery = 1;
4135 	}
4136 	/* Now assure a timer where data is queued at */
4137 again:
4138 	j = 0;
4139 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4140 		int to_ticks;
4141 
4142 		if (win_probe_recovery && (net->window_probe)) {
4143 			win_probe_recovered = 1;
4144 			/*
4145 			 * Find first chunk that was used with window probe
4146 			 * and clear its window-probe flag
4147 			 */
4148 			/* sa_ignore FREED_MEMORY */
4149 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4150 				if (tp1->window_probe) {
4151 					/* move back to data send queue */
4152 					sctp_window_probe_recovery(stcb, asoc, tp1);
4153 					break;
4154 				}
4155 			}
4156 		}
4157 		if (net->RTO == 0) {
4158 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4159 		} else {
4160 			to_ticks = MSEC_TO_TICKS(net->RTO);
4161 		}
4162 		if (net->flight_size) {
4163 			j++;
4164 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4165 			    sctp_timeout_handler, &net->rxt_timer);
4166 			if (net->window_probe) {
4167 				net->window_probe = 0;
4168 			}
4169 		} else {
4170 			if (net->window_probe) {
4171 				/*
4172 				 * In window probes we must assure a timer
4173 				 * is still running there
4174 				 */
4175 				net->window_probe = 0;
4176 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4177 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4178 					    sctp_timeout_handler, &net->rxt_timer);
4179 				}
4180 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4181 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4182 				    stcb, net,
4183 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4184 			}
4185 		}
4186 	}
4187 	if ((j == 0) &&
4188 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4189 	    (asoc->sent_queue_retran_cnt == 0) &&
4190 	    (win_probe_recovered == 0) &&
4191 	    (done_once == 0)) {
4192 		/*
4193 		 * huh, this should not happen unless all packets are
4194 		 * PR-SCTP and marked to skip of course.
4195 		 */
4196 		if (sctp_fs_audit(asoc)) {
4197 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4198 				net->flight_size = 0;
4199 			}
4200 			asoc->total_flight = 0;
4201 			asoc->total_flight_count = 0;
4202 			asoc->sent_queue_retran_cnt = 0;
4203 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4204 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4205 					sctp_flight_size_increase(tp1);
4206 					sctp_total_flight_increase(stcb, tp1);
4207 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4208 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4209 				}
4210 			}
4211 		}
4212 		done_once = 1;
4213 		goto again;
4214 	}
4215 	/**********************************/
4216 	/* Now what about shutdown issues */
4217 	/**********************************/
4218 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4219 		/* nothing left on sendqueue.. consider done */
4220 		/* clean up */
4221 		if ((asoc->stream_queue_cnt == 1) &&
4222 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4223 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4224 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4225 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4226 		}
4227 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4228 		    (asoc->stream_queue_cnt == 0)) {
4229 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4230 				/* Need to abort here */
4231 				struct mbuf *op_err;
4232 
4233 		abort_out_now:
4234 				*abort_now = 1;
4235 				/* XXX */
4236 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4237 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4238 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4239 				return;
4240 			} else {
4241 				struct sctp_nets *netp;
4242 
4243 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4244 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4245 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4246 				}
4247 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4248 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4249 				sctp_stop_timers_for_shutdown(stcb);
4250 				if (asoc->alternate) {
4251 					netp = asoc->alternate;
4252 				} else {
4253 					netp = asoc->primary_destination;
4254 				}
4255 				sctp_send_shutdown(stcb, netp);
4256 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4257 				    stcb->sctp_ep, stcb, netp);
4258 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4259 				    stcb->sctp_ep, stcb, netp);
4260 			}
4261 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4262 		    (asoc->stream_queue_cnt == 0)) {
4263 			struct sctp_nets *netp;
4264 
4265 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4266 				goto abort_out_now;
4267 			}
4268 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4269 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4270 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4271 			sctp_stop_timers_for_shutdown(stcb);
4272 			if (asoc->alternate) {
4273 				netp = asoc->alternate;
4274 			} else {
4275 				netp = asoc->primary_destination;
4276 			}
4277 			sctp_send_shutdown_ack(stcb, netp);
4278 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4279 			    stcb->sctp_ep, stcb, netp);
4280 		}
4281 	}
4282 	/*********************************************/
4283 	/* Here we perform PR-SCTP procedures        */
4284 	/* (section 4.2)                             */
4285 	/*********************************************/
4286 	/* C1. update advancedPeerAckPoint */
4287 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4288 		asoc->advanced_peer_ack_point = cumack;
4289 	}
4290 	/* PR-Sctp issues need to be addressed too */
4291 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4292 		struct sctp_tmit_chunk *lchk;
4293 		uint32_t old_adv_peer_ack_point;
4294 
4295 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4296 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4297 		/* C3. See if we need to send a Fwd-TSN */
4298 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4299 			/*
4300 			 * ISSUE with ECN, see FWD-TSN processing.
4301 			 */
4302 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4303 				send_forward_tsn(stcb, asoc);
4304 			} else if (lchk) {
4305 				/* try to FR fwd-tsn's that get lost too */
4306 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4307 					send_forward_tsn(stcb, asoc);
4308 				}
4309 			}
4310 		}
4311 		if (lchk) {
4312 			/* Assure a timer is up */
4313 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4314 			    stcb->sctp_ep, stcb, lchk->whoTo);
4315 		}
4316 	}
4317 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4318 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4319 		    rwnd,
4320 		    stcb->asoc.peers_rwnd,
4321 		    stcb->asoc.total_flight,
4322 		    stcb->asoc.total_output_queue_size);
4323 	}
4324 }
4325 
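/*
 * Note: sctp_express_handle_sack() above is the fast path for SACKs that
 * carry only a cumulative ack (no gap-ack blocks and no duplicate TSNs);
 * sctp_handle_sack() below is the slow path that additionally walks the
 * gap reports, strikes missing chunks, and checks for revocation.
 */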
4326 void
4327 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4328     struct sctp_tcb *stcb,
4329     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4330     int *abort_now, uint8_t flags,
4331     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4332 {
4333 	struct sctp_association *asoc;
4334 	struct sctp_tmit_chunk *tp1, *tp2;
4335 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4336 	uint16_t wake_him = 0;
4337 	uint32_t send_s = 0;
4338 	long j;
4339 	int accum_moved = 0;
4340 	int will_exit_fast_recovery = 0;
4341 	uint32_t a_rwnd, old_rwnd;
4342 	int win_probe_recovery = 0;
4343 	int win_probe_recovered = 0;
4344 	struct sctp_nets *net = NULL;
4345 	int done_once;
4346 	int rto_ok = 1;
4347 	uint8_t reneged_all = 0;
4348 	uint8_t cmt_dac_flag;
4349 
4350 	/*
4351 	 * we take any chance we can to service our queues since we cannot
4352 	 * get awoken when the socket is read from :<
4353 	 */
4354 	/*
4355 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4356 	 * old sack, if so discard. 2) If there is nothing left in the send
4357 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4358 	 * too, update any rwnd change and verify no timers are running.
4359 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4360 	 * moved process these first and note that it moved. 4) Process any
4361 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4362 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4363 	 * sync up flightsizes and things, stop all timers and also check
4364 	 * for shutdown_pending state. If so then go ahead and send off the
4365 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4366 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4367 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4368 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4369 	 * if in shutdown_recv state.
4370 	 */
4371 	SCTP_TCB_LOCK_ASSERT(stcb);
4372 	/* CMT DAC algo */
4373 	this_sack_lowest_newack = 0;
4374 	SCTP_STAT_INCR(sctps_slowpath_sack);
4375 	last_tsn = cum_ack;
4376 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4377 #ifdef SCTP_ASOCLOG_OF_TSNS
4378 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4379 	stcb->asoc.cumack_log_at++;
4380 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4381 		stcb->asoc.cumack_log_at = 0;
4382 	}
4383 #endif
4384 	a_rwnd = rwnd;
4385 
4386 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4387 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4388 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4389 	}
4390 	old_rwnd = stcb->asoc.peers_rwnd;
4391 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4392 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4393 		    stcb->asoc.overall_error_count,
4394 		    0,
4395 		    SCTP_FROM_SCTP_INDATA,
4396 		    __LINE__);
4397 	}
4398 	stcb->asoc.overall_error_count = 0;
4399 	asoc = &stcb->asoc;
4400 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4401 		sctp_log_sack(asoc->last_acked_seq,
4402 		    cum_ack,
4403 		    0,
4404 		    num_seg,
4405 		    num_dup,
4406 		    SCTP_LOG_NEW_SACK);
4407 	}
4408 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4409 		uint16_t i;
4410 		uint32_t *dupdata, dblock;
4411 
4412 		for (i = 0; i < num_dup; i++) {
4413 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4414 			    sizeof(uint32_t), (uint8_t *)&dblock);
4415 			if (dupdata == NULL) {
4416 				break;
4417 			}
4418 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4419 		}
4420 	}
4421 	/* reality check */
4422 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4423 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4424 		    sctpchunk_listhead);
4425 		send_s = tp1->rec.data.tsn + 1;
4426 	} else {
4427 		tp1 = NULL;
4428 		send_s = asoc->sending_seq;
4429 	}
4430 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4431 		struct mbuf *op_err;
4432 		char msg[SCTP_DIAG_INFO_LEN];
4433 
4434 		/*
4435 		 * no way, we have not even sent this TSN out yet. Peer is
4436 		 * hopelessly messed up with us.
4437 		 */
4438 		SCTP_PRINTF("NEW cum_ack:%x is greater than or equal to send_s:%x\n",
4439 		    cum_ack, send_s);
4440 		if (tp1) {
4441 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4442 			    tp1->rec.data.tsn, (void *)tp1);
4443 		}
4444 hopeless_peer:
4445 		*abort_now = 1;
4446 		/* XXX */
4447 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4448 		    cum_ack, send_s);
4449 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4450 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4451 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4452 		return;
4453 	}
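	/*
	 * Example of the bound just enforced (illustrative): if the highest
	 * TSN handed out so far is 999, send_s is 1000; a SACK whose
	 * cum-ack is 1000 or more acks data we never sent, so the
	 * association is aborted with a protocol-violation cause.
	 */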
4454 	/**********************/
4455 	/* 1) check the range */
4456 	/**********************/
4457 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4458 		/* acking something behind */
4459 		return;
4460 	}
4461 	/* update the Rwnd of the peer */
4462 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4463 	    TAILQ_EMPTY(&asoc->send_queue) &&
4464 	    (asoc->stream_queue_cnt == 0)) {
4465 		/* nothing left on send/sent and strmq */
4466 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4467 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4468 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4469 		}
4470 		asoc->peers_rwnd = a_rwnd;
4471 		if (asoc->sent_queue_retran_cnt) {
4472 			asoc->sent_queue_retran_cnt = 0;
4473 		}
4474 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4475 			/* SWS sender side engages */
4476 			asoc->peers_rwnd = 0;
4477 		}
4478 		/* stop any timers */
4479 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4480 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4481 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4482 			net->partial_bytes_acked = 0;
4483 			net->flight_size = 0;
4484 		}
4485 		asoc->total_flight = 0;
4486 		asoc->total_flight_count = 0;
4487 		return;
4488 	}
4489 	/*
4490 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4491 	 * things. The total byte count acked is tracked in net_ack AND
4492 	 * net_ack2 is used to track the total bytes acked that are un-
4493 	 * ambiguous and were never retransmitted. We track these on a per
4494 	 * destination address basis.
4495 	 */
4496 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4497 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4498 			/* Drag along the window_tsn for cwr's */
4499 			net->cwr_window_tsn = cum_ack;
4500 		}
4501 		net->prev_cwnd = net->cwnd;
4502 		net->net_ack = 0;
4503 		net->net_ack2 = 0;
4504 
4505 		/*
4506 		 * CMT: Reset CUC and Fast recovery algo variables before
4507 		 * SACK processing
4508 		 */
4509 		net->new_pseudo_cumack = 0;
4510 		net->will_exit_fast_recovery = 0;
4511 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4512 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4513 		}
4514 	}
4515 	/* process the new consecutive TSN first */
4516 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4517 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4518 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4519 				accum_moved = 1;
4520 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4521 					/*
4522 					 * If it is less than ACKED, it is
4523 					 * now no longer in flight. Higher
4524 					 * values may occur during marking
4525 					 */
4526 					if ((tp1->whoTo->dest_state &
4527 					    SCTP_ADDR_UNCONFIRMED) &&
4528 					    (tp1->snd_count < 2)) {
4529 						/*
4530 						 * If there was no retran
4531 						 * and the address is
4532 						 * un-confirmed and we sent
4533 						 * there and are now
4534 						 * sacked... it's confirmed,
4535 						 * mark it so.
4536 						 */
4537 						tp1->whoTo->dest_state &=
4538 						    ~SCTP_ADDR_UNCONFIRMED;
4539 					}
4540 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4541 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4542 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4543 							    tp1->whoTo->flight_size,
4544 							    tp1->book_size,
4545 							    (uint32_t)(uintptr_t)tp1->whoTo,
4546 							    tp1->rec.data.tsn);
4547 						}
4548 						sctp_flight_size_decrease(tp1);
4549 						sctp_total_flight_decrease(stcb, tp1);
4550 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4551 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4552 							    tp1);
4553 						}
4554 					}
4555 					tp1->whoTo->net_ack += tp1->send_size;
4556 
4557 					/* CMT SFR and DAC algos */
4558 					this_sack_lowest_newack = tp1->rec.data.tsn;
4559 					tp1->whoTo->saw_newack = 1;
4560 
4561 					if (tp1->snd_count < 2) {
4562 						/*
4563 						 * True non-retransmitted
4564 						 * chunk
4565 						 */
4566 						tp1->whoTo->net_ack2 +=
4567 						    tp1->send_size;
4568 
4569 						/* update RTO too? */
4570 						if (tp1->do_rtt) {
4571 							if (rto_ok) {
4572 								tp1->whoTo->RTO =
4573 								    sctp_calculate_rto(stcb,
4574 								    asoc, tp1->whoTo,
4575 								    &tp1->sent_rcv_time,
4576 								    sctp_align_safe_nocopy,
4577 								    SCTP_RTT_FROM_DATA);
4578 								rto_ok = 0;
4579 							}
4580 							if (tp1->whoTo->rto_needed == 0) {
4581 								tp1->whoTo->rto_needed = 1;
4582 							}
4583 							tp1->do_rtt = 0;
4584 						}
4585 					}
4586 					/*
4587 					 * CMT: CUCv2 algorithm. From the
4588 					 * cumack'd TSNs, for each TSN being
4589 					 * acked for the first time, set the
4590 					 * following variables for the
4591 					 * corresp destination.
4592 					 * new_pseudo_cumack will trigger a
4593 					 * cwnd update.
4594 					 * find_(rtx_)pseudo_cumack will
4595 					 * trigger search for the next
4596 					 * expected (rtx-)pseudo-cumack.
4597 					 */
4598 					tp1->whoTo->new_pseudo_cumack = 1;
4599 					tp1->whoTo->find_pseudo_cumack = 1;
4600 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4601 
4602 
4603 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4604 						sctp_log_sack(asoc->last_acked_seq,
4605 						    cum_ack,
4606 						    tp1->rec.data.tsn,
4607 						    0,
4608 						    0,
4609 						    SCTP_LOG_TSN_ACKED);
4610 					}
4611 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4612 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4613 					}
4614 				}
4615 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4616 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4617 #ifdef SCTP_AUDITING_ENABLED
4618 					sctp_audit_log(0xB3,
4619 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4620 #endif
4621 				}
4622 				if (tp1->rec.data.chunk_was_revoked) {
4623 					/* deflate the cwnd */
4624 					tp1->whoTo->cwnd -= tp1->book_size;
4625 					tp1->rec.data.chunk_was_revoked = 0;
4626 				}
4627 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4628 					tp1->sent = SCTP_DATAGRAM_ACKED;
4629 				}
4630 			}
4631 		} else {
4632 			break;
4633 		}
4634 	}
4635 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4636 	/* always set this up to cum-ack */
4637 	asoc->this_sack_highest_gap = last_tsn;
4638 
4639 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4640 
4641 		/*
4642 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4643 		 * to be greater than the cumack. Also reset saw_newack to 0
4644 		 * for all dests.
4645 		 */
4646 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4647 			net->saw_newack = 0;
4648 			net->this_sack_highest_newack = last_tsn;
4649 		}
4650 
4651 		/*
4652 		 * thisSackHighestGap will increase while handling NEW
4653 		 * segments this_sack_highest_newack will increase while
4654 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4655 		 * used for CMT DAC algo. saw_newack will also change.
4656 		 */
4657 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4658 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4659 		    num_seg, num_nr_seg, &rto_ok)) {
4660 			wake_him++;
4661 		}
4662 		/*
4663 		 * validate the biggest_tsn_acked in the gap acks if strict
4664 		 * adherence is wanted.
4665 		 */
4666 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4667 			/*
4668 			 * peer is either confused or we are under attack.
4669 			 * We must abort.
4670 			 */
4671 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4672 			    biggest_tsn_acked, send_s);
4673 			goto hopeless_peer;
4674 		}
4675 	}
4676 	/*******************************************/
4677 	/* cancel ALL T3-send timer if accum moved */
4678 	/*******************************************/
4679 	if (asoc->sctp_cmt_on_off > 0) {
4680 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4681 			if (net->new_pseudo_cumack)
4682 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4683 				    stcb, net,
4684 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4685 
4686 		}
4687 	} else {
4688 		if (accum_moved) {
4689 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4690 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4691 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4692 			}
4693 		}
4694 	}
4695 	/********************************************/
4696 	/* drop the acked chunks from the sentqueue */
4697 	/********************************************/
4698 	asoc->last_acked_seq = cum_ack;
4699 
4700 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4701 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4702 			break;
4703 		}
4704 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4705 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4706 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4707 #ifdef INVARIANTS
4708 			} else {
4709 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4710 #endif
4711 			}
4712 		}
4713 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4714 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4715 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4716 			asoc->trigger_reset = 1;
4717 		}
4718 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4719 		if (PR_SCTP_ENABLED(tp1->flags)) {
4720 			if (asoc->pr_sctp_cnt != 0)
4721 				asoc->pr_sctp_cnt--;
4722 		}
4723 		asoc->sent_queue_cnt--;
4724 		if (tp1->data) {
4725 			/* sa_ignore NO_NULL_CHK */
4726 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4727 			sctp_m_freem(tp1->data);
4728 			tp1->data = NULL;
4729 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4730 				asoc->sent_queue_cnt_removeable--;
4731 			}
4732 		}
4733 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4734 			sctp_log_sack(asoc->last_acked_seq,
4735 			    cum_ack,
4736 			    tp1->rec.data.tsn,
4737 			    0,
4738 			    0,
4739 			    SCTP_LOG_FREE_SENT);
4740 		}
4741 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4742 		wake_him++;
4743 	}
4744 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4745 #ifdef INVARIANTS
4746 		panic("Warning flight size is positive and should be 0");
4747 #else
4748 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4749 		    asoc->total_flight);
4750 #endif
4751 		asoc->total_flight = 0;
4752 	}
4753 	/* sa_ignore NO_NULL_CHK */
4754 	if ((wake_him) && (stcb->sctp_socket)) {
4755 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4756 		struct socket *so;
4757 
4758 #endif
4759 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4760 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4761 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4762 		}
4763 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4764 		so = SCTP_INP_SO(stcb->sctp_ep);
4765 		atomic_add_int(&stcb->asoc.refcnt, 1);
4766 		SCTP_TCB_UNLOCK(stcb);
4767 		SCTP_SOCKET_LOCK(so, 1);
4768 		SCTP_TCB_LOCK(stcb);
4769 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4770 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4771 			/* assoc was freed while we were unlocked */
4772 			SCTP_SOCKET_UNLOCK(so, 1);
4773 			return;
4774 		}
4775 #endif
4776 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4777 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4778 		SCTP_SOCKET_UNLOCK(so, 1);
4779 #endif
4780 	} else {
4781 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4782 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4783 		}
4784 	}
4785 
4786 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4787 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4788 			/* Setup so we will exit RFC2582 fast recovery */
4789 			will_exit_fast_recovery = 1;
4790 		}
4791 	}
4792 	/*
4793 	 * Check for revoked fragments:
4794 	 * - If the previous SACK had no frags, nothing can be revoked.
4795 	 * - If the previous SACK had frags and this one does too (num_seg
4796 	 *   > 0), call sctp_check_for_revoked() to tell if the peer
4797 	 *   revoked some of them.
4798 	 * - Else the peer revoked ALL ACKED fragments: we had some before
4799 	 *   and now we have NONE.
4800 	 */
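	/*
	 * Worked example (hypothetical TSNs): suppose a previous SACK
	 * gap-acked TSNs 10-12, so they sit on the sent queue marked
	 * SCTP_DATAGRAM_ACKED. If this SACK carries no gap reports at
	 * all, the peer has revoked 10-12, and the loop below moves them
	 * back to SCTP_DATAGRAM_SENT and restores them to the flight.
	 */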
4801 
4802 	if (num_seg) {
4803 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4804 		asoc->saw_sack_with_frags = 1;
4805 	} else if (asoc->saw_sack_with_frags) {
4806 		int cnt_revoked = 0;
4807 
4808 		/* Peer revoked all datagrams marked or acked */
4809 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4810 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4811 				tp1->sent = SCTP_DATAGRAM_SENT;
4812 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4813 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4814 					    tp1->whoTo->flight_size,
4815 					    tp1->book_size,
4816 					    (uint32_t)(uintptr_t)tp1->whoTo,
4817 					    tp1->rec.data.tsn);
4818 				}
4819 				sctp_flight_size_increase(tp1);
4820 				sctp_total_flight_increase(stcb, tp1);
4821 				tp1->rec.data.chunk_was_revoked = 1;
4822 				/*
4823 				 * To ensure that this increase in
4824 				 * flightsize, which is artificial, does not
4825 				 * throttle the sender, we also increase the
4826 				 * cwnd artificially.
4827 				 */
4828 				tp1->whoTo->cwnd += tp1->book_size;
4829 				cnt_revoked++;
4830 			}
4831 		}
4832 		if (cnt_revoked) {
4833 			reneged_all = 1;
4834 		}
4835 		asoc->saw_sack_with_frags = 0;
4836 	}
4837 	if (num_nr_seg > 0)
4838 		asoc->saw_sack_with_nr_frags = 1;
4839 	else
4840 		asoc->saw_sack_with_nr_frags = 0;
4841 
4842 	/* JRS - Use the congestion control given in the CC module */
4843 	if (ecne_seen == 0) {
4844 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4845 			if (net->net_ack2 > 0) {
4846 				/*
4847 				 * Karn's rule applies to clearing the
4848 				 * error count; this is optional.
4849 				 */
4850 				net->error_count = 0;
4851 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4852 					/* address became reachable again */
4853 					net->dest_state |= SCTP_ADDR_REACHABLE;
4854 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4855 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4856 				}
4857 				if (net == stcb->asoc.primary_destination) {
4858 					if (stcb->asoc.alternate) {
4859 						/*
4860 						 * release the alternate,
4861 						 * primary is good
4862 						 */
4863 						sctp_free_remote_addr(stcb->asoc.alternate);
4864 						stcb->asoc.alternate = NULL;
4865 					}
4866 				}
4867 				if (net->dest_state & SCTP_ADDR_PF) {
4868 					net->dest_state &= ~SCTP_ADDR_PF;
4869 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4870 					    stcb->sctp_ep, stcb, net,
4871 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4872 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4873 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4874 					/* Done with this net */
4875 					net->net_ack = 0;
4876 				}
4877 				/* restore any doubled timers */
4878 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4879 				if (net->RTO < stcb->asoc.minrto) {
4880 					net->RTO = stcb->asoc.minrto;
4881 				}
4882 				if (net->RTO > stcb->asoc.maxrto) {
4883 					net->RTO = stcb->asoc.maxrto;
4884 				}
4885 			}
4886 		}
4887 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4888 	}
4889 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4890 		/* nothing left in-flight */
4891 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4892 			/* stop all timers */
4893 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4894 			    stcb, net,
4895 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4896 			net->flight_size = 0;
4897 			net->partial_bytes_acked = 0;
4898 		}
4899 		asoc->total_flight = 0;
4900 		asoc->total_flight_count = 0;
4901 	}
4902 	/**********************************/
4903 	/* Now what about shutdown issues */
4904 	/**********************************/
4905 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4906 		/* nothing left on the send queue; consider done */
4907 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4908 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4909 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4910 		}
4911 		asoc->peers_rwnd = a_rwnd;
4912 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4913 			/* SWS sender side engages */
4914 			asoc->peers_rwnd = 0;
4915 		}
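		/*
		 * Treating a tiny peer window as zero is sender-side
		 * silly window syndrome (SWS) avoidance: rather than
		 * dribble out data into a window smaller than
		 * sctp_sws_sender, we wait for it to open up.
		 */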
4916 		/* clean up */
4917 		if ((asoc->stream_queue_cnt == 1) &&
4918 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4919 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4920 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4921 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4922 		}
4923 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4924 		    (asoc->stream_queue_cnt == 0)) {
4925 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4926 				/* Need to abort here */
4927 				struct mbuf *op_err;
4928 
4929 		abort_out_now:
4930 				*abort_now = 1;
4931 				/* XXX */
4932 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4933 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4934 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4935 				return;
4936 			} else {
4937 				struct sctp_nets *netp;
4938 
4939 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4940 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4941 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4942 				}
4943 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4944 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4945 				sctp_stop_timers_for_shutdown(stcb);
4946 				if (asoc->alternate) {
4947 					netp = asoc->alternate;
4948 				} else {
4949 					netp = asoc->primary_destination;
4950 				}
4951 				sctp_send_shutdown(stcb, netp);
4952 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4953 				    stcb->sctp_ep, stcb, netp);
4954 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4955 				    stcb->sctp_ep, stcb, netp);
4956 			}
4957 			return;
4958 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4959 		    (asoc->stream_queue_cnt == 0)) {
4960 			struct sctp_nets *netp;
4961 
4962 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4963 				goto abort_out_now;
4964 			}
4965 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4966 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4967 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4968 			sctp_stop_timers_for_shutdown(stcb);
4969 			if (asoc->alternate) {
4970 				netp = asoc->alternate;
4971 			} else {
4972 				netp = asoc->primary_destination;
4973 			}
4974 			sctp_send_shutdown_ack(stcb, netp);
4975 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4976 			    stcb->sctp_ep, stcb, netp);
4977 			return;
4978 		}
4979 	}
4980 	/*
4981 	 * HEADS UP: from here on we recycle net_ack for a different
4982 	 * use.
4983 	 */
4984 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4985 		net->net_ack = 0;
4986 	}
4987 
4988 	/*
4989 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4990 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4991 	 * automatically ensure that.
4992 	 */
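	/*
	 * Rough sketch of the DAC idea: a delayed-ack peer reports in a
	 * two-bit SACK flag how many packets the SACK acknowledges, and
	 * the striking code may use that to be more aggressive with
	 * missing reports. When the flag is 0 no extra marking is wanted,
	 * and pinning this_sack_lowest_newack to cum_ack disables it.
	 */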
4993 	if ((asoc->sctp_cmt_on_off > 0) &&
4994 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4995 	    (cmt_dac_flag == 0)) {
4996 		this_sack_lowest_newack = cum_ack;
4997 	}
4998 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4999 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5000 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5001 	}
5002 	/* JRS - Use the congestion control given in the CC module */
5003 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5004 
5005 	/* Now are we exiting loss recovery ? */
5006 	if (will_exit_fast_recovery) {
5007 		/* Ok, we must exit fast recovery */
5008 		asoc->fast_retran_loss_recovery = 0;
5009 	}
5010 	if ((asoc->sat_t3_loss_recovery) &&
5011 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5012 		/* end satellite t3 loss recovery */
5013 		asoc->sat_t3_loss_recovery = 0;
5014 	}
5015 	/*
5016 	 * CMT Fast recovery
5017 	 */
5018 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5019 		if (net->will_exit_fast_recovery) {
5020 			/* Ok, we must exit fast recovery */
5021 			net->fast_retran_loss_recovery = 0;
5022 		}
5023 	}
5024 
5025 	/* Adjust and set the new rwnd value */
5026 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5027 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5028 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5029 	}
5030 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5031 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
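	/*
	 * That is, peers_rwnd = a_rwnd - (bytes in flight + chunks in
	 * flight * sctp_peer_chunk_oh), where sctp_sbspace_sub() clamps
	 * the result at zero and sctp_peer_chunk_oh estimates the
	 * per-chunk overhead the peer charges against its window.
	 */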
5032 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5033 		/* SWS sender side engages */
5034 		asoc->peers_rwnd = 0;
5035 	}
5036 	if (asoc->peers_rwnd > old_rwnd) {
5037 		win_probe_recovery = 1;
5038 	}
5039 	/*
5040 	 * Now we must setup so we have a timer up for anyone with
5041 	 * outstanding data.
5042 	 */
5043 	done_once = 0;
5044 again:
5045 	j = 0;
5046 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5047 		if (win_probe_recovery && (net->window_probe)) {
5048 			win_probe_recovered = 1;
5049 			/*-
5050 			 * Find the first chunk that was used for a
5051 			 * window probe and clear the event. Put it
5052 			 * back into the send queue as if it had not
5053 			 * been sent.
5054 			 */
5055 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5056 				if (tp1->window_probe) {
5057 					sctp_window_probe_recovery(stcb, asoc, tp1);
5058 					break;
5059 				}
5060 			}
5061 		}
5062 		if (net->flight_size) {
5063 			j++;
5064 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5065 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5066 				    stcb->sctp_ep, stcb, net);
5067 			}
5068 			if (net->window_probe) {
5069 				net->window_probe = 0;
5070 			}
5071 		} else {
5072 			if (net->window_probe) {
5073 				/*
5074 				 * For window probes we must ensure a timer
5075 				 * is still running on that destination.
5076 				 */
5077 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5078 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5079 					    stcb->sctp_ep, stcb, net);
5080 
5081 				}
5082 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5083 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5084 				    stcb, net,
5085 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5086 			}
5087 		}
5088 	}
5089 	if ((j == 0) &&
5090 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5091 	    (asoc->sent_queue_retran_cnt == 0) &&
5092 	    (win_probe_recovered == 0) &&
5093 	    (done_once == 0)) {
5094 		/*
5095 		 * This should not happen unless all packets are
5096 		 * PR-SCTP and marked to be skipped.
5097 		 */
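		/*
		 * If the audit finds the accounting off, rebuild it from
		 * scratch: zero every per-net flight_size and the totals,
		 * then walk the sent queue re-adding whatever is not yet
		 * marked for resend and recounting the resend-marked
		 * chunks.
		 */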
5098 		if (sctp_fs_audit(asoc)) {
5099 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5100 				net->flight_size = 0;
5101 			}
5102 			asoc->total_flight = 0;
5103 			asoc->total_flight_count = 0;
5104 			asoc->sent_queue_retran_cnt = 0;
5105 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5106 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5107 					sctp_flight_size_increase(tp1);
5108 					sctp_total_flight_increase(stcb, tp1);
5109 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5110 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5111 				}
5112 			}
5113 		}
5114 		done_once = 1;
5115 		goto again;
5116 	}
5117 	/*********************************************/
5118 	/* Here we perform PR-SCTP procedures        */
5119 	/* (section 4.2)                             */
5120 	/*********************************************/
5121 	/* C1. update advancedPeerAckPoint */
5122 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5123 		asoc->advanced_peer_ack_point = cum_ack;
5124 	}
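	/*
	 * C1-C3 refer to the sender-side rules of RFC 3758 (PR-SCTP):
	 * advancedPeerAckPoint is the highest TSN below which everything
	 * is either acked or abandoned, and a FORWARD-TSN is due whenever
	 * it moves beyond the plain cumulative ack.
	 */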
5125 	/* C2. try to further move advancedPeerAckPoint ahead */
5126 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5127 		struct sctp_tmit_chunk *lchk;
5128 		uint32_t old_adv_peer_ack_point;
5129 
5130 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5131 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5132 		/* C3. See if we need to send a Fwd-TSN */
5133 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5134 			/*
5135 			 * ISSUE with ECN, see FWD-TSN processing.
5136 			 */
5137 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5138 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5139 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5140 				    old_adv_peer_ack_point);
5141 			}
5142 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5143 				send_forward_tsn(stcb, asoc);
5144 			} else if (lchk) {
5145 				/* try to fast-retransmit FORWARD-TSNs that get lost too */
5146 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5147 					send_forward_tsn(stcb, asoc);
5148 				}
5149 			}
5150 		}
5151 		if (lchk) {
5152 			/* Assure a timer is up */
5153 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5154 			    stcb->sctp_ep, stcb, lchk->whoTo);
5155 		}
5156 	}
5157 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5158 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5159 		    a_rwnd,
5160 		    stcb->asoc.peers_rwnd,
5161 		    stcb->asoc.total_flight,
5162 		    stcb->asoc.total_output_queue_size);
5163 	}
5164 }
5165 
5166 void
5167 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5168 {
5169 	/* Copy cum-ack */
5170 	uint32_t cum_ack, a_rwnd;
5171 
5172 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5173 	/* Arrange so a_rwnd does NOT change */
5174 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
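	/*
	 * peers_rwnd was last computed from the advertised window minus
	 * the data in flight, so adding total_flight back approximately
	 * reconstructs the window the peer advertised, which keeps a_rwnd
	 * effectively unchanged by this update.
	 */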
5175 
5176 	/* Now call the express sack handling */
5177 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5178 }
5179 
5180 static void
5181 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5182     struct sctp_stream_in *strmin)
5183 {
5184 	struct sctp_queued_to_read *ctl, *nctl;
5185 	struct sctp_association *asoc;
5186 	uint32_t mid;
5187 	int need_reasm_check = 0;
5188 
5189 	asoc = &stcb->asoc;
5190 	mid = strmin->last_mid_delivered;
5191 	/*
5192 	 * First deliver anything prior to and including the message ID
5193 	 * (MID) that came in.
5194 	 */
5195 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5196 		if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5197 			/* this is deliverable now */
5198 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5199 				if (ctl->on_strm_q) {
5200 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5201 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5202 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5203 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5204 #ifdef INVARIANTS
5205 					} else {
5206 						panic("strmin: %p ctl: %p unknown %d",
5207 						    strmin, ctl, ctl->on_strm_q);
5208 #endif
5209 					}
5210 					ctl->on_strm_q = 0;
5211 				}
5212 				/* subtract pending on streams */
5213 				asoc->size_on_all_streams -= ctl->length;
5214 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5215 				/* deliver it to at least the delivery-q */
5216 				if (stcb->sctp_socket) {
5217 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5218 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5219 					    ctl,
5220 					    &stcb->sctp_socket->so_rcv,
5221 					    1, SCTP_READ_LOCK_HELD,
5222 					    SCTP_SO_NOT_LOCKED);
5223 				}
5224 			} else {
5225 				/* It's a fragmented message */
5226 				if (ctl->first_frag_seen) {
5227 					/*
5228 					 * Make this the next one to
5229 					 * deliver; we restore it later.
5230 					 */
5231 					strmin->last_mid_delivered = ctl->mid - 1;
5232 					need_reasm_check = 1;
5233 					break;
5234 				}
5235 			}
5236 		} else {
5237 			/* no more delivery now. */
5238 			break;
5239 		}
5240 	}
5241 	if (need_reasm_check) {
5242 		int ret;
5243 
5244 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5245 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5246 			/* Restore the next to deliver unless we are ahead */
5247 			strmin->last_mid_delivered = mid;
5248 		}
5249 		if (ret == 0) {
5250 			/* Left the front partial message on the queue */
5251 			return;
5252 		}
5253 		need_reasm_check = 0;
5254 	}
5255 	/*
5256 	 * Now we must deliver anything in the queue the normal way, if
5257 	 * any are now ready.
5258 	 */
5259 	mid = strmin->last_mid_delivered + 1;
5260 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5261 		if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5262 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5263 				/* this is deliverable now */
5264 				if (ctl->on_strm_q) {
5265 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5266 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5267 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5268 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5269 #ifdef INVARIANTS
5270 					} else {
5271 						panic("strmin: %p ctl: %p unknown %d",
5272 						    strmin, ctl, ctl->on_strm_q);
5273 #endif
5274 					}
5275 					ctl->on_strm_q = 0;
5276 				}
5277 				/* subtract pending on streams */
5278 				asoc->size_on_all_streams -= ctl->length;
5279 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5280 				/* deliver it to at least the delivery-q */
5281 				strmin->last_mid_delivered = ctl->mid;
5282 				if (stcb->sctp_socket) {
5283 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5284 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5285 					    ctl,
5286 					    &stcb->sctp_socket->so_rcv, 1,
5287 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5288 
5289 				}
5290 				mid = strmin->last_mid_delivered + 1;
5291 			} else {
5292 				/* It's a fragmented message */
5293 				if (ctl->first_frag_seen) {
5294 					/*
5295 					 * Make this the next one to
5296 					 * deliver.
5297 					 */
5298 					strmin->last_mid_delivered = ctl->mid - 1;
5299 					need_reasm_check = 1;
5300 					break;
5301 				}
5302 			}
5303 		} else {
5304 			break;
5305 		}
5306 	}
5307 	if (need_reasm_check) {
5308 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5309 	}
5310 }
5311 
5312 
5313 
5314 static void
5315 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5316     struct sctp_association *asoc,
5317     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5318 {
5319 	struct sctp_queued_to_read *control;
5320 	struct sctp_stream_in *strm;
5321 	struct sctp_tmit_chunk *chk, *nchk;
5322 	int cnt_removed = 0;
5323 
5324 	/*
5325 	 * For now, large messages held on the stream reassembly queue that
5326 	 * are complete will be tossed too. In theory we could do more work:
5327 	 * spin through, stop after dumping one message (i.e., on seeing the
5328 	 * start of a new message at the head), and call the delivery
5329 	 * function to see if it can be delivered. But for now we just dump
5330 	 * everything on the queue.
5331 	 */
5332 	strm = &asoc->strmin[stream];
5333 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5334 	if (control == NULL) {
5335 		/* Not found */
5336 		return;
5337 	}
5338 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5339 		return;
5340 	}
5341 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5342 		/* Purge hanging chunks */
5343 		if (!asoc->idata_supported && (ordered == 0)) {
5344 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5345 				break;
5346 			}
5347 		}
5348 		cnt_removed++;
5349 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5350 		asoc->size_on_reasm_queue -= chk->send_size;
5351 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5352 		if (chk->data) {
5353 			sctp_m_freem(chk->data);
5354 			chk->data = NULL;
5355 		}
5356 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5357 	}
5358 	if (!TAILQ_EMPTY(&control->reasm)) {
5359 		/* This has to be old data, unordered */
5360 		if (control->data) {
5361 			sctp_m_freem(control->data);
5362 			control->data = NULL;
5363 		}
5364 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5365 		chk = TAILQ_FIRST(&control->reasm);
5366 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5367 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5368 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5369 			    chk, SCTP_READ_LOCK_HELD);
5370 		}
5371 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5372 		return;
5373 	}
5374 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5375 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5376 		control->on_strm_q = 0;
5377 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5378 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5379 		control->on_strm_q = 0;
5380 #ifdef INVARIANTS
5381 	} else if (control->on_strm_q) {
5382 		panic("strm: %p ctl: %p unknown %d",
5383 		    strm, control, control->on_strm_q);
5384 #endif
5385 	}
5386 	control->on_strm_q = 0;
5387 	if (control->on_read_q == 0) {
5388 		sctp_free_remote_addr(control->whoFrom);
5389 		if (control->data) {
5390 			sctp_m_freem(control->data);
5391 			control->data = NULL;
5392 		}
5393 		sctp_free_a_readq(stcb, control);
5394 	}
5395 }
5396 
5397 void
5398 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5399     struct sctp_forward_tsn_chunk *fwd,
5400     int *abort_flag, struct mbuf *m, int offset)
5401 {
5402 	/* The PR-SCTP FORWARD-TSN */
5403 	/*
5404 	 * Here we will perform all the data receiver side steps for
5405 	 * processing a FwdTSN, as required by the PR-SCTP draft.
5406 	 * Assume we get FwdTSN(x):
5407 	 *
5408 	 * 1) update local cumTSN to x
5409 	 * 2) try to further advance cumTSN to x + others we have
5410 	 * 3) examine and update the re-ordering queue on pr-in-streams
5411 	 * 4) clean up the re-assembly queue
5412 	 * 5) send a SACK to report where we are
5413 	 */
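	/*
	 * Worked example (hypothetical values): with cumTSN at 100, a
	 * FwdTSN(105) makes us behave as if TSNs 101-105 had arrived and
	 * been consumed: the mapping array is advanced past them, ordered
	 * delivery on the affected streams is unblocked, and the next
	 * SACK reports a cum-ack of at least 105.
	 */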
5414 	struct sctp_association *asoc;
5415 	uint32_t new_cum_tsn, gap;
5416 	unsigned int i, fwd_sz, m_size;
5417 	uint32_t str_seq;
5418 	struct sctp_stream_in *strm;
5419 	struct sctp_queued_to_read *ctl, *sv;
5420 
5421 	asoc = &stcb->asoc;
5422 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5423 		SCTPDBG(SCTP_DEBUG_INDATA1,
5424 		    "Bad fwd-tsn size: too small or too big\n");
5425 		return;
5426 	}
5427 	m_size = (stcb->asoc.mapping_array_size << 3);
5428 	/*************************************************************/
5429 	/* 1. Here we update local cumTSN and shift the bitmap array */
5430 	/*************************************************************/
5431 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5432 
5433 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5434 		/* Already got there ... */
5435 		return;
5436 	}
5437 	/*
5438 	 * Now we know the new TSN is more advanced; let's find the
5439 	 * actual gap.
5440 	 */
5441 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
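	/*
	 * The gap is the offset of the new cum-ack from the base of the
	 * mapping array, i.e. roughly (new_cum_tsn -
	 * mapping_array_base_tsn) in 32-bit serial arithmetic, so it
	 * stays well defined across TSN wrap.
	 */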
5442 	asoc->cumulative_tsn = new_cum_tsn;
5443 	if (gap >= m_size) {
5444 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5445 			struct mbuf *op_err;
5446 			char msg[SCTP_DIAG_INFO_LEN];
5447 
5448 			/*
5449 			 * out of range (of single byte chunks in the rwnd I
5450 			 * give out). This must be an attacker.
5451 			 */
5452 			*abort_flag = 1;
5453 			snprintf(msg, sizeof(msg),
5454 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5455 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5456 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5457 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5458 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5459 			return;
5460 		}
5461 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5462 
5463 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5464 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5465 		asoc->highest_tsn_inside_map = new_cum_tsn;
5466 
5467 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5468 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5469 
5470 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5471 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5472 		}
5473 	} else {
5474 		SCTP_TCB_LOCK_ASSERT(stcb);
5475 		for (i = 0; i <= gap; i++) {
5476 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5477 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5478 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5479 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5480 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5481 				}
5482 			}
5483 		}
5484 	}
5485 	/*************************************************************/
5486 	/* 2. Clean up re-assembly queue                             */
5487 	/*************************************************************/
5488 
5489 	/* This is now done as part of clearing up the stream/seq */
5490 	if (asoc->idata_supported == 0) {
5491 		uint16_t sid;
5492 
5493 		/* Flush all the un-ordered data based on cum-tsn */
5494 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5495 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5496 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5497 		}
5498 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5499 	}
5500 	/*******************************************************/
5501 	/* 3. Update the PR-stream re-ordering queues and fix  */
5502 	/*    delivery issues as needed.                        */
5503 	/*******************************************************/
5504 	fwd_sz -= sizeof(*fwd);
5505 	if (m && fwd_sz) {
5506 		/* New method. */
5507 		unsigned int num_str;
5508 		uint32_t mid, cur_mid;
5509 		uint16_t sid;
5510 		uint16_t ordered, flags;
5511 		struct sctp_strseq *stseq, strseqbuf;
5512 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5513 
5514 		offset += sizeof(*fwd);
5515 
5516 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5517 		if (asoc->idata_supported) {
5518 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5519 		} else {
5520 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5521 		}
5522 		for (i = 0; i < num_str; i++) {
5523 			if (asoc->idata_supported) {
5524 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5525 				    sizeof(struct sctp_strseq_mid),
5526 				    (uint8_t *)&strseqbuf_m);
5527 				offset += sizeof(struct sctp_strseq_mid);
5528 				if (stseq_m == NULL) {
5529 					break;
5530 				}
5531 				sid = ntohs(stseq_m->sid);
5532 				mid = ntohl(stseq_m->mid);
5533 				flags = ntohs(stseq_m->flags);
5534 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5535 					ordered = 0;
5536 				} else {
5537 					ordered = 1;
5538 				}
5539 			} else {
5540 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5541 				    sizeof(struct sctp_strseq),
5542 				    (uint8_t *)&strseqbuf);
5543 				offset += sizeof(struct sctp_strseq);
5544 				if (stseq == NULL) {
5545 					break;
5546 				}
5547 				sid = ntohs(stseq->sid);
5548 				mid = (uint32_t)ntohs(stseq->ssn);
5549 				ordered = 1;
5550 			}
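			/*
			 * Without I-DATA the wire format carries a 16-bit
			 * SSN, widened here to a 32-bit MID; the old format
			 * has no unordered flag, so every entry is treated
			 * as ordered.
			 */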
5551 			/* Conversion was done above. */
5552 
5553 			/* Now process: */
5554 
5555 			/*
5556 			 * OK, we now look for the stream/seq on the read
5557 			 * queue where it's not all delivered. If we find
5558 			 * it, we transmute the read entry into a PDI_ABORTED.
5559 			 */
5560 			if (sid >= asoc->streamincnt) {
5561 				/* invalid stream from the peer, stop! */
5562 				break;
5563 			}
5564 			if ((asoc->str_of_pdapi == sid) &&
5565 			    (asoc->ssn_of_pdapi == mid)) {
5566 				/*
5567 				 * If this is the one we were partially
5568 				 * delivering, then we no longer are.
5569 				 * Note this will change with the reassembly
5570 				 * re-write.
5571 				 */
5572 				asoc->fragmented_delivery_inprogress = 0;
5573 			}
5574 			strm = &asoc->strmin[sid];
5575 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5576 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5577 			}
5578 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5579 				if ((ctl->sinfo_stream == sid) &&
5580 				    (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5581 					str_seq = (sid << 16) | (0x0000ffff & mid);
5582 					ctl->pdapi_aborted = 1;
5583 					sv = stcb->asoc.control_pdapi;
5584 					ctl->end_added = 1;
5585 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5586 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5587 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5588 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5589 #ifdef INVARIANTS
5590 					} else if (ctl->on_strm_q) {
5591 						panic("strm: %p ctl: %p unknown %d",
5592 						    strm, ctl, ctl->on_strm_q);
5593 #endif
5594 					}
5595 					ctl->on_strm_q = 0;
5596 					stcb->asoc.control_pdapi = ctl;
5597 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5598 					    stcb,
5599 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5600 					    (void *)&str_seq,
5601 					    SCTP_SO_NOT_LOCKED);
5602 					stcb->asoc.control_pdapi = sv;
5603 					break;
5604 				} else if ((ctl->sinfo_stream == sid) &&
5605 				    SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5606 					/* We are past our victim SSN */
5607 					break;
5608 				}
5609 			}
5610 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5611 				/* Update the sequence number */
5612 				strm->last_mid_delivered = mid;
5613 			}
5614 			/* now kick the stream the new way */
5615 			/* sa_ignore NO_NULL_CHK */
5616 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5617 		}
5618 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5619 	}
5620 	/*
5621 	 * Now slide things forward.
5622 	 */
5623 	sctp_slide_mapping_arrays(stcb);
5624 }
5625