xref: /freebsd/sys/netinet/sctp_indata.c (revision a5ff72cb0e51a7675d4e2b5810a2b6dad5b91960)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <sys/proc.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctp_auth.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_asconf.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_bsd_addr.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp_crc32.h>
52 #include <netinet/sctp_lock_bsd.h>
53 /*
54  * NOTES: On the outbound side of things I need to check the sack timer to
55  * see if I should generate a sack into the chunk queue (if I have data to
56  * send, that is) and will be sending it ... for bundling.
57  *
58  * The callback in sctp_usrreq.c will get called when the socket is read from.
59  * This will cause sctp_service_queues() to get called on the top entry in
60  * the list.
61  */
62 static void
63 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
64     struct sctp_stream_in *strm,
65     struct sctp_tcb *stcb,
66     struct sctp_association *asoc,
67     struct sctp_tmit_chunk *chk);
68 
69 
70 void
71 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 {
73 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
74 }
75 
76 /* Calculate what the rwnd would be */
77 uint32_t
78 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	uint32_t calc = 0;
81 
82 	/*
83 	 * This is really set wrong with respect to a 1-to-many socket, since
84 	 * sb_cc is the count that everyone has put up. When we re-write
85 	 * sctp_soreceive we will fix this so that ONLY this
86 	 * association's data is taken into account.
87 	 */
88 	if (stcb->sctp_socket == NULL) {
89 		return (calc);
90 	}
91 	if (stcb->asoc.sb_cc == 0 &&
92 	    asoc->size_on_reasm_queue == 0 &&
93 	    asoc->size_on_all_streams == 0) {
94 		/* Full rwnd granted */
95 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
96 		return (calc);
97 	}
98 	/* get actual space */
99 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
100 	/*
101 	 * take out what has NOT been put on the socket queue and what we
102 	 * still hold for putting up.
103 	 */
104 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
105 	    asoc->cnt_on_reasm_queue * MSIZE));
106 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
107 	    asoc->cnt_on_all_streams * MSIZE));
108 	if (calc == 0) {
109 		/* out of space */
110 		return (calc);
111 	}
112 	/* what is the overhead of all these rwnd's */
113 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
114 	/*
115 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
116 	 * even if it is 0: silly window syndrome (SWS) avoidance engaged.
117 	 */
118 	if (calc < stcb->asoc.my_rwnd_control_len) {
119 		calc = 1;
120 	}
121 	return (calc);
122 }
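/*
 * Worked example (illustrative numbers only, assuming MSIZE == 256
 * and sctp_sbspace() reporting a full 64 KB buffer): with 3000 bytes
 * on the reassembly queue in 2 chunks and 1000 bytes on the stream
 * queues in 1 chunk,
 *
 *	65536 - (3000 + 2 * 256) - (1000 + 1 * 256) = 60768
 *
 * before my_rwnd_control_len is subtracted. Each queued chunk is
 * charged an extra MSIZE so that per-mbuf overhead cannot silently
 * exhaust the receive buffer.
 */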
123 
124 
125 
126 /*
127  * Build out our readq entry based on the incoming packet.
128  */
129 struct sctp_queued_to_read *
130 sctp_build_readq_entry(struct sctp_tcb *stcb,
131     struct sctp_nets *net,
132     uint32_t tsn, uint32_t ppid,
133     uint32_t context, uint16_t stream_no,
134     uint32_t stream_seq, uint8_t flags,
135     struct mbuf *dm)
136 {
137 	struct sctp_queued_to_read *read_queue_e = NULL;
138 
139 	sctp_alloc_a_readq(stcb, read_queue_e);
140 	if (read_queue_e == NULL) {
141 		goto failed_build;
142 	}
143 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
144 	read_queue_e->sinfo_stream = stream_no;
145 	read_queue_e->sinfo_ssn = stream_seq;
146 	read_queue_e->sinfo_flags = (flags << 8);
147 	read_queue_e->sinfo_ppid = ppid;
148 	read_queue_e->sinfo_context = context;
149 	read_queue_e->sinfo_tsn = tsn;
150 	read_queue_e->sinfo_cumtsn = tsn;
151 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
152 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
153 	TAILQ_INIT(&read_queue_e->reasm);
154 	read_queue_e->whoFrom = net;
155 	atomic_add_int(&net->ref_count, 1);
156 	read_queue_e->data = dm;
157 	read_queue_e->stcb = stcb;
158 	read_queue_e->port_from = stcb->rport;
159 failed_build:
160 	return (read_queue_e);
161 }
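/*
 * Note: sinfo_flags carries the DATA chunk flags in its upper byte,
 * hence the (flags << 8) above. A hypothetical check for an
 * unordered entry would therefore look like
 *
 *	if ((read_queue_e->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)
 *
 * which mirrors the unpacking done in sctp_place_control_in_stream().
 */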
162 
163 struct mbuf *
164 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
165 {
166 	struct sctp_extrcvinfo *seinfo;
167 	struct sctp_sndrcvinfo *outinfo;
168 	struct sctp_rcvinfo *rcvinfo;
169 	struct sctp_nxtinfo *nxtinfo;
170 	struct cmsghdr *cmh;
171 	struct mbuf *ret;
172 	int len;
173 	int use_extended;
174 	int provide_nxt;
175 
176 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
179 		/* user does not want any ancillary data */
180 		return (NULL);
181 	}
182 	len = 0;
183 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
184 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
185 	}
186 	seinfo = (struct sctp_extrcvinfo *)sinfo;
187 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
188 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
189 		provide_nxt = 1;
190 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
191 	} else {
192 		provide_nxt = 0;
193 	}
194 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
195 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
196 			use_extended = 1;
197 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
198 		} else {
199 			use_extended = 0;
200 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
201 		}
202 	} else {
203 		use_extended = 0;
204 	}
205 
206 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
207 	if (ret == NULL) {
208 		/* No space */
209 		return (ret);
210 	}
211 	SCTP_BUF_LEN(ret) = 0;
212 
213 	/* We need a CMSG header followed by the struct */
214 	cmh = mtod(ret, struct cmsghdr *);
215 	/*
216 	 * Make sure that there is no un-initialized padding between the
217 	 * cmsg header and cmsg data and after the cmsg data.
218 	 */
219 	memset(cmh, 0, len);
220 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
221 		cmh->cmsg_level = IPPROTO_SCTP;
222 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
223 		cmh->cmsg_type = SCTP_RCVINFO;
224 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
225 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
226 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
227 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
228 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
229 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
230 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
231 		rcvinfo->rcv_context = sinfo->sinfo_context;
232 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
233 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
234 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
235 	}
236 	if (provide_nxt) {
237 		cmh->cmsg_level = IPPROTO_SCTP;
238 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
239 		cmh->cmsg_type = SCTP_NXTINFO;
240 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
241 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
242 		nxtinfo->nxt_flags = 0;
243 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
244 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
245 		}
246 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
247 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
248 		}
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
250 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
251 		}
252 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
253 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
254 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
255 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
256 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
257 	}
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
261 		if (use_extended) {
262 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
263 			cmh->cmsg_type = SCTP_EXTRCV;
264 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
265 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
266 		} else {
267 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 			cmh->cmsg_type = SCTP_SNDRCV;
269 			*outinfo = *sinfo;
270 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
271 		}
272 	}
273 	return (ret);
274 }
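/*
 * A rough userland sketch (not part of this file): a receiver that
 * enabled SCTP_RECVRCVINFO would walk the ancillary data built above
 * along the lines of
 *
 *	for (cmh = CMSG_FIRSTHDR(&msg); cmh != NULL;
 *	    cmh = CMSG_NXTHDR(&msg, cmh))
 *		if (cmh->cmsg_level == IPPROTO_SCTP &&
 *		    cmh->cmsg_type == SCTP_RCVINFO)
 *			rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
 *
 * The memset() of the whole buffer guarantees that no uninitialized
 * padding between or after the cmsg records leaks to userland.
 */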
275 
276 
277 static void
278 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
279 {
280 	uint32_t gap, i, cumackp1;
281 	int fnd = 0;
282 	int in_r = 0, in_nr = 0;
283 
284 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
285 		return;
286 	}
287 	cumackp1 = asoc->cumulative_tsn + 1;
288 	if (SCTP_TSN_GT(cumackp1, tsn)) {
289 		/*
290 		 * this tsn is behind the cum ack and thus we don't need to
291 		 * worry about it being moved from one to the other.
292 		 */
293 		return;
294 	}
295 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
296 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
297 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
298 	if ((in_r == 0) && (in_nr == 0)) {
299 #ifdef INVARIANTS
300 		panic("Things are really messed up now");
301 #else
302 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
303 		sctp_print_mapping_array(asoc);
304 #endif
305 	}
306 	if (in_nr == 0)
307 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
308 	if (in_r)
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
311 		asoc->highest_tsn_inside_nr_map = tsn;
312 	}
313 	if (tsn == asoc->highest_tsn_inside_map) {
314 		/* We must back down to see what the new highest is */
315 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 				asoc->highest_tsn_inside_map = i;
319 				fnd = 1;
320 				break;
321 			}
322 		}
323 		if (!fnd) {
324 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
325 		}
326 	}
327 }
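/*
 * Example: with mapping_array_base_tsn == 0x1000 and tsn == 0x1005,
 * SCTP_CALC_TSN_TO_GAP() yields gap == 5, i.e. bit 5 of the arrays.
 * Moving that bit from mapping_array to nr_mapping_array marks the
 * TSN as one we will never renege on; with the sctp_do_drain sysctl
 * off we never renege at all, hence the early return above.
 */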
328 
329 static int
330 sctp_place_control_in_stream(struct sctp_stream_in *strm,
331     struct sctp_association *asoc,
332     struct sctp_queued_to_read *control)
333 {
334 	struct sctp_queued_to_read *at;
335 	struct sctp_readhead *q;
336 	uint8_t bits, unordered;
337 
338 	bits = (control->sinfo_flags >> 8);
339 	unordered = bits & SCTP_DATA_UNORDERED;
340 	if (unordered) {
341 		q = &strm->uno_inqueue;
342 		if (asoc->idata_supported == 0) {
343 			if (!TAILQ_EMPTY(q)) {
344 				/*
345 				 * Only one control can be here in old
346 				 * (pre I-DATA) style -- abort
347 				 */
348 				return (-1);
349 			}
350 			TAILQ_INSERT_TAIL(q, control, next_instrm);
351 			control->on_strm_q = SCTP_ON_UNORDERED;
352 			return (0);
353 		}
354 	} else {
355 		q = &strm->inqueue;
356 	}
357 	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
359 	}
360 	if (TAILQ_EMPTY(q)) {
361 		/* Empty queue */
362 		TAILQ_INSERT_HEAD(q, control, next_instrm);
363 		if (unordered) {
364 			control->on_strm_q = SCTP_ON_UNORDERED;
365 		} else {
366 			control->on_strm_q = SCTP_ON_ORDERED;
367 		}
368 		return (0);
369 	} else {
370 		TAILQ_FOREACH(at, q, next_instrm) {
371 			if (SCTP_MSGID_GT((!asoc->idata_supported), at->msg_id, control->msg_id)) {
372 				/*
373 				 * one in queue is bigger than the new one,
374 				 * insert before this one
375 				 */
376 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
377 				if (unordered) {
378 					control->on_strm_q = SCTP_ON_UNORDERED;
379 				} else {
380 					control->on_strm_q = SCTP_ON_ORDERED;
381 				}
382 				break;
383 			} else if (at->msg_id == control->msg_id) {
384 				/*
385 				 * Gak, He sent me a duplicate msg id
386 				 * Gak, the peer sent me a duplicate msg id
387 				 * number; return -1 to abort.
388 				return (-1);
389 			} else {
390 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
391 					/*
392 					 * We are at the end, insert it
393 					 * after this one
394 					 */
395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
396 						sctp_log_strm_del(control, at,
397 						    SCTP_STR_LOG_FROM_INSERT_TL);
398 					}
399 					TAILQ_INSERT_AFTER(q,
400 					    at, control, next_instrm);
401 					if (unordered) {
402 						control->on_strm_q = SCTP_ON_UNORDERED;
403 					} else {
404 						control->on_strm_q = SCTP_ON_ORDERED;
405 					}
406 					break;
407 				}
408 			}
409 		}
410 	}
411 	return (0);
412 }
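/*
 * Ordering sketch: for an ordered queue already holding msg_ids
 * {2, 5, 9}, inserting 7 stops at 9 (the first entry greater than 7)
 * and inserts before it; inserting 9 again returns -1 and the caller
 * aborts the association on the duplicate; inserting 11 walks off the
 * end and is appended after 9.
 */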
413 
414 static void
415 sctp_abort_in_reasm(struct sctp_tcb *stcb,
416     struct sctp_queued_to_read *control,
417     struct sctp_tmit_chunk *chk,
418     int *abort_flag, int opspot)
419 {
420 	char msg[SCTP_DIAG_INFO_LEN];
421 	struct mbuf *oper;
422 
423 	if (stcb->asoc.idata_supported) {
424 		snprintf(msg, sizeof(msg),
425 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
426 		    opspot,
427 		    control->fsn_included,
428 		    chk->rec.data.TSN_seq,
429 		    chk->rec.data.stream_number,
430 		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
431 	} else {
432 		snprintf(msg, sizeof(msg),
433 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
434 		    opspot,
435 		    control->fsn_included,
436 		    chk->rec.data.TSN_seq,
437 		    chk->rec.data.stream_number,
438 		    chk->rec.data.fsn_num,
439 		    (uint16_t) chk->rec.data.stream_seq);
440 	}
441 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
442 	sctp_m_freem(chk->data);
443 	chk->data = NULL;
444 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
445 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
446 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
447 	*abort_flag = 1;
448 }
449 
450 static void
451 clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
452 {
453 	/*
454 	 * The control could not be placed and must be cleaned.
455 	 */
456 	struct sctp_tmit_chunk *chk, *nchk;
457 
458 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
459 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
460 		if (chk->data)
461 			sctp_m_freem(chk->data);
462 		chk->data = NULL;
463 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
464 	}
465 	sctp_free_a_readq(stcb, control);
466 }
467 
468 /*
469  * Queue the chunk right into the socket buffer if it is the next one
470  * to go, OR put it in the correct place in the delivery queue.  If we
471  * do append to the so_buf, keep doing so until we run out of order, as
472  * long as the controls entered are non-fragmented.
473  */
474 static void
475 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
476     struct sctp_stream_in *strm,
477     struct sctp_association *asoc,
478     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
479 {
480 	/*
481 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
482 	 * all the data in one stream this could happen quite rapidly. One
483 	 * could use the TSN to keep track of things, but this scheme breaks
484 	 * down in the other type of stream usage that could occur. Send a
485 	 * single msg to stream 0, send 4 billion messages to stream 1, now
486 	 * send a message to stream 0. You have a situation where the TSN
487 	 * has wrapped but not in the stream. Is this worth worrying about,
488 	 * or should we just change our queue sort at the bottom to be by
489 	 * TSN?
490 	 *
491 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
492 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
493 	 * assignment this could happen... and I don't see how this would be
494 	 * a violation. So for now I am undecided and will leave the sort by
495 	 * SSN alone. Maybe a hybrid approach is the answer.
496 	 *
497 	 */
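	/*
	 * Example of the compare used below (assuming SCTP_MSGID_GT()
	 * does a 16-bit serial-number compare for old DATA): after
	 * delivering SSN 0xffff, an arriving SSN 0x0000 is "ahead", not
	 * "behind", so it does not trip the duplicate check and the wrap
	 * discussed above is tolerated by the compare itself.
	 */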
498 	struct sctp_queued_to_read *at;
499 	int queue_needed;
500 	uint32_t nxt_todel;
501 	struct mbuf *op_err;
502 	char msg[SCTP_DIAG_INFO_LEN];
503 
504 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
505 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
506 	}
507 	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
508 		/* The incoming sseq is behind where we last delivered? */
509 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
510 		    control->sinfo_ssn, strm->last_sequence_delivered);
511 protocol_error:
512 		/*
513 		 * throw it in the stream so it gets cleaned up in
514 		 * association destruction
515 		 */
516 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
517 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
518 		    strm->last_sequence_delivered, control->sinfo_tsn,
519 		    control->sinfo_stream, control->sinfo_ssn);
520 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
521 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
522 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
523 		*abort_flag = 1;
524 		return;
525 
526 	}
527 	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
528 		goto protocol_error;
529 	}
530 	queue_needed = 1;
531 	asoc->size_on_all_streams += control->length;
532 	sctp_ucount_incr(asoc->cnt_on_all_streams);
533 	nxt_todel = strm->last_sequence_delivered + 1;
534 	if (nxt_todel == control->sinfo_ssn) {
535 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
536 		struct socket *so;
537 
538 		so = SCTP_INP_SO(stcb->sctp_ep);
539 		atomic_add_int(&stcb->asoc.refcnt, 1);
540 		SCTP_TCB_UNLOCK(stcb);
541 		SCTP_SOCKET_LOCK(so, 1);
542 		SCTP_TCB_LOCK(stcb);
543 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
544 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
545 			SCTP_SOCKET_UNLOCK(so, 1);
546 			return;
547 		}
548 #endif
549 		/* can be delivered right away? */
550 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
551 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
552 		}
553 		/* EY it won't be queued if it could be delivered directly */
554 		queue_needed = 0;
555 		asoc->size_on_all_streams -= control->length;
556 		sctp_ucount_decr(asoc->cnt_on_all_streams);
557 		strm->last_sequence_delivered++;
558 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
559 		sctp_add_to_readq(stcb->sctp_ep, stcb,
560 		    control,
561 		    &stcb->sctp_socket->so_rcv, 1,
562 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
563 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
564 			/* all delivered */
565 			nxt_todel = strm->last_sequence_delivered + 1;
566 			if ((nxt_todel == control->sinfo_ssn) &&
567 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
568 				asoc->size_on_all_streams -= control->length;
569 				sctp_ucount_decr(asoc->cnt_on_all_streams);
570 				if (control->on_strm_q == SCTP_ON_ORDERED) {
571 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
572 #ifdef INVARIANTS
573 				} else {
574 					panic("Huh control: %p is on_strm_q: %d",
575 					    control, control->on_strm_q);
576 #endif
577 				}
578 				control->on_strm_q = 0;
579 				strm->last_sequence_delivered++;
580 				/*
581 				 * We ignore the return of deliver_data here
582 				 * since we always can hold the chunk on the
583 				 * d-queue. And we have a finite number that
584 				 * can be delivered from the strq.
585 				 */
586 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
587 					sctp_log_strm_del(control, NULL,
588 					    SCTP_STR_LOG_FROM_IMMED_DEL);
589 				}
590 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
591 				sctp_add_to_readq(stcb->sctp_ep, stcb,
592 				    control,
593 				    &stcb->sctp_socket->so_rcv, 1,
594 				    SCTP_READ_LOCK_NOT_HELD,
595 				    SCTP_SO_NOT_LOCKED);
596 				continue;
597 			} else if (nxt_todel == control->sinfo_ssn) {
598 				*need_reasm = 1;
599 			}
600 			break;
601 		}
602 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
603 		SCTP_SOCKET_UNLOCK(so, 1);
604 #endif
605 	}
606 	if (queue_needed) {
607 		/*
608 		 * Ok, we did not deliver this guy, find the correct place
609 		 * to put it on the queue.
610 		 */
611 		if (sctp_place_control_in_stream(strm, asoc, control)) {
612 			snprintf(msg, sizeof(msg),
613 			    "Queue to str msg_id: %u duplicate",
614 			    control->msg_id);
615 			clean_up_control(stcb, control);
616 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
617 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
618 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
619 			*abort_flag = 1;
620 		}
621 	}
622 }
623 
624 
625 static void
626 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
627 {
628 	struct mbuf *m, *prev = NULL;
629 	struct sctp_tcb *stcb;
630 
631 	stcb = control->stcb;
632 	control->held_length = 0;
633 	control->length = 0;
634 	m = control->data;
635 	while (m) {
636 		if (SCTP_BUF_LEN(m) == 0) {
637 			/* Skip mbufs with NO length */
638 			if (prev == NULL) {
639 				/* First one */
640 				control->data = sctp_m_free(m);
641 				m = control->data;
642 			} else {
643 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
644 				m = SCTP_BUF_NEXT(prev);
645 			}
646 			if (m == NULL) {
647 				control->tail_mbuf = prev;
648 			}
649 			continue;
650 		}
651 		prev = m;
652 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
653 		if (control->on_read_q) {
654 			/*
655 			 * On the read queue, so we must increment the SB
656 			 * accounting; we assume the caller holds the SB locks.
657 			 */
658 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
659 		}
660 		m = SCTP_BUF_NEXT(m);
661 	}
662 	if (prev) {
663 		control->tail_mbuf = prev;
664 	}
665 }
666 
667 static void
668 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
669 {
670 	struct mbuf *prev = NULL;
671 	struct sctp_tcb *stcb;
672 
673 	stcb = control->stcb;
674 	if (stcb == NULL) {
675 #ifdef INVARIANTS
676 		panic("Control broken");
677 #else
678 		return;
679 #endif
680 	}
681 	if (control->tail_mbuf == NULL) {
682 		/* TSNH */
683 		control->data = m;
684 		sctp_setup_tail_pointer(control);
685 		return;
686 	}
687 	control->tail_mbuf->m_next = m;
688 	while (m) {
689 		if (SCTP_BUF_LEN(m) == 0) {
690 			/* Skip mbufs with NO length */
691 			if (prev == NULL) {
692 				/* First one */
693 				control->tail_mbuf->m_next = sctp_m_free(m);
694 				m = control->tail_mbuf->m_next;
695 			} else {
696 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
697 				m = SCTP_BUF_NEXT(prev);
698 			}
699 			if (m == NULL) {
700 				control->tail_mbuf = prev;
701 			}
702 			continue;
703 		}
704 		prev = m;
705 		if (control->on_read_q) {
706 			/*
707 			 * On the read queue, so we must increment the SB
708 			 * accounting; we assume the caller holds the SB locks.
709 			 */
710 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
711 		}
712 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
713 		m = SCTP_BUF_NEXT(m);
714 	}
715 	if (prev) {
716 		control->tail_mbuf = prev;
717 	}
718 }
719 
720 static void
721 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
722 {
723 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
724 	nc->sinfo_stream = control->sinfo_stream;
725 	nc->sinfo_ssn = control->sinfo_ssn;
726 	TAILQ_INIT(&nc->reasm);
727 	nc->top_fsn = control->top_fsn;
728 	nc->msg_id = control->msg_id;
729 	nc->sinfo_flags = control->sinfo_flags;
730 	nc->sinfo_ppid = control->sinfo_ppid;
731 	nc->sinfo_context = control->sinfo_context;
732 	nc->fsn_included = 0xffffffff;
733 	nc->sinfo_tsn = control->sinfo_tsn;
734 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
735 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
736 	nc->whoFrom = control->whoFrom;
737 	atomic_add_int(&nc->whoFrom->ref_count, 1);
738 	nc->stcb = control->stcb;
739 	nc->port_from = control->port_from;
740 }
741 
742 static int
743 sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
744     struct sctp_queued_to_read *control, uint32_t pd_point)
745 {
746 	/*
747 	 * Special handling for the old un-ordered data chunk. All the
748 	 * chunks/TSN's go to msg_id 0. So we have to do the old style
749 	 * watching to see if we have it all. If we return 1, no other
750 	 * control entries on the un-ordered queue will be looked at. In
751 	 * theory there should be no other entries in reality, unless the
752 	 * peer is sending both unordered NDATA and unordered DATA...
753 	 */
754 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
755 	uint32_t fsn;
756 	struct sctp_queued_to_read *nc = NULL;
757 	int cnt_added;
758 
759 	if (control->first_frag_seen == 0) {
760 		/* Nothing we can do, we have not seen the first piece yet */
761 		return (1);
762 	}
763 	/* Collapse any we can */
764 	cnt_added = 0;
765 restart:
766 	fsn = control->fsn_included + 1;
767 	/* Now what can we add? */
768 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
769 		if (chk->rec.data.fsn_num == fsn) {
770 			/* Ok lets add it */
771 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
772 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
773 			fsn++;
774 			cnt_added++;
775 			chk = NULL;
776 			if (control->end_added) {
777 				/* We are done */
778 				if (!TAILQ_EMPTY(&control->reasm)) {
779 					/*
780 					 * Ok we have to move anything left
781 					 * on the control queue to a new
782 					 * control.
783 					 */
784 					sctp_alloc_a_readq(stcb, nc);
785 					sctp_build_readq_entry_from_ctl(nc, control);
786 					tchk = TAILQ_FIRST(&control->reasm);
787 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
788 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
789 						nc->first_frag_seen = 1;
790 						nc->fsn_included = tchk->rec.data.fsn_num;
791 						nc->data = tchk->data;
792 						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
793 						tchk->data = NULL;
794 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
795 						sctp_setup_tail_pointer(nc);
796 						tchk = TAILQ_FIRST(&control->reasm);
797 					}
798 					/* Spin the rest onto the queue */
799 					while (tchk) {
800 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
801 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
802 						tchk = TAILQ_FIRST(&control->reasm);
803 					}
804 					/*
805 					 * Now let's add it to the queue
806 					 * after removing control
807 					 */
808 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
809 					nc->on_strm_q = SCTP_ON_UNORDERED;
810 					if (control->on_strm_q) {
811 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
812 						control->on_strm_q = 0;
813 					}
814 				}
815 				if (control->on_read_q == 0) {
816 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
817 					    &stcb->sctp_socket->so_rcv, control->end_added,
818 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
819 				}
820 				if (control->pdapi_started) {
821 					strm->pd_api_started = 0;
822 					control->pdapi_started = 0;
823 				}
824 				if (control->on_strm_q) {
825 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
826 					control->on_strm_q = 0;
827 				}
828 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
829 				if ((nc) && (nc->first_frag_seen)) {
830 					/*
831 					 * Switch to the new guy and
832 					 * continue
833 					 */
834 					control = nc;
835 					nc = NULL;
836 					goto restart;
837 				}
838 				return (1);
839 			}
840 		} else {
841 			/* Can't add more */
842 			break;
843 		}
844 	}
845 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
846 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
847 		    &stcb->sctp_socket->so_rcv, control->end_added,
848 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
849 		strm->pd_api_started = 1;
850 		control->pdapi_started = 1;
851 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
852 		return (0);
853 	} else {
854 		return (1);
855 	}
856 }
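/*
 * Return-value contract, restated: 1 means the caller
 * (sctp_deliver_reasm_check) should stop looking at the un-ordered
 * queue and move on; 0 means a partial delivery was just started
 * here, in which case the caller bails out until the PD-API run
 * completes.
 */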
857 
858 static void
859 sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
860     struct sctp_queued_to_read *control,
861     struct sctp_tmit_chunk *chk,
862     int *abort_flag)
863 {
864 	struct sctp_tmit_chunk *at;
865 	int inserted = 0;
866 
867 	/*
868 	 * Here we need to place the chunk into the control structure sorted
869 	 * in the correct order.
870 	 */
871 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
872 		/* It's the very first one. */
873 		SCTPDBG(SCTP_DEBUG_XXX,
874 		    "chunk is a first fsn: %u becomes fsn_included\n",
875 		    chk->rec.data.fsn_num);
876 		if (control->first_frag_seen) {
877 			/*
878 			 * In old un-ordered we can reassembly on one
879 			 * In old un-ordered we can reassemble multiple
880 			 * messages on one control, as long as the next
881 			 * FIRST is greater than the old first (TSN-wise,
882 			 * i.e. FSN-wise).
883 			struct mbuf *tdata;
884 			uint32_t tmp;
885 
886 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
887 				/*
888 				 * Easy way the start of a new guy beyond
889 				 * the lowest
890 				 */
891 				goto place_chunk;
892 			}
893 			if ((chk->rec.data.fsn_num == control->fsn_included) ||
894 			    (control->pdapi_started)) {
895 				/*
896 				 * Ok, this should not happen; if it does, we
897 				 * started the pd-api on the higher TSN
898 				 * (since the equals case is a TSN failure,
899 				 * it must be that).
900 				 *
901 				 * We are completely hosed in that case, since
902 				 * I have no way to recover. This really will
903 				 * only happen if we can get more TSNs
904 				 * higher before the pd-api point.
905 				 */
906 				sctp_abort_in_reasm(stcb, control, chk,
907 				    abort_flag,
908 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
909 
910 				return;
911 			}
912 			/*
913 			 * Ok we have two firsts and the one we just got is
914 			 * smaller than the one we previously placed.. yuck!
915 			 * We must swap them out.
916 			 */
917 			/* swap the mbufs */
918 			tdata = control->data;
919 			control->data = chk->data;
920 			chk->data = tdata;
921 			/* Swap the lengths */
922 			tmp = control->length;
923 			control->length = chk->send_size;
924 			chk->send_size = tmp;
925 			/* Fix the FSN included */
926 			tmp = control->fsn_included;
927 			control->fsn_included = chk->rec.data.fsn_num;
928 			chk->rec.data.fsn_num = tmp;
929 			goto place_chunk;
930 		}
931 		control->first_frag_seen = 1;
932 		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
933 		control->data = chk->data;
934 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
935 		chk->data = NULL;
936 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
937 		sctp_setup_tail_pointer(control);
938 		return;
939 	}
940 place_chunk:
941 	if (TAILQ_EMPTY(&control->reasm)) {
942 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
943 		asoc->size_on_reasm_queue += chk->send_size;
944 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
945 		return;
946 	}
947 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
948 		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
949 			/*
950 			 * This one in queue is bigger than the new one,
951 			 * insert the new one before at.
952 			 */
953 			asoc->size_on_reasm_queue += chk->send_size;
954 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
955 			inserted = 1;
956 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
957 			break;
958 		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
959 			/*
960 			 * They sent a duplicate fsn number. This really
961 			 * should not happen since the FSN is a TSN and it
962 			 * should have been dropped earlier. Note that
963 			 * sctp_abort_in_reasm() frees chk->data and the
964 			 * chunk itself, so freeing them here as well
965 			 * would be a double free.
966 			 */
969 			sctp_abort_in_reasm(stcb, control, chk,
970 			    abort_flag,
971 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
972 			return;
973 		}
974 	}
975 	if (inserted == 0) {
976 		/* It's at the end */
977 		asoc->size_on_reasm_queue += chk->send_size;
978 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
979 		control->top_fsn = chk->rec.data.fsn_num;
980 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
981 	}
982 }
983 
984 static int
985 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
986 {
987 	/*
988 	 * Given a stream, strm, see if any of the SSN's on it that are
989 	 * fragmented are ready to deliver. If so go ahead and place them on
990 	 * the read queue. In so placing if we have hit the end, then we
991 	 * need to remove them from the stream's queue.
992 	 */
993 	struct sctp_queued_to_read *control, *nctl = NULL;
994 	uint32_t next_to_del;
995 	uint32_t pd_point;
996 	int ret = 0;
997 
998 	if (stcb->sctp_socket) {
999 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1000 		    stcb->sctp_ep->partial_delivery_point);
1001 	} else {
1002 		pd_point = stcb->sctp_ep->partial_delivery_point;
1003 	}
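	/*
	 * Example: with a 64 KB receive buffer and, say,
	 * SCTP_PARTIAL_DELIVERY_SHIFT == 2, the shift caps pd_point at
	 * 16 KB, further limited by the endpoint's configured
	 * partial_delivery_point; a partially reassembled message must
	 * hold at least pd_point bytes before a PD-API delivery is
	 * considered below.
	 */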
1004 	control = TAILQ_FIRST(&strm->uno_inqueue);
1005 	if ((control) &&
1006 	    (asoc->idata_supported == 0)) {
1007 		/* Special handling needed for "old" data format */
1008 		if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
1009 			goto done_un;
1010 		}
1011 	}
1012 	if (strm->pd_api_started) {
1013 		/* Can't add more */
1014 		return (0);
1015 	}
1016 	while (control) {
1017 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1018 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
1019 		nctl = TAILQ_NEXT(control, next_instrm);
1020 		if (control->end_added) {
1021 			/* We just put the last bit on */
1022 			if (control->on_strm_q) {
1023 #ifdef INVARIANTS
1024 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1025 					panic("Huh control: %p on_q: %d -- not unordered?",
1026 					    control, control->on_strm_q);
1027 				}
1028 #endif
1029 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1030 				control->on_strm_q = 0;
1031 			}
1032 			if (control->on_read_q == 0) {
1033 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1034 				    control,
1035 				    &stcb->sctp_socket->so_rcv, control->end_added,
1036 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1037 			}
1038 		} else {
1039 			/* Can we do a PD-API for this un-ordered guy? */
1040 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1041 				strm->pd_api_started = 1;
1042 				control->pdapi_started = 1;
1043 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1044 				    control,
1045 				    &stcb->sctp_socket->so_rcv, control->end_added,
1046 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1047 
1048 				break;
1049 			}
1050 		}
1051 		control = nctl;
1052 	}
1053 done_un:
1054 	control = TAILQ_FIRST(&strm->inqueue);
1055 	if (strm->pd_api_started) {
1056 		/* Can't add more */
1057 		return (0);
1058 	}
1059 	if (control == NULL) {
1060 		return (ret);
1061 	}
1062 	if (strm->last_sequence_delivered == control->sinfo_ssn) {
1063 		/*
1064 		 * Ok, the guy at the top was being partially delivered and
1065 		 * has completed, so we remove it. Note the pd_api flag was
1066 		 * taken off when the chunk was merged on in
1067 		 * sctp_queue_data_for_reasm below.
1068 		 */
1069 		nctl = TAILQ_NEXT(control, next_instrm);
1070 		SCTPDBG(SCTP_DEBUG_XXX,
1071 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1072 		    control, control->end_added, control->sinfo_ssn,
1073 		    control->top_fsn, control->fsn_included,
1074 		    strm->last_sequence_delivered);
1075 		if (control->end_added) {
1076 			if (control->on_strm_q) {
1077 #ifdef INVARIANTS
1078 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1079 					panic("Huh control: %p on_q: %d -- not ordered?",
1080 					    control, control->on_strm_q);
1081 				}
1082 #endif
1083 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1084 				control->on_strm_q = 0;
1085 			}
1086 			if (control->on_read_q == 0) {
1087 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1088 				    control,
1089 				    &stcb->sctp_socket->so_rcv, control->end_added,
1090 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1091 			}
1092 			if (strm->pd_api_started && control->pdapi_started) {
1093 				control->pdapi_started = 0;
1094 				strm->pd_api_started = 0;
1095 			}
1096 			control = nctl;
1097 		}
1098 	}
1099 	if (strm->pd_api_started) {
1100 		/*
1101 		 * Can't add more; we must have gotten an un-ordered one
1102 		 * above that is being partially delivered.
1103 		 */
1104 		return (0);
1105 	}
1106 deliver_more:
1107 	next_to_del = strm->last_sequence_delivered + 1;
1108 	if (control) {
1109 		SCTPDBG(SCTP_DEBUG_XXX,
1110 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1111 		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
1112 		    next_to_del);
1113 		nctl = TAILQ_NEXT(control, next_instrm);
1114 		if ((control->sinfo_ssn == next_to_del) &&
1115 		    (control->first_frag_seen)) {
1116 			/* Ok we can deliver it onto the stream. */
1117 			if (control->end_added) {
1118 				/* We are done with it afterwards */
1119 				if (control->on_strm_q) {
1120 #ifdef INVARIANTS
1121 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1122 						panic("Huh control: %p on_q: %d -- not ordered?",
1123 						    control, control->on_strm_q);
1124 					}
1125 #endif
1126 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1127 					control->on_strm_q = 0;
1128 				}
1129 				ret++;
1130 			}
1131 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1132 				/*
1133 				 * A singleton now slipping through - mark
1134 				 * it non-revokable too
1135 				 */
1136 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1137 			} else if (control->end_added == 0) {
1138 				/*
1139 				 * Check if we can defer adding until its
1140 				 * all there
1141 				 */
1142 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1143 					/*
1144 					 * Don't need it or cannot add more
1145 					 * (one being delivered that way)
1146 					 */
1147 					goto out;
1148 				}
1149 			}
1150 			if (control->on_read_q == 0) {
1151 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1152 				    control,
1153 				    &stcb->sctp_socket->so_rcv, control->end_added,
1154 				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1155 			}
1156 			strm->last_sequence_delivered = next_to_del;
1157 			if ((control->end_added) && (control->last_frag_seen)) {
1158 				control = nctl;
1159 				goto deliver_more;
1160 			} else {
1161 				/* We are now doing PD API */
1162 				strm->pd_api_started = 1;
1163 				control->pdapi_started = 1;
1164 			}
1165 		}
1166 	}
1167 out:
1168 	return (ret);
1169 }
1170 
1171 void
1172 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1173     struct sctp_stream_in *strm,
1174     struct sctp_tcb *stcb, struct sctp_association *asoc,
1175     struct sctp_tmit_chunk *chk)
1176 {
1177 	/*
1178 	 * Given a control and a chunk, merge the data from the chk onto the
1179 	 * control and free up the chunk resources.
1180 	 */
1181 	int i_locked = 0;
1182 
1183 	if (control->on_read_q) {
1184 		/*
1185 		 * It's being pd-api'd, so we must take some locks.
1186 		 */
1187 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
1188 		i_locked = 1;
1189 	}
1190 	if (control->data == NULL) {
1191 		control->data = chk->data;
1192 		sctp_setup_tail_pointer(control);
1193 	} else {
1194 		sctp_add_to_tail_pointer(control, chk->data);
1195 	}
1196 	control->fsn_included = chk->rec.data.fsn_num;
1197 	asoc->size_on_reasm_queue -= chk->send_size;
1198 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1199 	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1200 	chk->data = NULL;
1201 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1202 		control->first_frag_seen = 1;
1203 	}
1204 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1205 		/* It's complete */
1206 		if ((control->on_strm_q) && (control->on_read_q)) {
1207 			if (control->pdapi_started) {
1208 				control->pdapi_started = 0;
1209 				strm->pd_api_started = 0;
1210 			}
1211 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1212 				/* Unordered */
1213 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1214 				control->on_strm_q = 0;
1215 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1216 				/* Ordered */
1217 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1218 				control->on_strm_q = 0;
1219 #ifdef INVARIANTS
1220 			} else if (control->on_strm_q) {
1221 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1222 				    control->on_strm_q);
1223 #endif
1224 			}
1225 		}
1226 		control->end_added = 1;
1227 		control->last_frag_seen = 1;
1228 	}
1229 	if (i_locked) {
1230 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1231 	}
1232 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1233 }
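/*
 * Summary of the merge above: the chunk's mbuf chain is spliced onto
 * control->tail_mbuf, fsn_included advances to the chunk's FSN, and
 * the reassembly-queue accounting shrinks by send_size. Only when
 * SCTP_DATA_LAST_FRAG is seen is the control unhooked from its stream
 * queue and marked end_added for the reader.
 */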
1234 
1235 /*
1236  * Dump onto the re-assembly queue, in its proper place. After dumping on
1237  * the queue, see if anything can be delivered. If so, pull it off (or as
1238  * much as we can). If we run out of space then we must dump what we can
1239  * and set the appropriate flag to say we queued what we could.
1240  */
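/*
 * Example: for a message fragmented as FSNs 0 (B), 1, 2, 3 (E)
 * arriving in the order 0, 2, 3, 1: fragment 0 seeds control->data
 * with fsn_included = 0; 2 and 3 park on control->reasm; when 1
 * arrives, the in-sequence sweep at the bottom of the function
 * absorbs 1, 2 and 3 in turn, and the E bit on 3 completes the
 * message.
 */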
1241 static void
1242 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1243     struct sctp_stream_in *strm,
1244     struct sctp_queued_to_read *control,
1245     struct sctp_tmit_chunk *chk,
1246     int created_control,
1247     int *abort_flag, uint32_t tsn)
1248 {
1249 	uint32_t next_fsn;
1250 	struct sctp_tmit_chunk *at, *nat;
1251 	int cnt_added, unordered;
1252 
1253 	/*
1254 	 * For old un-ordered data chunks.
1255 	 */
1256 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1257 		unordered = 1;
1258 	} else {
1259 		unordered = 0;
1260 	}
1261 	/* Must be added to the stream-in queue */
1262 	if (created_control) {
1263 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1264 			/* Duplicate SSN? */
1265 			clean_up_control(stcb, control);
1266 			sctp_abort_in_reasm(stcb, control, chk,
1267 			    abort_flag,
1268 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1269 			return;
1270 		}
1271 		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
1272 			/*
1273 			 * Ok, we created this control; now let's validate
1274 			 * that it is legal, i.e. there is a B bit set. If
1275 			 * not, and we have up to the cum-ack, it is invalid.
1276 			 */
1277 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1278 				sctp_abort_in_reasm(stcb, control, chk,
1279 				    abort_flag,
1280 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1281 				return;
1282 			}
1283 		}
1284 	}
1285 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1286 		sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
1287 		return;
1288 	}
1289 	/*
1290 	 * Ok, we must queue the chunk into the reassembly portion: o if it
1291 	 * is the first, it goes to the control mbuf. o if it is not first
1292 	 * but the next in sequence, it goes to the control, and each
1293 	 * succeeding one in order also goes. o if it is not in order, we
1294 	 * place it on the list in its place.
1295 	 */
1296 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1297 		/* It's the very first one. */
1298 		SCTPDBG(SCTP_DEBUG_XXX,
1299 		    "chunk is a first fsn: %u becomes fsn_included\n",
1300 		    chk->rec.data.fsn_num);
1301 		if (control->first_frag_seen) {
1302 			/*
1303 			 * Error on the sender's part: they either sent us two
1304 			 * data chunks with FIRST, or they sent two
1305 			 * un-ordered chunks that were fragmented at the
1306 			 * same time in the same stream.
1307 			 */
1308 			sctp_abort_in_reasm(stcb, control, chk,
1309 			    abort_flag,
1310 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1311 			return;
1312 		}
1313 		control->first_frag_seen = 1;
1314 		control->fsn_included = chk->rec.data.fsn_num;
1315 		control->data = chk->data;
1316 		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
1317 		chk->data = NULL;
1318 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1319 		sctp_setup_tail_pointer(control);
1320 	} else {
1321 		/* Place the chunk in our list */
1322 		int inserted = 0;
1323 
1324 		if (control->last_frag_seen == 0) {
1325 			/* Still willing to raise highest FSN seen */
1326 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1327 				SCTPDBG(SCTP_DEBUG_XXX,
1328 				    "We have a new top_fsn: %u\n",
1329 				    chk->rec.data.fsn_num);
1330 				control->top_fsn = chk->rec.data.fsn_num;
1331 			}
1332 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1333 				SCTPDBG(SCTP_DEBUG_XXX,
1334 				    "The last fsn is now in place fsn: %u\n",
1335 				    chk->rec.data.fsn_num);
1336 				control->last_frag_seen = 1;
1337 			}
1338 			if (asoc->idata_supported || control->first_frag_seen) {
1339 				/*
1340 				 * For IDATA we always check since we know
1341 				 * that the first fragment is 0. For old
1342 				 * DATA we have to receive the first before
1343 				 * we know the first FSN (which is the TSN).
1344 				 */
1345 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1346 					/*
1347 					 * We have already delivered up to
1348 					 * this, so it's a dup
1349 					 */
1350 					sctp_abort_in_reasm(stcb, control, chk,
1351 					    abort_flag,
1352 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1353 					return;
1354 				}
1355 			}
1356 		} else {
1357 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1358 				/* Second last? huh? */
1359 				SCTPDBG(SCTP_DEBUG_XXX,
1360 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1361 				    chk->rec.data.fsn_num, control->top_fsn);
1362 				sctp_abort_in_reasm(stcb, control,
1363 				    chk, abort_flag,
1364 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1365 				return;
1366 			}
1367 			if (asoc->idata_supported || control->first_frag_seen) {
1368 				/*
1369 				 * For IDATA we always check since we know
1370 				 * that the first fragment is 0. For old
1371 				 * DATA we have to receive the first before
1372 				 * we know the first FSN (which is the TSN).
1373 				 */
1374 
1375 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
1376 					/*
1377 					 * We have already delivered up to
1378 					 * this, so it's a dup
1379 					 */
1380 					SCTPDBG(SCTP_DEBUG_XXX,
1381 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1382 					    chk->rec.data.fsn_num, control->fsn_included);
1383 					sctp_abort_in_reasm(stcb, control, chk,
1384 					    abort_flag,
1385 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1386 					return;
1387 				}
1388 			}
1389 			/*
1390 			 * validate not beyond top FSN if we have seen last
1391 			 * one
1392 			 */
1393 			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
1394 				SCTPDBG(SCTP_DEBUG_XXX,
1395 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1396 				    chk->rec.data.fsn_num,
1397 				    control->top_fsn);
1398 				sctp_abort_in_reasm(stcb, control, chk,
1399 				    abort_flag,
1400 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1401 				return;
1402 			}
1403 		}
1404 		/*
1405 		 * If we reach here, we need to place the new chunk in the
1406 		 * reassembly for this control.
1407 		 */
1408 		SCTPDBG(SCTP_DEBUG_XXX,
1409 		    "chunk is a not first fsn: %u needs to be inserted\n",
1410 		    chk->rec.data.fsn_num);
1411 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1412 			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
1413 				/*
1414 				 * This one in queue is bigger than the new
1415 				 * one, insert the new one before at.
1416 				 */
1417 				SCTPDBG(SCTP_DEBUG_XXX,
1418 				    "Insert it before fsn: %u\n",
1419 				    at->rec.data.fsn_num);
1420 				asoc->size_on_reasm_queue += chk->send_size;
1421 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1422 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1423 				inserted = 1;
1424 				break;
1425 			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
1426 				/*
1427 				 * Gak, the peer sent me a duplicate str seq
1428 				 * number
1429 				 */
1430 				/*
1431 				 * foo bar, I guess I will just free this
1432 				 * new guy; should we abort too? FIX ME
1433 				 * MAYBE? Or it COULD be that the SSNs have
1434 				 * wrapped. Maybe I should compare to the TSN
1435 				 * somehow... sigh, for now just blow away
1436 				 * the chunk!
1437 				 */
1438 				SCTPDBG(SCTP_DEBUG_XXX,
1439 				    "Duplicate to fsn: %u -- abort\n",
1440 				    at->rec.data.fsn_num);
1441 				sctp_abort_in_reasm(stcb, control,
1442 				    chk, abort_flag,
1443 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1444 				return;
1445 			}
1446 		}
1447 		if (inserted == 0) {
1448 			/* Goes on the end */
1449 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1450 			    chk->rec.data.fsn_num);
1451 			asoc->size_on_reasm_queue += chk->send_size;
1452 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1453 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1454 		}
1455 	}
1456 	/*
1457 	 * Ok, let's see if we can pull any chunks that are in sequence up
1458 	 * into the control structure, if it makes sense.
1459 	 */
1460 	cnt_added = 0;
1461 	/*
1462 	 * If the first fragment has not been seen there is no sense in
1463 	 * looking.
1464 	 */
1465 	if (control->first_frag_seen) {
1466 		next_fsn = control->fsn_included + 1;
1467 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1468 			if (at->rec.data.fsn_num == next_fsn) {
1469 				/* We can add this one now to the control */
1470 				SCTPDBG(SCTP_DEBUG_XXX,
1471 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1472 				    control, at,
1473 				    at->rec.data.fsn_num,
1474 				    next_fsn, control->fsn_included);
1475 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1476 				sctp_add_chk_to_control(control, strm, stcb, asoc, at);
1477 				cnt_added++;
1478 				next_fsn++;
1479 				if (control->end_added && control->pdapi_started) {
1480 					if (strm->pd_api_started) {
1481 						strm->pd_api_started = 0;
1482 						control->pdapi_started = 0;
1483 					}
1484 					if (control->on_read_q == 0) {
1485 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1486 						    control,
1487 						    &stcb->sctp_socket->so_rcv, control->end_added,
1488 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1489 					}
1490 					break;
1491 				}
1492 			} else {
1493 				break;
1494 			}
1495 		}
1496 	}
1497 	if ((control->on_read_q) && (cnt_added > 0)) {
1498 		/* Need to wakeup the reader */
1499 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1500 	}
1501 }
1502 
1503 static struct sctp_queued_to_read *
1504 find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
1505 {
1506 	struct sctp_queued_to_read *reasm;
1507 
1508 	if (ordered) {
1509 		TAILQ_FOREACH(reasm, &strm->inqueue, next_instrm) {
1510 			if (reasm->msg_id == msg_id) {
1511 				break;
1512 			}
1513 		}
1514 	} else {
1515 		if (old) {
1516 			reasm = TAILQ_FIRST(&strm->uno_inqueue);
1517 			return (reasm);
1518 		}
1519 		TAILQ_FOREACH(reasm, &strm->uno_inqueue, next_instrm) {
1520 			if (reasm->msg_id == msg_id) {
1521 				break;
1522 			}
1523 		}
1524 	}
1525 	return (reasm);
1526 }
1527 
1528 
1529 static int
1530 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1531     struct mbuf **m, int offset, int chk_length,
1532     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1533     int *break_flag, int last_chunk, uint8_t chtype)
1534 {
1535 	/* Process a data chunk */
1536 	/* struct sctp_tmit_chunk *chk; */
1537 	struct sctp_data_chunk *ch;
1538 	struct sctp_idata_chunk *nch, chunk_buf;
1539 	struct sctp_tmit_chunk *chk;
1540 	uint32_t tsn, fsn, gap, msg_id;
1541 	struct mbuf *dmbuf;
1542 	int the_len;
1543 	int need_reasm_check = 0;
1544 	uint16_t strmno;
1545 	struct mbuf *op_err;
1546 	char msg[SCTP_DIAG_INFO_LEN];
1547 	struct sctp_queued_to_read *control = NULL;
1548 	uint32_t protocol_id;
1549 	uint8_t chunk_flags;
1550 	struct sctp_stream_reset_list *liste;
1551 	struct sctp_stream_in *strm;
1552 	int ordered;
1553 	size_t clen;
1554 	int created_control = 0;
1555 	uint8_t old_data;
1556 
1557 	chk = NULL;
1558 	if (chtype == SCTP_IDATA) {
1559 		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1560 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1561 		ch = (struct sctp_data_chunk *)nch;
1562 		clen = sizeof(struct sctp_idata_chunk);
1563 		tsn = ntohl(ch->dp.tsn);
1564 		msg_id = ntohl(nch->dp.msg_id);
1565 		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
1566 			fsn = 0;
1567 		else
1568 			fsn = ntohl(nch->dp.ppid_fsn.fsn);
1569 		old_data = 0;
1570 	} else {
1571 		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1572 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1573 		tsn = ntohl(ch->dp.tsn);
1574 		clen = sizeof(struct sctp_data_chunk);
1575 		fsn = tsn;
1576 		msg_id = (uint32_t) (ntohs(ch->dp.stream_sequence));
1577 		nch = NULL;
1578 		old_data = 1;
1579 	}
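	/*
	 * Summary of the two wire formats parsed above: old DATA carries
	 * a 16-bit SSN and no separate FSN, so the TSN doubles as the
	 * FSN; I-DATA carries a 32-bit msg_id and an explicit 32-bit
	 * FSN, except on the first fragment, where that word holds the
	 * PPID and the FSN is implicitly 0.
	 */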
1580 	chunk_flags = ch->ch.chunk_flags;
1581 	if ((size_t)chk_length == clen) {
1582 		/*
1583 		 * Need to send an abort since we had an empty data chunk.
1584 		 */
1585 		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
1586 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1587 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1588 		*abort_flag = 1;
1589 		return (0);
1590 	}
1591 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1592 		asoc->send_sack = 1;
1593 	}
1594 	protocol_id = ch->dp.protocol_id;
1595 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1596 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1597 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1598 	}
1599 	if (stcb == NULL) {
1600 		return (0);
1601 	}
1602 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1603 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1604 		/* It is a duplicate */
1605 		SCTP_STAT_INCR(sctps_recvdupdata);
1606 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1607 			/* Record a dup for the next outbound sack */
1608 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1609 			asoc->numduptsns++;
1610 		}
1611 		asoc->send_sack = 1;
1612 		return (0);
1613 	}
1614 	/* Calculate the number of TSNs between the base and this TSN */
1615 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
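	/*
	 * Annotation (sketch of the macro, which is defined elsewhere; the
	 * exact form here is an assumption): the gap is the serial-arithmetic
	 * distance from the mapping array base, roughly
	 *
	 *	gap = (tsn >= base) ? (tsn - base)
	 *	                    : (0xffffffffU - base) + tsn + 1;
	 *
	 * so a TSN that wrapped past 2^32 still lands at the right bit offset.
	 */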
1616 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1617 		/* Can't hold the bit in the mapping array at its maximum size, toss it */
1618 		return (0);
1619 	}
1620 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1621 		SCTP_TCB_LOCK_ASSERT(stcb);
1622 		if (sctp_expand_mapping_array(asoc, gap)) {
1623 			/* Can't expand, drop it */
1624 			return (0);
1625 		}
1626 	}
1627 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1628 		*high_tsn = tsn;
1629 	}
1630 	/* See if we have received this one already */
1631 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1632 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1633 		SCTP_STAT_INCR(sctps_recvdupdata);
1634 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1635 			/* Record a dup for the next outbound sack */
1636 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1637 			asoc->numduptsns++;
1638 		}
1639 		asoc->send_sack = 1;
1640 		return (0);
1641 	}
1642 	/*
1643 	 * Check the GONE flag; duplicates would already have caused a SACK
1644 	 * to be sent up above
1645 	 */
1646 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1647 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1648 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1649 		/*
1650 		 * wait a minute, this guy is gone, there is no longer a
1651 		 * receiver. Send peer an ABORT!
1652 		 */
1653 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1654 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1655 		*abort_flag = 1;
1656 		return (0);
1657 	}
1658 	/*
1659 	 * Now before going further we see if there is room. If NOT then we
1660 	 * MAY let one through only IF this TSN is the one we are waiting
1661 	 * for on a partial delivery API.
1662 	 */
1663 
1664 	/* Is the stream valid? */
1665 	strmno = ntohs(ch->dp.stream_id);
1666 
1667 	if (strmno >= asoc->streamincnt) {
1668 		struct sctp_error_invalid_stream *cause;
1669 
1670 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1671 		    0, M_NOWAIT, 1, MT_DATA);
1672 		if (op_err != NULL) {
1673 			/* add some space up front so prepend will work well */
1674 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1675 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1676 			/*
1677 			 * Error causes are just parameters and this one has
1678 			 * two back-to-back parameter headers: one with the error
1679 			 * type and size, the other with the stream ID and a reserved field
1680 			 */
1681 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1682 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1683 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1684 			cause->stream_id = ch->dp.stream_id;
1685 			cause->reserved = htons(0);
1686 			sctp_queue_op_err(stcb, op_err);
1687 		}
1688 		SCTP_STAT_INCR(sctps_badsid);
1689 		SCTP_TCB_LOCK_ASSERT(stcb);
1690 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1691 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1692 			asoc->highest_tsn_inside_nr_map = tsn;
1693 		}
1694 		if (tsn == (asoc->cumulative_tsn + 1)) {
1695 			/* Update cum-ack */
1696 			asoc->cumulative_tsn = tsn;
1697 		}
1698 		return (0);
1699 	}
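	/*
	 * Annotation: note that in the invalid-stream case above the TSN is
	 * still marked in the nr-mapping array and may advance the cum-ack,
	 * i.e. the data is acknowledged to the peer even though the payload
	 * is discarded and only an Invalid Stream Identifier error is queued.
	 */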
1700 	strm = &asoc->strmin[strmno];
1701 	/*
1702 	 * If it's a fragmented message, let's see if we can find the control
1703 	 * on the reassembly queues.
1704 	 */
1705 	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
1706 		/*
1707 		 * The first *must* be fsn 0, and other (middle/end) pieces
1708 		 * can *not* be fsn 0.
1709 		 */
1710 		goto err_out;
1711 	}
1712 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1713 		/* See if we can find the re-assembly entity */
1714 		control = find_reasm_entry(strm, msg_id, ordered, old_data);
1715 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1716 		    chunk_flags, control);
1717 		if (control) {
1718 			/* We found something, does it belong? */
1719 			if (ordered && (msg_id != control->sinfo_ssn)) {
1720 		err_out:
				/*
				 * Fill in the diag string first; previously msg
				 * could reach sctp_generate_cause() uninitialized
				 * on the goto paths into err_out.
				 */
				snprintf(msg, sizeof(msg), "Reassembly problem (msg_id=%8.8x)", msg_id);
1721 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1722 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1723 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1724 				*abort_flag = 1;
1725 				return (0);
1726 			}
1727 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1728 				/*
1729 				 * We can't have a switched order with an
1730 				 * unordered chunk
1731 				 */
1732 				goto err_out;
1733 			}
1734 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1735 				/*
1736 				 * We can't have a switched unordered with an
1737 				 * ordered chunk
1738 				 */
1739 				goto err_out;
1740 			}
1741 		}
1742 	} else {
1743 		/*
1744 		 * It's a complete segment. Let's validate we don't have a
1745 		 * re-assembly going on with the same Stream/Seq (for
1746 		 * ordered) or in the same Stream for unordered.
1747 		 */
1748 		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
1749 		    chunk_flags);
1750 		if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
1751 			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
1752 			    chunk_flags,
1753 			    msg_id);
1754 
1755 			goto err_out;
1756 		}
1757 	}
1758 	/* now do the tests */
1759 	if (((asoc->cnt_on_all_streams +
1760 	    asoc->cnt_on_reasm_queue +
1761 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1762 	    (((int)asoc->my_rwnd) <= 0)) {
1763 		/*
1764 		 * When we have NO room in the rwnd we check to make sure
1765 		 * the reader is doing its job...
1766 		 */
1767 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1768 			/* some to read, wake-up */
1769 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1770 			struct socket *so;
1771 
1772 			so = SCTP_INP_SO(stcb->sctp_ep);
1773 			atomic_add_int(&stcb->asoc.refcnt, 1);
1774 			SCTP_TCB_UNLOCK(stcb);
1775 			SCTP_SOCKET_LOCK(so, 1);
1776 			SCTP_TCB_LOCK(stcb);
1777 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1778 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1779 				/* assoc was freed while we were unlocked */
1780 				SCTP_SOCKET_UNLOCK(so, 1);
1781 				return (0);
1782 			}
1783 #endif
1784 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1786 			SCTP_SOCKET_UNLOCK(so, 1);
1787 #endif
1788 		}
1789 		/* now is it in the mapping array of what we have accepted? */
1790 		if (nch == NULL) {
1791 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1792 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1793 				/* Nope, not in the valid range, dump it */
1794 		dump_packet:
1795 				sctp_set_rwnd(stcb, asoc);
1796 				if ((asoc->cnt_on_all_streams +
1797 				    asoc->cnt_on_reasm_queue +
1798 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1799 					SCTP_STAT_INCR(sctps_datadropchklmt);
1800 				} else {
1801 					SCTP_STAT_INCR(sctps_datadroprwnd);
1802 				}
1803 				*break_flag = 1;
1804 				return (0);
1805 			}
1806 		} else {
1807 			if (control == NULL) {
1808 				goto dump_packet;
1809 			}
1810 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1811 				goto dump_packet;
1812 			}
1813 		}
1814 	}
1815 #ifdef SCTP_ASOCLOG_OF_TSNS
1816 	SCTP_TCB_LOCK_ASSERT(stcb);
1817 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1818 		asoc->tsn_in_at = 0;
1819 		asoc->tsn_in_wrapped = 1;
1820 	}
1821 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1822 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1823 	asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id;
1824 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1825 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1826 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1827 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1828 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1829 	asoc->tsn_in_at++;
1830 #endif
1831 	/*
1832 	 * Before we continue let's validate that we are not being fooled by
1833 	 * an evil attacker. We can only have N * 8 chunks outstanding, given
1834 	 * the TSN spread allowed by the N-byte mapping array, so there is no
1835 	 * way our stream sequence numbers could have wrapped. We of course
1836 	 * only validate the FIRST fragment so the bit must be set.
1837 	 */
1838 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1839 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1840 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1841 	    SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) {
1842 		/* The incoming sseq is behind where we last delivered? */
1843 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1844 		    msg_id, asoc->strmin[strmno].last_sequence_delivered);
1845 
1846 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1847 		    asoc->strmin[strmno].last_sequence_delivered,
1848 		    tsn, strmno, msg_id);
1849 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1850 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1851 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1852 		*abort_flag = 1;
1853 		return (0);
1854 	}
1855 	/************************************
1856 	 * From here down we may find ch-> invalid
1857 	 * so it's a good idea NOT to use it.
1858 	 *************************************/
1859 	if (nch) {
1860 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1861 	} else {
1862 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1863 	}
1864 	if (last_chunk == 0) {
1865 		if (nch) {
1866 			dmbuf = SCTP_M_COPYM(*m,
1867 			    (offset + sizeof(struct sctp_idata_chunk)),
1868 			    the_len, M_NOWAIT);
1869 		} else {
1870 			dmbuf = SCTP_M_COPYM(*m,
1871 			    (offset + sizeof(struct sctp_data_chunk)),
1872 			    the_len, M_NOWAIT);
1873 		}
1874 #ifdef SCTP_MBUF_LOGGING
1875 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1876 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1877 		}
1878 #endif
1879 	} else {
1880 		/* We can steal the last chunk */
1881 		int l_len;
1882 
1883 		dmbuf = *m;
1884 		/* lop off the top part */
1885 		if (nch) {
1886 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1887 		} else {
1888 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1889 		}
1890 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1891 			l_len = SCTP_BUF_LEN(dmbuf);
1892 		} else {
1893 			/*
1894 			 * need to count up the size; hopefully we do not hit
1895 			 * this too often :-0
1896 			 */
1897 			struct mbuf *lat;
1898 
1899 			l_len = 0;
1900 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1901 				l_len += SCTP_BUF_LEN(lat);
1902 			}
1903 		}
1904 		if (l_len > the_len) {
1905 			/* Trim the round-up padding bytes off the end too */
1906 			m_adj(dmbuf, -(l_len - the_len));
1907 		}
1908 	}
1909 	if (dmbuf == NULL) {
1910 		SCTP_STAT_INCR(sctps_nomem);
1911 		return (0);
1912 	}
1913 	/*
1914 	 * Now no matter what we need a control, get one if we don't have
1915 	 * one (we may have gotten it above when we found the message was
1916 	 * fragmented).
1917 	 */
1918 	if (control == NULL) {
1919 		sctp_alloc_a_readq(stcb, control);
1920 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1921 		    protocol_id,
1922 		    strmno, msg_id,
1923 		    chunk_flags,
1924 		    NULL, fsn, msg_id);
1925 		if (control == NULL) {
1926 			SCTP_STAT_INCR(sctps_nomem);
1927 			return (0);
1928 		}
1929 		if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1930 			control->data = dmbuf;
1931 			control->tail_mbuf = NULL;
1932 			control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
1933 			control->top_fsn = control->fsn_included = fsn;
1934 		}
1935 		created_control = 1;
1936 	}
1937 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n",
1938 	    chunk_flags, ordered, msg_id, control);
1939 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1940 	    TAILQ_EMPTY(&asoc->resetHead) &&
1941 	    ((ordered == 0) ||
1942 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id &&
1943 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1944 		/* Candidate for express delivery */
1945 		/*
1946 		 * It's not fragmented, no PD-API is up, nothing is in the
1947 		 * delivery queue, it's un-ordered OR ordered and the next to
1948 		 * deliver, AND nothing else is stuck on the stream queue,
1949 		 * and there is room for it in the socket buffer. Let's just
1950 		 * stuff it up the buffer....
1951 		 */
1952 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1953 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1954 			asoc->highest_tsn_inside_nr_map = tsn;
1955 		}
1956 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n",
1957 		    control, msg_id);
1958 
1959 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1960 		    control, &stcb->sctp_socket->so_rcv,
1961 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1962 
1963 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1964 			/* for ordered, bump what we delivered */
1965 			strm->last_sequence_delivered++;
1966 		}
1967 		SCTP_STAT_INCR(sctps_recvexpress);
1968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1969 			sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno,
1970 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1971 		}
1972 		control = NULL;
1973 		goto finish_express_del;
1974 	}
1975 	/* Now will we need a chunk too? */
1976 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1977 		sctp_alloc_a_chunk(stcb, chk);
1978 		if (chk == NULL) {
1979 			/* No memory so we drop the chunk */
1980 			SCTP_STAT_INCR(sctps_nomem);
1981 			if (last_chunk == 0) {
1982 				/* we copied it, free the copy */
1983 				sctp_m_freem(dmbuf);
1984 			}
1985 			return (0);
1986 		}
1987 		chk->rec.data.TSN_seq = tsn;
1988 		chk->no_fr_allowed = 0;
1989 		chk->rec.data.fsn_num = fsn;
1990 		chk->rec.data.stream_seq = msg_id;
1991 		chk->rec.data.stream_number = strmno;
1992 		chk->rec.data.payloadtype = protocol_id;
1993 		chk->rec.data.context = stcb->asoc.context;
1994 		chk->rec.data.doing_fast_retransmit = 0;
1995 		chk->rec.data.rcv_flags = chunk_flags;
1996 		chk->asoc = asoc;
1997 		chk->send_size = the_len;
1998 		chk->whoTo = net;
1999 		SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (msg_id: %u)\n",
2000 		    chk,
2001 		    control, msg_id);
2002 		atomic_add_int(&net->ref_count, 1);
2003 		chk->data = dmbuf;
2004 	}
2005 	/* Set the appropriate TSN mark */
2006 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2007 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2008 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2009 			asoc->highest_tsn_inside_nr_map = tsn;
2010 		}
2011 	} else {
2012 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2013 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2014 			asoc->highest_tsn_inside_map = tsn;
2015 		}
2016 	}
2017 	/* Now is it complete (i.e. not fragmented)? */
2018 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2019 		/*
2020 		 * Special check for when streams are resetting. We could be
2021 		 * smarter about this and check the actual stream to see if it
2022 		 * is not being reset... that way we would not create head-of-line
2023 		 * blocking (HOLB) between streams being reset and those not being
2024 		 * reset.
2025 		 *
2026 		 */
2027 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2028 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2029 			/*
2030 			 * yep, it's past where we need to reset... go ahead
2031 			 * and queue it.
2032 			 */
2033 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2034 				/* first one on */
2035 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2036 			} else {
2037 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2038 				unsigned char inserted = 0;
2039 
2040 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2041 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2042 
2043 						continue;
2044 					} else {
2045 						/* found it */
2046 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2047 						inserted = 1;
2048 						break;
2049 					}
2050 				}
2051 				if (inserted == 0) {
2052 					/*
2053 					 * the new control has the largest
2054 					 * TSN of anything queued, so it
2055 					 * must be put at the end.
2056 					 */
2057 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2058 				}
2059 			}
2060 			goto finish_express_del;
2061 		}
2062 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2063 			/* queue directly into socket buffer */
2064 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n",
2065 			    control, msg_id);
2066 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2067 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2068 			    control,
2069 			    &stcb->sctp_socket->so_rcv, 1,
2070 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2071 
2072 		} else {
2073 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control,
2074 			    msg_id);
2075 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2076 			if (*abort_flag) {
2077 				if (last_chunk) {
2078 					*m = NULL;
2079 				}
2080 				return (0);
2081 			}
2082 		}
2083 		goto finish_express_del;
2084 	}
2085 	/* If we reach here its a reassembly */
2086 	need_reasm_check = 1;
2087 	SCTPDBG(SCTP_DEBUG_XXX,
2088 	    "Queue data to stream for reasm control: %p msg_id: %u\n",
2089 	    control, msg_id);
2090 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2091 	if (*abort_flag) {
2092 		/*
2093 		 * the assoc is now gone and chk was put onto the reasm
2094 		 * queue, which has all been freed.
2095 		 */
2096 		if (last_chunk) {
2097 			*m = NULL;
2098 		}
2099 		return (0);
2100 	}
2101 finish_express_del:
2102 	/* Here we tidy up things */
2103 	if (tsn == (asoc->cumulative_tsn + 1)) {
2104 		/* Update cum-ack */
2105 		asoc->cumulative_tsn = tsn;
2106 	}
2107 	if (last_chunk) {
2108 		*m = NULL;
2109 	}
2110 	if (ordered) {
2111 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2112 	} else {
2113 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2114 	}
2115 	SCTP_STAT_INCR(sctps_recvdata);
2116 	/* Set it present please */
2117 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2118 		sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2119 	}
2120 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2121 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2122 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2123 	}
2124 	/* check the special flag for stream resets */
2125 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2126 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2127 		/*
2128 		 * we have finished working through the backlogged TSNs, now it
2129 		 * is time to reset streams. 1: call the reset function. 2: free
2130 		 * the pending_reply space. 3: distribute any chunks in the
2131 		 * pending_reply_queue.
2132 		 */
2133 		struct sctp_queued_to_read *ctl, *nctl;
2134 
2135 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2136 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2137 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2138 		SCTP_FREE(liste, SCTP_M_STRESET);
2139 		/* sa_ignore FREED_MEMORY */
2140 		liste = TAILQ_FIRST(&asoc->resetHead);
2141 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2142 			/* All can be removed */
2143 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2144 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2145 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2146 				if (*abort_flag) {
2147 					return (0);
2148 				}
2149 			}
2150 		} else {
2151 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2152 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2153 					break;
2154 				}
2155 				/*
2156 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2157 				 * process it which is the NOT of
2158 				 * ctl->sinfo_tsn > liste->tsn
2159 				 */
2160 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2161 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2162 				if (*abort_flag) {
2163 					return (0);
2164 				}
2165 			}
2166 		}
2167 		/*
2168 		 * Now service re-assembly to pick up anything that has been
2169 		 * held on the reassembly queue.
2170 		 */
2171 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2172 		need_reasm_check = 0;
2173 	}
2174 	if (need_reasm_check) {
2175 		/* Another one waits? */
2176 		(void)sctp_deliver_reasm_check(stcb, asoc, strm);
2177 	}
2178 	return (1);
2179 }
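
/*
 * Annotation: sctp_process_a_data_chunk() returns 1 only when the chunk was
 * fully accepted and accounted for; every early-out path returns 0 and
 * reports its reason through *abort_flag (association is being torn down)
 * or *break_flag (no rwnd/queue space left, stop processing this packet).
 */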
2180 
2181 static const int8_t sctp_map_lookup_tab[256] = {
2182 	0, 1, 0, 2, 0, 1, 0, 3,
2183 	0, 1, 0, 2, 0, 1, 0, 4,
2184 	0, 1, 0, 2, 0, 1, 0, 3,
2185 	0, 1, 0, 2, 0, 1, 0, 5,
2186 	0, 1, 0, 2, 0, 1, 0, 3,
2187 	0, 1, 0, 2, 0, 1, 0, 4,
2188 	0, 1, 0, 2, 0, 1, 0, 3,
2189 	0, 1, 0, 2, 0, 1, 0, 6,
2190 	0, 1, 0, 2, 0, 1, 0, 3,
2191 	0, 1, 0, 2, 0, 1, 0, 4,
2192 	0, 1, 0, 2, 0, 1, 0, 3,
2193 	0, 1, 0, 2, 0, 1, 0, 5,
2194 	0, 1, 0, 2, 0, 1, 0, 3,
2195 	0, 1, 0, 2, 0, 1, 0, 4,
2196 	0, 1, 0, 2, 0, 1, 0, 3,
2197 	0, 1, 0, 2, 0, 1, 0, 7,
2198 	0, 1, 0, 2, 0, 1, 0, 3,
2199 	0, 1, 0, 2, 0, 1, 0, 4,
2200 	0, 1, 0, 2, 0, 1, 0, 3,
2201 	0, 1, 0, 2, 0, 1, 0, 5,
2202 	0, 1, 0, 2, 0, 1, 0, 3,
2203 	0, 1, 0, 2, 0, 1, 0, 4,
2204 	0, 1, 0, 2, 0, 1, 0, 3,
2205 	0, 1, 0, 2, 0, 1, 0, 6,
2206 	0, 1, 0, 2, 0, 1, 0, 3,
2207 	0, 1, 0, 2, 0, 1, 0, 4,
2208 	0, 1, 0, 2, 0, 1, 0, 3,
2209 	0, 1, 0, 2, 0, 1, 0, 5,
2210 	0, 1, 0, 2, 0, 1, 0, 3,
2211 	0, 1, 0, 2, 0, 1, 0, 4,
2212 	0, 1, 0, 2, 0, 1, 0, 3,
2213 	0, 1, 0, 2, 0, 1, 0, 8
2214 };
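
/*
 * Annotation: sctp_map_lookup_tab[val] is the number of consecutive 1-bits
 * at the low end of val, i.e. the bit offset of the first 0-bit; e.g.
 * val = 0x17 (0001 0111) -> 3 and val = 0x0f -> 4.  A minimal equivalent
 * without the table (illustrative sketch only):
 *
 *	static int
 *	count_low_ones(uint8_t val)
 *	{
 *		int n = 0;
 *
 *		while (val & 1) {
 *			n++;
 *			val >>= 1;
 *		}
 *		return (n);
 *	}
 */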
2215 
2216 
2217 void
2218 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2219 {
2220 	/*
2221 	 * Now we also need to check the mapping array in a couple of ways.
2222 	 * 1) Did we move the cum-ack point?
2223 	 *
2224 	 * When you first glance at this you might think that all entries that
2225 	 * make up the position of the cum-ack would be in the nr-mapping
2226 	 * array only... i.e. things up to the cum-ack are always
2227 	 * deliverable. That's true with one exception: when it's a fragmented
2228 	 * message we may not deliver the data until some threshold (or all
2229 	 * of it) is in place. So we must OR the nr_mapping_array and
2230 	 * mapping_array to get a true picture of the cum-ack.
2231 	 */
2232 	struct sctp_association *asoc;
2233 	int at;
2234 	uint8_t val;
2235 	int slide_from, slide_end, lgap, distance;
2236 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2237 
2238 	asoc = &stcb->asoc;
2239 
2240 	old_cumack = asoc->cumulative_tsn;
2241 	old_base = asoc->mapping_array_base_tsn;
2242 	old_highest = asoc->highest_tsn_inside_map;
2243 	/*
2244 	 * We could probably improve this a small bit by calculating the
2245 	 * offset of the current cum-ack as the starting point.
2246 	 */
2247 	at = 0;
2248 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2249 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2250 		if (val == 0xff) {
2251 			at += 8;
2252 		} else {
2253 			/* there is a 0 bit */
2254 			at += sctp_map_lookup_tab[val];
2255 			break;
2256 		}
2257 	}
2258 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2259 
2260 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2261 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2262 #ifdef INVARIANTS
2263 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2264 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2265 #else
2266 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2267 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2268 		sctp_print_mapping_array(asoc);
2269 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2270 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2271 		}
2272 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2273 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2274 #endif
2275 	}
2276 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2277 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2278 	} else {
2279 		highest_tsn = asoc->highest_tsn_inside_map;
2280 	}
2281 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2282 		/* The complete array was completed by a single FR */
2283 		/* highest becomes the cum-ack */
2284 		int clr;
2285 
2286 #ifdef INVARIANTS
2287 		unsigned int i;
2288 
2289 #endif
2290 
2291 		/* clear the array */
2292 		clr = ((at + 7) >> 3);
2293 		if (clr > asoc->mapping_array_size) {
2294 			clr = asoc->mapping_array_size;
2295 		}
2296 		memset(asoc->mapping_array, 0, clr);
2297 		memset(asoc->nr_mapping_array, 0, clr);
2298 #ifdef INVARIANTS
2299 		for (i = 0; i < asoc->mapping_array_size; i++) {
2300 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2301 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2302 				sctp_print_mapping_array(asoc);
2303 			}
2304 		}
2305 #endif
2306 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2307 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2308 	} else if (at >= 8) {
2309 		/* we can slide the mapping array down */
2310 		/* slide_from holds where we hit the first NON 0xff byte */
2311 
2312 		/*
2313 		 * now calculate the ceiling of the move using our highest
2314 		 * TSN value
2315 		 */
2316 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2317 		slide_end = (lgap >> 3);
2318 		if (slide_end < slide_from) {
2319 			sctp_print_mapping_array(asoc);
2320 #ifdef INVARIANTS
2321 			panic("impossible slide");
2322 #else
2323 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2324 			    lgap, slide_end, slide_from, at);
2325 			return;
2326 #endif
2327 		}
2328 		if (slide_end > asoc->mapping_array_size) {
2329 #ifdef INVARIANTS
2330 			panic("would overrun buffer");
2331 #else
2332 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2333 			    asoc->mapping_array_size, slide_end);
2334 			slide_end = asoc->mapping_array_size;
2335 #endif
2336 		}
2337 		distance = (slide_end - slide_from) + 1;
2338 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2339 			sctp_log_map(old_base, old_cumack, old_highest,
2340 			    SCTP_MAP_PREPARE_SLIDE);
2341 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2342 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2343 		}
2344 		if (distance + slide_from > asoc->mapping_array_size ||
2345 		    distance < 0) {
2346 			/*
2347 			 * Here we do NOT slide forward the array so that
2348 			 * hopefully when more data comes in to fill it up
2349 			 * we will be able to slide it forward. Really I
2350 			 * don't think this should happen :-0
2351 			 */
2352 
2353 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2354 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2355 				    (uint32_t) asoc->mapping_array_size,
2356 				    SCTP_MAP_SLIDE_NONE);
2357 			}
2358 		} else {
2359 			int ii;
2360 
2361 			for (ii = 0; ii < distance; ii++) {
2362 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2363 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2364 
2365 			}
2366 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2367 				asoc->mapping_array[ii] = 0;
2368 				asoc->nr_mapping_array[ii] = 0;
2369 			}
2370 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2371 				asoc->highest_tsn_inside_map += (slide_from << 3);
2372 			}
2373 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2374 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2375 			}
2376 			asoc->mapping_array_base_tsn += (slide_from << 3);
2377 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2378 				sctp_log_map(asoc->mapping_array_base_tsn,
2379 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2380 				    SCTP_MAP_SLIDE_RESULT);
2381 			}
2382 		}
2383 	}
2384 }
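
/*
 * Worked example (annotation): suppose the OR of the two maps begins
 * 0xff 0xff 0x17.  The scan above stops at byte 2 with
 * at = 8 + 8 + sctp_map_lookup_tab[0x17] = 19, so the new cumulative_tsn is
 * mapping_array_base_tsn + 18.  When a slide is possible, bytes are copied
 * down starting at index slide_from and mapping_array_base_tsn advances by
 * slide_from * 8 = 16.
 */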
2385 
2386 void
2387 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2388 {
2389 	struct sctp_association *asoc;
2390 	uint32_t highest_tsn;
2391 
2392 	asoc = &stcb->asoc;
2393 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2394 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2395 	} else {
2396 		highest_tsn = asoc->highest_tsn_inside_map;
2397 	}
2398 
2399 	/*
2400 	 * Now we need to see if we need to queue a sack or just start the
2401 	 * timer (if allowed).
2402 	 */
2403 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2404 		/*
2405 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2406 		 * sure the SACK timer is off and instead send a SHUTDOWN and a
2407 		 * SACK
2408 		 */
2409 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2410 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2411 			    stcb->sctp_ep, stcb, NULL,
2412 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2413 		}
2414 		sctp_send_shutdown(stcb,
2415 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2416 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2417 	} else {
2418 		int is_a_gap;
2419 
2420 		/* is there a gap now ? */
2421 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2422 
2423 		/*
2424 		 * CMT DAC algorithm: increase number of packets received
2425 		 * since last ack
2426 		 */
2427 		stcb->asoc.cmt_dac_pkts_rcvd++;
2428 
2429 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2430 							 * SACK */
2431 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2432 							 * longer is one */
2433 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2434 		    (is_a_gap) ||	/* is still a gap */
2435 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2436 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2437 		    ) {
2438 
2439 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2440 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2441 			    (stcb->asoc.send_sack == 0) &&
2442 			    (stcb->asoc.numduptsns == 0) &&
2443 			    (stcb->asoc.delayed_ack) &&
2444 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2445 
2446 				/*
2447 				 * CMT DAC algorithm: With CMT, delay acks
2448 				 * even in the face of reordering. Therefore,
2449 				 * acks that do not have to be sent because of
2450 				 * the above reasons will be delayed. That is, acks
2453 				 * that would have been sent due to gap
2454 				 * reports will be delayed with DAC. Start
2455 				 * the delayed ack timer.
2456 				 */
2457 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2458 				    stcb->sctp_ep, stcb, NULL);
2459 			} else {
2460 				/*
2461 				 * Ok we must build a SACK since the timer
2462 				 * is pending, we got our first packet OR
2463 				 * there are gaps or duplicates.
2464 				 */
2465 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2466 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2467 			}
2468 		} else {
2469 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2470 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2471 				    stcb->sctp_ep, stcb, NULL);
2472 			}
2473 		}
2474 	}
2475 }
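
/*
 * Annotation: outside of SHUTDOWN-SENT the routine above reduces to a
 * three-way choice: send a SACK immediately (explicit request, duplicates,
 * an open or just-closed gap, delayed acks disabled, or the sack_freq
 * packet limit reached), start the delayed-ack timer in the CMT DAC case,
 * or arm the timer if it is not already running.
 */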
2476 
2477 int
2478 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2479     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2480     struct sctp_nets *net, uint32_t * high_tsn)
2481 {
2482 	struct sctp_chunkhdr *ch, chunk_buf;
2483 	struct sctp_association *asoc;
2484 	int num_chunks = 0;	/* number of control chunks processed */
2485 	int stop_proc = 0;
2486 	int chk_length, break_flag, last_chunk;
2487 	int abort_flag = 0, was_a_gap;
2488 	struct mbuf *m;
2489 	uint32_t highest_tsn;
2490 
2491 	/* set the rwnd */
2492 	sctp_set_rwnd(stcb, &stcb->asoc);
2493 
2494 	m = *mm;
2495 	SCTP_TCB_LOCK_ASSERT(stcb);
2496 	asoc = &stcb->asoc;
2497 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2498 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2499 	} else {
2500 		highest_tsn = asoc->highest_tsn_inside_map;
2501 	}
2502 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2503 	/*
2504 	 * setup where we got the last DATA packet from for any SACK that
2505 	 * may need to go out. Don't bump the net. This is done ONLY when a
2506 	 * chunk is assigned.
2507 	 */
2508 	asoc->last_data_chunk_from = net;
2509 
2510 	/*-
2511 	 * Now before we proceed we must figure out if this is a wasted
2512 	 * cluster... i.e. it is a small packet sent in and yet the driver
2513 	 * underneath allocated a full cluster for it. If so we must copy it
2514 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2515 	 * with cluster starvation. Note for __Panda__ we don't do this
2516 	 * since it has clusters all the way down to 64 bytes.
2517 	 */
2518 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2519 		/* we only handle mbufs that are singletons.. not chains */
2520 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2521 		if (m) {
2522 			/* ok, let's see if we can copy the data up */
2523 			caddr_t *from, *to;
2524 
2525 			/* get the pointers and copy */
2526 			to = mtod(m, caddr_t *);
2527 			from = mtod((*mm), caddr_t *);
2528 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2529 			/* copy the length and free up the old */
2530 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2531 			sctp_m_freem(*mm);
2532 			/* success, back copy */
2533 			*mm = m;
2534 		} else {
2535 			/* We are in trouble in the mbuf world .. yikes */
2536 			m = *mm;
2537 		}
2538 	}
2539 	/* get pointer to the first chunk header */
2540 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2541 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2542 	if (ch == NULL) {
2543 		return (1);
2544 	}
2545 	/*
2546 	 * process all DATA chunks...
2547 	 */
2548 	*high_tsn = asoc->cumulative_tsn;
2549 	break_flag = 0;
2550 	asoc->data_pkts_seen++;
2551 	while (stop_proc == 0) {
2552 		/* validate chunk length */
2553 		chk_length = ntohs(ch->chunk_length);
2554 		if (length - *offset < chk_length) {
2555 			/* all done, mutilated chunk */
2556 			stop_proc = 1;
2557 			continue;
2558 		}
2559 		if ((asoc->idata_supported == 1) &&
2560 		    (ch->chunk_type == SCTP_DATA)) {
2561 			struct mbuf *op_err;
2562 			char msg[SCTP_DIAG_INFO_LEN];
2563 
2564 			snprintf(msg, sizeof(msg), "DATA chunk received when I-DATA was negotiated");
2565 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2566 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2567 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2568 			return (2);
2569 		}
2570 		if ((asoc->idata_supported == 0) &&
2571 		    (ch->chunk_type == SCTP_IDATA)) {
2572 			struct mbuf *op_err;
2573 			char msg[SCTP_DIAG_INFO_LEN];
2574 
2575 			snprintf(msg, sizeof(msg), "I-DATA chunk received when DATA was negotiated");
2576 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2577 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2578 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2579 			return (2);
2580 		}
2581 		if ((ch->chunk_type == SCTP_DATA) ||
2582 		    (ch->chunk_type == SCTP_IDATA)) {
2583 			int clen;
2584 
2585 			if (ch->chunk_type == SCTP_DATA) {
2586 				clen = sizeof(struct sctp_data_chunk);
2587 			} else {
2588 				clen = sizeof(struct sctp_idata_chunk);
2589 			}
2590 			if (chk_length < clen) {
2591 				/*
2592 				 * Need to send an abort since we had an
2593 				 * invalid data chunk.
2594 				 */
2595 				struct mbuf *op_err;
2596 				char msg[SCTP_DIAG_INFO_LEN];
2597 
2598 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2599 				    chk_length);
2600 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2601 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2602 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2603 				return (2);
2604 			}
2605 #ifdef SCTP_AUDITING_ENABLED
2606 			sctp_audit_log(0xB1, 0);
2607 #endif
2608 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2609 				last_chunk = 1;
2610 			} else {
2611 				last_chunk = 0;
2612 			}
2613 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2614 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2615 			    last_chunk, ch->chunk_type)) {
2616 				num_chunks++;
2617 			}
2618 			if (abort_flag)
2619 				return (2);
2620 
2621 			if (break_flag) {
2622 				/*
2623 				 * Set because of out of rwnd space and no
2624 				 * drop rep space left.
2625 				 */
2626 				stop_proc = 1;
2627 				continue;
2628 			}
2629 		} else {
2630 			/* not a data chunk in the data region */
2631 			switch (ch->chunk_type) {
2632 			case SCTP_INITIATION:
2633 			case SCTP_INITIATION_ACK:
2634 			case SCTP_SELECTIVE_ACK:
2635 			case SCTP_NR_SELECTIVE_ACK:
2636 			case SCTP_HEARTBEAT_REQUEST:
2637 			case SCTP_HEARTBEAT_ACK:
2638 			case SCTP_ABORT_ASSOCIATION:
2639 			case SCTP_SHUTDOWN:
2640 			case SCTP_SHUTDOWN_ACK:
2641 			case SCTP_OPERATION_ERROR:
2642 			case SCTP_COOKIE_ECHO:
2643 			case SCTP_COOKIE_ACK:
2644 			case SCTP_ECN_ECHO:
2645 			case SCTP_ECN_CWR:
2646 			case SCTP_SHUTDOWN_COMPLETE:
2647 			case SCTP_AUTHENTICATION:
2648 			case SCTP_ASCONF_ACK:
2649 			case SCTP_PACKET_DROPPED:
2650 			case SCTP_STREAM_RESET:
2651 			case SCTP_FORWARD_CUM_TSN:
2652 			case SCTP_ASCONF:
2653 				/*
2654 				 * Now, what do we do with KNOWN chunks that
2655 				 * are NOT in the right place?
2656 				 *
2657 				 * For now, I do nothing but ignore them. We
2658 				 * may later want to add sysctl stuff to
2659 				 * switch out and do either an ABORT() or
2660 				 * possibly process them.
2661 				 */
2662 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2663 					struct mbuf *op_err;
2664 					char msg[SCTP_DIAG_INFO_LEN];
2665 
2666 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2667 					    ch->chunk_type);
2668 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2669 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2670 					return (2);
2671 				}
2672 				break;
2673 			default:
2674 				/* unknown chunk type, use bit rules */
2675 				if (ch->chunk_type & 0x40) {
2676 					/* Add an error report to the queue */
2677 					struct mbuf *op_err;
2678 					struct sctp_gen_error_cause *cause;
2679 
2680 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2681 					    0, M_NOWAIT, 1, MT_DATA);
2682 					if (op_err != NULL) {
2683 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2684 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2685 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2686 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2687 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2688 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2689 							sctp_queue_op_err(stcb, op_err);
2690 						} else {
2691 							sctp_m_freem(op_err);
2692 						}
2693 					}
2694 				}
2695 				if ((ch->chunk_type & 0x80) == 0) {
2696 					/* discard the rest of this packet */
2697 					stop_proc = 1;
2698 				}	/* else skip this bad chunk and
2699 					 * continue... */
2700 				break;
2701 			}	/* switch of chunk type */
2702 		}
2703 		*offset += SCTP_SIZE32(chk_length);
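		/*
		 * Annotation (assumption about the macro, defined elsewhere):
		 * SCTP_SIZE32() rounds the chunk length up to a multiple of
		 * four, e.g. ((len + 3) & ~3), since chunks are padded to
		 * 32-bit boundaries on the wire.
		 */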
2704 		if ((*offset >= length) || stop_proc) {
2705 			/* no more data left in the mbuf chain */
2706 			stop_proc = 1;
2707 			continue;
2708 		}
2709 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2710 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2711 		if (ch == NULL) {
2712 			*offset = length;
2713 			stop_proc = 1;
2714 			continue;
2715 		}
2716 	}
2717 	if (break_flag) {
2718 		/*
2719 		 * we need to report rwnd overrun drops.
2720 		 */
2721 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2722 	}
2723 	if (num_chunks) {
2724 		/*
2725 		 * Did we get data? If so, update the time for auto-close and
2726 		 * give peer credit for being alive.
2727 		 */
2728 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2729 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2730 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2731 			    stcb->asoc.overall_error_count,
2732 			    0,
2733 			    SCTP_FROM_SCTP_INDATA,
2734 			    __LINE__);
2735 		}
2736 		stcb->asoc.overall_error_count = 0;
2737 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2738 	}
2739 	/* now service all of the reassm queue if needed */
2740 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2741 		/* Assure that we ack right away */
2742 		stcb->asoc.send_sack = 1;
2743 	}
2744 	/* Start a sack timer or QUEUE a SACK for sending */
2745 	sctp_sack_check(stcb, was_a_gap);
2746 	return (0);
2747 }
2748 
2749 static int
2750 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2751     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2752     int *num_frs,
2753     uint32_t * biggest_newly_acked_tsn,
2754     uint32_t * this_sack_lowest_newack,
2755     int *rto_ok)
2756 {
2757 	struct sctp_tmit_chunk *tp1;
2758 	unsigned int theTSN;
2759 	int j, wake_him = 0, circled = 0;
2760 
2761 	/* Recover the tp1 we last saw */
2762 	tp1 = *p_tp1;
2763 	if (tp1 == NULL) {
2764 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2765 	}
2766 	for (j = frag_strt; j <= frag_end; j++) {
2767 		theTSN = j + last_tsn;
2768 		while (tp1) {
2769 			if (tp1->rec.data.doing_fast_retransmit)
2770 				(*num_frs) += 1;
2771 
2772 			/*-
2773 			 * CMT: CUCv2 algorithm. For each TSN being
2774 			 * processed from the sent queue, track the
2775 			 * next expected pseudo-cumack, or
2776 			 * rtx_pseudo_cumack, if required. Separate
2777 			 * cumack trackers for first transmissions,
2778 			 * and retransmissions.
2779 			 */
2780 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2781 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2782 			    (tp1->snd_count == 1)) {
2783 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2784 				tp1->whoTo->find_pseudo_cumack = 0;
2785 			}
2786 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2787 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2788 			    (tp1->snd_count > 1)) {
2789 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2790 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2791 			}
2792 			if (tp1->rec.data.TSN_seq == theTSN) {
2793 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2794 					/*-
2795 					 * must be held until
2796 					 * cum-ack passes
2797 					 */
2798 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2799 						/*-
2800 						 * If it is less than RESEND, it is
2801 						 * now no-longer in flight.
2802 						 * Higher values may already be set
2803 						 * via previous Gap Ack Blocks...
2804 						 * i.e. ACKED or RESEND.
2805 						 */
2806 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2807 						    *biggest_newly_acked_tsn)) {
2808 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2809 						}
2810 						/*-
2811 						 * CMT: SFR algo (and HTNA) - set
2812 						 * saw_newack to 1 for dest being
2813 						 * newly acked. update
2814 						 * this_sack_highest_newack if
2815 						 * appropriate.
2816 						 */
2817 						if (tp1->rec.data.chunk_was_revoked == 0)
2818 							tp1->whoTo->saw_newack = 1;
2819 
2820 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2821 						    tp1->whoTo->this_sack_highest_newack)) {
2822 							tp1->whoTo->this_sack_highest_newack =
2823 							    tp1->rec.data.TSN_seq;
2824 						}
2825 						/*-
2826 						 * CMT DAC algo: also update
2827 						 * this_sack_lowest_newack
2828 						 */
2829 						if (*this_sack_lowest_newack == 0) {
2830 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2831 								sctp_log_sack(*this_sack_lowest_newack,
2832 								    last_tsn,
2833 								    tp1->rec.data.TSN_seq,
2834 								    0,
2835 								    0,
2836 								    SCTP_LOG_TSN_ACKED);
2837 							}
2838 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2839 						}
2840 						/*-
2841 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2842 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2843 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2844 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2845 						 * Separate pseudo_cumack trackers for first transmissions and
2846 						 * retransmissions.
2847 						 */
2848 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2849 							if (tp1->rec.data.chunk_was_revoked == 0) {
2850 								tp1->whoTo->new_pseudo_cumack = 1;
2851 							}
2852 							tp1->whoTo->find_pseudo_cumack = 1;
2853 						}
2854 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2855 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2856 						}
2857 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2858 							if (tp1->rec.data.chunk_was_revoked == 0) {
2859 								tp1->whoTo->new_pseudo_cumack = 1;
2860 							}
2861 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2862 						}
2863 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2864 							sctp_log_sack(*biggest_newly_acked_tsn,
2865 							    last_tsn,
2866 							    tp1->rec.data.TSN_seq,
2867 							    frag_strt,
2868 							    frag_end,
2869 							    SCTP_LOG_TSN_ACKED);
2870 						}
2871 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2872 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2873 							    tp1->whoTo->flight_size,
2874 							    tp1->book_size,
2875 							    (uint32_t) (uintptr_t) tp1->whoTo,
2876 							    tp1->rec.data.TSN_seq);
2877 						}
2878 						sctp_flight_size_decrease(tp1);
2879 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2880 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2881 							    tp1);
2882 						}
2883 						sctp_total_flight_decrease(stcb, tp1);
2884 
2885 						tp1->whoTo->net_ack += tp1->send_size;
2886 						if (tp1->snd_count < 2) {
2887 							/*-
2888 							 * True non-retransmitted chunk
2889 							 */
2890 							tp1->whoTo->net_ack2 += tp1->send_size;
2891 
2892 							/*-
2893 							 * update RTO too ?
2894 							 */
2895 							if (tp1->do_rtt) {
2896 								if (*rto_ok) {
2897 									tp1->whoTo->RTO =
2898 									    sctp_calculate_rto(stcb,
2899 									    &stcb->asoc,
2900 									    tp1->whoTo,
2901 									    &tp1->sent_rcv_time,
2902 									    sctp_align_safe_nocopy,
2903 									    SCTP_RTT_FROM_DATA);
2904 									*rto_ok = 0;
2905 								}
2906 								if (tp1->whoTo->rto_needed == 0) {
2907 									tp1->whoTo->rto_needed = 1;
2908 								}
2909 								tp1->do_rtt = 0;
2910 							}
2911 						}
2912 					}
2913 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2914 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2915 						    stcb->asoc.this_sack_highest_gap)) {
2916 							stcb->asoc.this_sack_highest_gap =
2917 							    tp1->rec.data.TSN_seq;
2918 						}
2919 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2920 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2921 #ifdef SCTP_AUDITING_ENABLED
2922 							sctp_audit_log(0xB2,
2923 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2924 #endif
2925 						}
2926 					}
2927 					/*-
2928 					 * All chunks NOT UNSENT fall through here and are marked
2929 					 * (leave PR-SCTP ones that are to skip alone though)
2930 					 */
2931 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2932 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2933 						tp1->sent = SCTP_DATAGRAM_MARKED;
2934 					}
2935 					if (tp1->rec.data.chunk_was_revoked) {
2936 						/* deflate the cwnd */
2937 						tp1->whoTo->cwnd -= tp1->book_size;
2938 						tp1->rec.data.chunk_was_revoked = 0;
2939 					}
2940 					/* NR Sack code here */
2941 					if (nr_sacking &&
2942 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2943 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2944 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2945 #ifdef INVARIANTS
2946 						} else {
2947 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2948 #endif
2949 						}
2950 						if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2951 						    (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2952 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2953 							stcb->asoc.trigger_reset = 1;
2954 						}
2955 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2956 						if (tp1->data) {
2957 							/*
2958 							 * sa_ignore
2959 							 * NO_NULL_CHK
2960 							 */
2961 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2962 							sctp_m_freem(tp1->data);
2963 							tp1->data = NULL;
2964 						}
2965 						wake_him++;
2966 					}
2967 				}
2968 				break;
2969 			}	/* if (tp1->TSN_seq == theTSN) */
2970 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2971 				break;
2972 			}
2973 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2974 			if ((tp1 == NULL) && (circled == 0)) {
2975 				circled++;
2976 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2977 			}
2978 		}		/* end while (tp1) */
2979 		if (tp1 == NULL) {
2980 			circled = 0;
2981 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2982 		}
2983 		/* In case the fragments were not in order we must reset */
2984 	}			/* end for (j = fragStart */
2985 	*p_tp1 = tp1;
2986 	return (wake_him);	/* Return value only used for nr-sack */
2987 }
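
/*
 * Worked example (annotation): gap ack block offsets are relative to the
 * cumulative TSN carried in the SACK.  With last_tsn = 1000 and a block of
 * frag_strt = 2, frag_end = 4, the loop above visits theTSN = 1002, 1003
 * and 1004, marking the matching chunks on the sent queue.
 */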
2988 
2989 
2990 static int
2991 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2992     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2993     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2994     int num_seg, int num_nr_seg, int *rto_ok)
2995 {
2996 	struct sctp_gap_ack_block *frag, block;
2997 	struct sctp_tmit_chunk *tp1;
2998 	int i;
2999 	int num_frs = 0;
3000 	int chunk_freed;
3001 	int non_revocable;
3002 	uint16_t frag_strt, frag_end, prev_frag_end;
3003 
3004 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3005 	prev_frag_end = 0;
3006 	chunk_freed = 0;
3007 
3008 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3009 		if (i == num_seg) {
3010 			prev_frag_end = 0;
3011 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3012 		}
3013 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3014 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3015 		*offset += sizeof(block);
3016 		if (frag == NULL) {
3017 			return (chunk_freed);
3018 		}
3019 		frag_strt = ntohs(frag->start);
3020 		frag_end = ntohs(frag->end);
3021 
3022 		if (frag_strt > frag_end) {
3023 			/* This gap report is malformed, skip it. */
3024 			continue;
3025 		}
3026 		if (frag_strt <= prev_frag_end) {
3027 			/* This gap report is not in order, so restart. */
3028 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3029 		}
3030 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3031 			*biggest_tsn_acked = last_tsn + frag_end;
3032 		}
3033 		if (i < num_seg) {
3034 			non_revocable = 0;
3035 		} else {
3036 			non_revocable = 1;
3037 		}
3038 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3039 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3040 		    this_sack_lowest_newack, rto_ok)) {
3041 			chunk_freed = 1;
3042 		}
3043 		prev_frag_end = frag_end;
3044 	}
3045 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3046 		if (num_frs)
3047 			sctp_log_fr(*biggest_tsn_acked,
3048 			    *biggest_newly_acked_tsn,
3049 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3050 	}
3051 	return (chunk_freed);
3052 }
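
/*
 * Annotation: the first num_seg blocks are ordinary (revocable) gap
 * reports; the trailing num_nr_seg blocks come from an NR-SACK and are
 * processed with nr_sacking set, which allows the data to be freed at once
 * (SCTP_DATAGRAM_NR_ACKED) instead of merely being marked.
 */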
3053 
3054 static void
3055 sctp_check_for_revoked(struct sctp_tcb *stcb,
3056     struct sctp_association *asoc, uint32_t cumack,
3057     uint32_t biggest_tsn_acked)
3058 {
3059 	struct sctp_tmit_chunk *tp1;
3060 
3061 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3062 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3063 			/*
3064 			 * ok, this guy is either ACKED or MARKED. If it is
3065 			 * ACKED it has been previously acked but not this
3066 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3067 			 * again.
3068 			 */
3069 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3070 				break;
3071 			}
3072 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3073 				/* it has been revoked */
3074 				tp1->sent = SCTP_DATAGRAM_SENT;
3075 				tp1->rec.data.chunk_was_revoked = 1;
3076 				/*
3077 				 * We must add this stuff back in to assure
3078 				 * timers and such get started.
3079 				 */
3080 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3081 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3082 					    tp1->whoTo->flight_size,
3083 					    tp1->book_size,
3084 					    (uint32_t) (uintptr_t) tp1->whoTo,
3085 					    tp1->rec.data.TSN_seq);
3086 				}
3087 				sctp_flight_size_increase(tp1);
3088 				sctp_total_flight_increase(stcb, tp1);
3089 				/*
3090 				 * We inflate the cwnd to compensate for our
3091 				 * artificial inflation of the flight_size.
3092 				 */
3093 				tp1->whoTo->cwnd += tp1->book_size;
3094 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3095 					sctp_log_sack(asoc->last_acked_seq,
3096 					    cumack,
3097 					    tp1->rec.data.TSN_seq,
3098 					    0,
3099 					    0,
3100 					    SCTP_LOG_TSN_REVOKED);
3101 				}
3102 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3103 				/* it has been re-acked in this SACK */
3104 				tp1->sent = SCTP_DATAGRAM_ACKED;
3105 			}
3106 		}
3107 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3108 			break;
3109 	}
3110 }
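
/*
 * Annotation: "revoked" means a TSN above the cum-ack that an earlier SACK
 * covered with a gap ack block but the current SACK no longer does.  Such a
 * chunk is moved back to SENT and its size is added back to the flight
 * (with a matching cwnd inflation) so timers and congestion control treat
 * it as outstanding again.
 */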
3111 
3112 
3113 static void
3114 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3115     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3116 {
3117 	struct sctp_tmit_chunk *tp1;
3118 	int strike_flag = 0;
3119 	struct timeval now;
3120 	int tot_retrans = 0;
3121 	uint32_t sending_seq;
3122 	struct sctp_nets *net;
3123 	int num_dests_sacked = 0;
3124 
3125 	/*
3126 	 * select the sending_seq, this is either the next thing ready to be
3127 	 * sent but not transmitted, OR, the next seq we assign.
3128 	 */
3129 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3130 	if (tp1 == NULL) {
3131 		sending_seq = asoc->sending_seq;
3132 	} else {
3133 		sending_seq = tp1->rec.data.TSN_seq;
3134 	}
3135 
3136 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3137 	if ((asoc->sctp_cmt_on_off > 0) &&
3138 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3139 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3140 			if (net->saw_newack)
3141 				num_dests_sacked++;
3142 		}
3143 	}
3144 	if (stcb->asoc.prsctp_supported) {
3145 		(void)SCTP_GETTIME_TIMEVAL(&now);
3146 	}
3147 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3148 		strike_flag = 0;
3149 		if (tp1->no_fr_allowed) {
3150 			/* this one had a timeout or something */
3151 			continue;
3152 		}
3153 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3154 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3155 				sctp_log_fr(biggest_tsn_newly_acked,
3156 				    tp1->rec.data.TSN_seq,
3157 				    tp1->sent,
3158 				    SCTP_FR_LOG_CHECK_STRIKE);
3159 		}
3160 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3161 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3162 			/* done */
3163 			break;
3164 		}
3165 		if (stcb->asoc.prsctp_supported) {
3166 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3167 				/* Is it expired? */
3168 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3169 					/* Yes so drop it */
3170 					if (tp1->data != NULL) {
3171 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3172 						    SCTP_SO_NOT_LOCKED);
3173 					}
3174 					continue;
3175 				}
3176 			}
3177 		}
3178 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3179 			/* we are beyond the tsn in the sack  */
3180 			break;
3181 		}
3182 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3183 			/* either a RESEND, ACKED, or MARKED */
3184 			/* skip */
3185 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3186 				/* Continue striking FWD-TSN chunks */
3187 				tp1->rec.data.fwd_tsn_cnt++;
3188 			}
3189 			continue;
3190 		}
3191 		/*
3192 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3193 		 */
3194 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3195 			/*
3196 			 * No new acks were received for data sent to this
3197 			 * dest. Therefore, according to the SFR algo for
3198 			 * CMT, no data sent to this dest can be marked for
3199 			 * FR using this SACK.
3200 			 */
3201 			continue;
3202 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3203 		    tp1->whoTo->this_sack_highest_newack)) {
3204 			/*
3205 			 * CMT: New acks were received for data sent to
3206 			 * this dest. But no new acks were seen for data
3207 			 * sent after tp1. Therefore, according to the SFR
3208 			 * algo for CMT, tp1 cannot be marked for FR using
3209 			 * this SACK. This step covers part of the DAC algo
3210 			 * and the HTNA algo as well.
3211 			 */
3212 			continue;
3213 		}
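		/*
		 * In short, SFR permits a strike only when this SACK newly
		 * acked a TSN sent to tp1's destination that is above tp1.
		 * E.g. if TSNs 10 and 12 both went to net A and this SACK
		 * newly acks 12, TSN 10 may be struck; a SACK that only
		 * newly acks TSNs sent to net B leaves TSN 10 alone.
		 */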
3214 		/*
3215 		 * Here we check to see if we have already done a FR
3216 		 * and if so we see if the biggest TSN we saw in the sack is
3217 		 * smaller than the recovery point. If so we don't strike
3218 		 * the tsn... otherwise we CAN strike the TSN.
3219 		 */
3220 		/*
3221 		 * @@@ JRI: Check for CMT if (accum_moved &&
3222 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3223 		 * 0)) {
3224 		 */
3225 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3226 			/*
3227 			 * Strike the TSN if in fast-recovery and cum-ack
3228 			 * moved.
3229 			 */
3230 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3231 				sctp_log_fr(biggest_tsn_newly_acked,
3232 				    tp1->rec.data.TSN_seq,
3233 				    tp1->sent,
3234 				    SCTP_FR_LOG_STRIKE_CHUNK);
3235 			}
3236 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3237 				tp1->sent++;
3238 			}
3239 			if ((asoc->sctp_cmt_on_off > 0) &&
3240 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3241 				/*
3242 				 * CMT DAC algorithm: If the SACK's DAC flag
3243 				 * is set to 0, the lowest_newack test will
3244 				 * not pass because it would have been set
3245 				 * to the cumack earlier. If the chunk is
3246 				 * not already marked for rtx, this is not
3247 				 * a mixed SACK, and tp1 is not between two
3248 				 * sacked TSNs, then mark it one more time.
3249 				 * NOTE that we mark one additional time
3250 				 * since the SACK DAC flag indicates that
3251 				 * two packets were received after this missing TSN.
3252 				 */
3253 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3254 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3255 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3256 						sctp_log_fr(16 + num_dests_sacked,
3257 						    tp1->rec.data.TSN_seq,
3258 						    tp1->sent,
3259 						    SCTP_FR_LOG_STRIKE_CHUNK);
3260 					}
3261 					tp1->sent++;
3262 				}
3263 			}
3264 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3265 		    (asoc->sctp_cmt_on_off == 0)) {
3266 			/*
3267 			 * For those that have done a FR we must take
3268 			 * special consideration if we strike. I.e the
3269 			 * biggest_newly_acked must be higher than the
3270 			 * sending_seq at the time we did the FR.
3271 			 */
3272 			if (
3273 #ifdef SCTP_FR_TO_ALTERNATE
3274 			/*
3275 			 * If FR's go to new networks, then we must only do
3276 			 * this for singly homed asoc's. However if the FR's
3277 			 * go to the same network (Armando's work) then it's
3278 			 * ok to FR multiple times.
3279 			 */
3280 			    (asoc->numnets < 2)
3281 #else
3282 			    (1)
3283 #endif
3284 			    ) {
3285 
3286 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3287 				    tp1->rec.data.fast_retran_tsn)) {
3288 					/*
3289 					 * Strike the TSN, since this ack is
3290 					 * beyond where things were when we
3291 					 * did a FR.
3292 					 */
3293 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 						sctp_log_fr(biggest_tsn_newly_acked,
3295 						    tp1->rec.data.TSN_seq,
3296 						    tp1->sent,
3297 						    SCTP_FR_LOG_STRIKE_CHUNK);
3298 					}
3299 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3300 						tp1->sent++;
3301 					}
3302 					strike_flag = 1;
3303 					if ((asoc->sctp_cmt_on_off > 0) &&
3304 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3305 						/*
3306 						 * CMT DAC algorithm: If the
3307 						 * SACK's DAC flag is set to
3308 						 * 0, the lowest_newack test
3309 						 * will not pass because it
3310 						 * would have been set to
3311 						 * the cumack earlier. If
3312 						 * the chunk is not already
3313 						 * marked for rtx, this is
3314 						 * not a mixed SACK, and tp1
3315 						 * is not between two sacked
3316 						 * TSNs, then mark it one
3317 						 * more time. NOTE that we
3318 						 * mark one additional time
3319 						 * since the SACK DAC flag
3320 						 * indicates that two
3321 						 * packets were received
3322 						 * after this missing TSN.
3323 						 */
3324 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3325 						    (num_dests_sacked == 1) &&
3326 						    SCTP_TSN_GT(this_sack_lowest_newack,
3327 						    tp1->rec.data.TSN_seq)) {
3328 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3329 								sctp_log_fr(32 + num_dests_sacked,
3330 								    tp1->rec.data.TSN_seq,
3331 								    tp1->sent,
3332 								    SCTP_FR_LOG_STRIKE_CHUNK);
3333 							}
3334 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3335 								tp1->sent++;
3336 							}
3337 						}
3338 					}
3339 				}
3340 			}
3341 			/*
3342 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3343 			 * algo covers HTNA.
3344 			 */
3345 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3346 		    biggest_tsn_newly_acked)) {
3347 			/*
3348 			 * We don't strike these: this is the HTNA
3349 			 * algorithm, i.e. we don't strike if our TSN is
3350 			 * larger than the Highest TSN Newly Acked.
3351 			 */
3352 			;
3353 		} else {
3354 			/* Strike the TSN */
3355 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3356 				sctp_log_fr(biggest_tsn_newly_acked,
3357 				    tp1->rec.data.TSN_seq,
3358 				    tp1->sent,
3359 				    SCTP_FR_LOG_STRIKE_CHUNK);
3360 			}
3361 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3362 				tp1->sent++;
3363 			}
3364 			if ((asoc->sctp_cmt_on_off > 0) &&
3365 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3366 				/*
3367 				 * CMT DAC algorithm: If the SACK's DAC flag
3368 				 * is set to 0, the lowest_newack test will
3369 				 * not pass because it would have been set
3370 				 * to the cumack earlier. If the chunk is
3371 				 * not already marked for rtx, this is not
3372 				 * a mixed SACK, and tp1 is not between two
3373 				 * sacked TSNs, then mark it one more time.
3374 				 * NOTE that we mark one additional time
3375 				 * since the SACK DAC flag indicates that
3376 				 * two packets were received after this missing TSN.
3377 				 */
3378 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3379 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3380 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3381 						sctp_log_fr(48 + num_dests_sacked,
3382 						    tp1->rec.data.TSN_seq,
3383 						    tp1->sent,
3384 						    SCTP_FR_LOG_STRIKE_CHUNK);
3385 					}
3386 					tp1->sent++;
3387 				}
3388 			}
3389 		}
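		/*
		 * tp1->sent doubles as the strike counter while it is below
		 * SCTP_DATAGRAM_RESEND; once the increments above reach
		 * SCTP_DATAGRAM_RESEND, the chunk is set up for fast
		 * retransmission below.
		 */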
3390 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3391 			struct sctp_nets *alt;
3392 
3393 			/* fix counts and things */
3394 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3395 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3396 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3397 				    tp1->book_size,
3398 				    (uint32_t) (uintptr_t) tp1->whoTo,
3399 				    tp1->rec.data.TSN_seq);
3400 			}
3401 			if (tp1->whoTo) {
3402 				tp1->whoTo->net_ack++;
3403 				sctp_flight_size_decrease(tp1);
3404 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3405 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3406 					    tp1);
3407 				}
3408 			}
3409 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3410 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3411 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3412 			}
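			/*
			 * Credit the peer's rwnd with the chunk's size plus
			 * the configured per-chunk overhead: e.g. with a
			 * send_size of 1000 bytes and sctp_peer_chunk_oh of
			 * 256, peers_rwnd grows by 1256 while the chunk
			 * waits for retransmission.
			 */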
3413 			/* add back to the rwnd */
3414 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3415 
3416 			/* remove from the total flight */
3417 			sctp_total_flight_decrease(stcb, tp1);
3418 
3419 			if ((stcb->asoc.prsctp_supported) &&
3420 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3421 				/*
3422 				 * Has it been retransmitted tv_sec times? -
3423 				 * we store the retran count there.
3424 				 */
3425 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3426 					/* Yes, so drop it */
3427 					if (tp1->data != NULL) {
3428 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3429 						    SCTP_SO_NOT_LOCKED);
3430 					}
3431 					/* Make sure to flag we had a FR */
3432 					tp1->whoTo->net_ack++;
3433 					continue;
3434 				}
3435 			}
3436 			/*
3437 			 * SCTP_PRINTF("OK, we are now ready to FR this
3438 			 * guy\n");
3439 			 */
3440 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3441 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3442 				    0, SCTP_FR_MARKED);
3443 			}
3444 			if (strike_flag) {
3445 				/* This is a subsequent FR */
3446 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3447 			}
3448 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3449 			if (asoc->sctp_cmt_on_off > 0) {
3450 				/*
3451 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3452 				 * If CMT is being used, then pick dest with
3453 				 * largest ssthresh for any retransmission.
3454 				 */
3455 				tp1->no_fr_allowed = 1;
3456 				alt = tp1->whoTo;
3457 				/* sa_ignore NO_NULL_CHK */
3458 				if (asoc->sctp_cmt_pf > 0) {
3459 					/*
3460 					 * JRS 5/18/07 - If CMT PF is on,
3461 					 * use the PF version of
3462 					 * find_alt_net()
3463 					 */
3464 					alt = sctp_find_alternate_net(stcb, alt, 2);
3465 				} else {
3466 					/*
3467 					 * JRS 5/18/07 - If only CMT is on,
3468 					 * use the CMT version of
3469 					 * find_alt_net()
3470 					 */
3471 					/* sa_ignore NO_NULL_CHK */
3472 					alt = sctp_find_alternate_net(stcb, alt, 1);
3473 				}
3474 				if (alt == NULL) {
3475 					alt = tp1->whoTo;
3476 				}
3477 				/*
3478 				 * CUCv2: If a different dest is picked for
3479 				 * the retransmission, then new
3480 				 * (rtx-)pseudo_cumack needs to be tracked
3481 				 * for orig dest. Let CUCv2 track new (rtx-)
3482 				 * pseudo-cumack always.
3483 				 */
3484 				if (tp1->whoTo) {
3485 					tp1->whoTo->find_pseudo_cumack = 1;
3486 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3487 				}
3488 			} else {/* CMT is OFF */
3489 
3490 #ifdef SCTP_FR_TO_ALTERNATE
3491 				/* Can we find an alternate? */
3492 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3493 #else
3494 				/*
3495 				 * default behavior is to NOT retransmit
3496 				 * FR's to an alternate. Armando Caro's
3497 				 * paper details why.
3498 				 */
3499 				alt = tp1->whoTo;
3500 #endif
3501 			}
3502 
3503 			tp1->rec.data.doing_fast_retransmit = 1;
3504 			tot_retrans++;
3505 			/* mark the sending seq for possible subsequent FR's */
3506 			/*
3507 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3508 			 * (uint32_t)tp1->rec.data.TSN_seq);
3509 			 */
3510 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3511 				/*
3512 				 * If the send queue is empty then it's the
3513 				 * next sequence number that will be
3514 				 * assigned, so we subtract one from this to
3515 				 * get the one we last sent.
3516 				 */
3517 				tp1->rec.data.fast_retran_tsn = sending_seq;
3518 			} else {
3519 				/*
3520 				 * If there are chunks on the send queue
3521 				 * (unsent data that has made it from the
3522 				 * stream queues but not out the door), we
3523 				 * take the first one (which will have the
3524 				 * lowest TSN) and subtract one to get the
3525 				 * one we last sent.
3526 				 */
3527 				struct sctp_tmit_chunk *ttt;
3528 
3529 				ttt = TAILQ_FIRST(&asoc->send_queue);
3530 				tp1->rec.data.fast_retran_tsn =
3531 				    ttt->rec.data.TSN_seq;
3532 			}
3533 
3534 			if (tp1->do_rtt) {
3535 				/*
3536 				 * this chunk had an RTO calculation pending
3537 				 * on it, cancel it
3538 				 */
3539 				if ((tp1->whoTo != NULL) &&
3540 				    (tp1->whoTo->rto_needed == 0)) {
3541 					tp1->whoTo->rto_needed = 1;
3542 				}
3543 				tp1->do_rtt = 0;
3544 			}
3545 			if (alt != tp1->whoTo) {
3546 				/* yes, there is an alternate. */
3547 				sctp_free_remote_addr(tp1->whoTo);
3548 				/* sa_ignore FREED_MEMORY */
3549 				tp1->whoTo = alt;
3550 				atomic_add_int(&alt->ref_count, 1);
3551 			}
3552 		}
3553 	}
3554 }
3555 
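/*
 * Walk the sent queue and try to advance the Advanced.Peer.Ack.Point
 * (RFC 3758) past chunks that have been abandoned (FORWARD_TSN_SKIP or
 * NR_ACKED). The chunk returned, if any, carries the highest TSN the
 * point advanced to; the caller uses it when building a FWD-TSN chunk.
 */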
3556 struct sctp_tmit_chunk *
3557 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3558     struct sctp_association *asoc)
3559 {
3560 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3561 	struct timeval now;
3562 	int now_filled = 0;
3563 
3564 	if (asoc->prsctp_supported == 0) {
3565 		return (NULL);
3566 	}
3567 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3568 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3569 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3570 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3571 			/* no chance to advance, out of here */
3572 			break;
3573 		}
3574 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3575 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3576 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3577 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3578 				    asoc->advanced_peer_ack_point,
3579 				    tp1->rec.data.TSN_seq, 0, 0);
3580 			}
3581 		}
3582 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3583 			/*
3584 			 * We can't fwd-tsn past any that are reliable aka
3585 			 * retransmitted until the asoc fails.
3586 			 */
3587 			break;
3588 		}
3589 		if (!now_filled) {
3590 			(void)SCTP_GETTIME_TIMEVAL(&now);
3591 			now_filled = 1;
3592 		}
3593 		/*
3594 		 * Now we have a chunk which is marked for another
3595 		 * retransmission to a PR-stream, but which has maybe run out
3596 		 * of its chances already OR has been marked to skip now. Can
3597 		 * we skip it if it's a resend?
3598 		 */
3599 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3600 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3601 			/*
3602 			 * Now is this one marked for resend and its time is
3603 			 * now up?
3604 			 */
3605 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3606 				/* Yes so drop it */
3607 				if (tp1->data) {
3608 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3609 					    1, SCTP_SO_NOT_LOCKED);
3610 				}
3611 			} else {
3612 				/*
3613 				 * No, we are done when we hit one marked
3614 				 * for resend whose time has not expired.
3615 				 */
3616 				break;
3617 			}
3618 		}
3619 		/*
3620 		 * OK, now if this chunk is marked to be dropped we can
3621 		 * clean up the chunk, advance our peer ack point and check
3622 		 * the next chunk.
3623 		 */
3624 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3625 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3626 			/* the advanced PeerAckPoint goes forward */
3627 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3628 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3629 				a_adv = tp1;
3630 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3631 				/* No update but we do save the chk */
3632 				a_adv = tp1;
3633 			}
3634 		} else {
3635 			/*
3636 			 * If it is still in RESEND we can advance no
3637 			 * further
3638 			 */
3639 			break;
3640 		}
3641 	}
3642 	return (a_adv);
3643 }
3644 
3645 static int
3646 sctp_fs_audit(struct sctp_association *asoc)
3647 {
3648 	struct sctp_tmit_chunk *chk;
3649 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3650 	int ret;
3651 
3652 #ifndef INVARIANTS
3653 	int entry_flight, entry_cnt;
3654 
3655 #endif
3656 
3657 	ret = 0;
3658 #ifndef INVARIANTS
3659 	entry_flight = asoc->total_flight;
3660 	entry_cnt = asoc->total_flight_count;
3661 #endif
3662 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3663 		return (0);
3664 
3665 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3666 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3667 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3668 			    chk->rec.data.TSN_seq,
3669 			    chk->send_size,
3670 			    chk->snd_count);
3671 			inflight++;
3672 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3673 			resend++;
3674 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3675 			inbetween++;
3676 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3677 			above++;
3678 		} else {
3679 			acked++;
3680 		}
3681 	}
3682 
3683 	if ((inflight > 0) || (inbetween > 0)) {
3684 #ifdef INVARIANTS
3685 		panic("Flight size-express incorrect? \n");
3686 #else
3687 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3688 		    entry_flight, entry_cnt);
3689 
3690 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3691 		    inflight, inbetween, resend, above, acked);
3692 		ret = 1;
3693 #endif
3694 	}
3695 	return (ret);
3696 }
3697 
3698 
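/*
 * When the peer's rwnd reopens, a chunk that went out purely as a window
 * probe is taken back out of the flight accounting and re-marked
 * SCTP_DATAGRAM_RESEND, so it is retransmitted through the normal path.
 */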
3699 static void
3700 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3701     struct sctp_association *asoc,
3702     struct sctp_tmit_chunk *tp1)
3703 {
3704 	tp1->window_probe = 0;
3705 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3706 		/* TSNs skipped; we do NOT move back. */
3707 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3708 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3709 		    tp1->book_size,
3710 		    (uint32_t) (uintptr_t) tp1->whoTo,
3711 		    tp1->rec.data.TSN_seq);
3712 		return;
3713 	}
3714 	/* First setup this by shrinking flight */
3715 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3716 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3717 		    tp1);
3718 	}
3719 	sctp_flight_size_decrease(tp1);
3720 	sctp_total_flight_decrease(stcb, tp1);
3721 	/* Now mark for resend */
3722 	tp1->sent = SCTP_DATAGRAM_RESEND;
3723 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3724 
3725 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3726 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3727 		    tp1->whoTo->flight_size,
3728 		    tp1->book_size,
3729 		    (uint32_t) (uintptr_t) tp1->whoTo,
3730 		    tp1->rec.data.TSN_seq);
3731 	}
3732 }
3733 
3734 void
3735 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3736     uint32_t rwnd, int *abort_now, int ecne_seen)
3737 {
3738 	struct sctp_nets *net;
3739 	struct sctp_association *asoc;
3740 	struct sctp_tmit_chunk *tp1, *tp2;
3741 	uint32_t old_rwnd;
3742 	int win_probe_recovery = 0;
3743 	int win_probe_recovered = 0;
3744 	int j, done_once = 0;
3745 	int rto_ok = 1;
3746 
3747 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3748 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3749 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3750 	}
3751 	SCTP_TCB_LOCK_ASSERT(stcb);
3752 #ifdef SCTP_ASOCLOG_OF_TSNS
3753 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3754 	stcb->asoc.cumack_log_at++;
3755 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3756 		stcb->asoc.cumack_log_at = 0;
3757 	}
3758 #endif
3759 	asoc = &stcb->asoc;
3760 	old_rwnd = asoc->peers_rwnd;
3761 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3762 		/* old ack */
3763 		return;
3764 	} else if (asoc->last_acked_seq == cumack) {
3765 		/* Window update sack */
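		/*
		 * Recompute the usable peer rwnd: the advertised rwnd less
		 * what we still count in flight, including the per-chunk
		 * overhead. E.g. an advertised rwnd of 64000 with 10000
		 * bytes in flight across 10 chunks and a 256-byte overhead
		 * leaves 64000 - (10000 + 2560) = 51440 bytes.
		 */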
3766 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3767 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3768 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3769 			/* SWS sender side engages */
3770 			asoc->peers_rwnd = 0;
3771 		}
3772 		if (asoc->peers_rwnd > old_rwnd) {
3773 			goto again;
3774 		}
3775 		return;
3776 	}
3777 	/* First setup for CC stuff */
3778 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3779 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3780 			/* Drag along the window_tsn for cwr's */
3781 			net->cwr_window_tsn = cumack;
3782 		}
3783 		net->prev_cwnd = net->cwnd;
3784 		net->net_ack = 0;
3785 		net->net_ack2 = 0;
3786 
3787 		/*
3788 		 * CMT: Reset CUC and Fast recovery algo variables before
3789 		 * SACK processing
3790 		 */
3791 		net->new_pseudo_cumack = 0;
3792 		net->will_exit_fast_recovery = 0;
3793 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3794 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3795 		}
3796 	}
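	/*
	 * Sanity check: the peer can never cum-ack a TSN we have not sent,
	 * so a cum-ack at or beyond send_s (the next TSN to be assigned)
	 * is a protocol violation. E.g. if the highest TSN sent is 99,
	 * send_s is 100 and a cum-ack of 100 aborts the association.
	 */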
3797 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3798 		uint32_t send_s;
3799 
3800 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3801 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3802 			    sctpchunk_listhead);
3803 			send_s = tp1->rec.data.TSN_seq + 1;
3804 		} else {
3805 			send_s = asoc->sending_seq;
3806 		}
3807 		if (SCTP_TSN_GE(cumack, send_s)) {
3808 			struct mbuf *op_err;
3809 			char msg[SCTP_DIAG_INFO_LEN];
3810 
3811 			*abort_now = 1;
3812 			/* XXX */
3813 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3814 			    cumack, send_s);
3815 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3816 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3817 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3818 			return;
3819 		}
3820 	}
3821 	asoc->this_sack_highest_gap = cumack;
3822 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3823 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3824 		    stcb->asoc.overall_error_count,
3825 		    0,
3826 		    SCTP_FROM_SCTP_INDATA,
3827 		    __LINE__);
3828 	}
3829 	stcb->asoc.overall_error_count = 0;
3830 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3831 		/* process the new consecutive TSN first */
3832 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3833 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3834 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3835 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3836 				}
3837 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3838 					/*
3839 					 * If it is less than ACKED, it is
3840 					 * now no-longer in flight. Higher
3841 					 * values may occur during marking
3842 					 */
3843 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3844 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3845 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3846 							    tp1->whoTo->flight_size,
3847 							    tp1->book_size,
3848 							    (uint32_t) (uintptr_t) tp1->whoTo,
3849 							    tp1->rec.data.TSN_seq);
3850 						}
3851 						sctp_flight_size_decrease(tp1);
3852 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3853 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3854 							    tp1);
3855 						}
3856 						/* sa_ignore NO_NULL_CHK */
3857 						sctp_total_flight_decrease(stcb, tp1);
3858 					}
3859 					tp1->whoTo->net_ack += tp1->send_size;
3860 					if (tp1->snd_count < 2) {
3861 						/*
3862 						 * True non-retransmitted
3863 						 * chunk
3864 						 */
3865 						tp1->whoTo->net_ack2 +=
3866 						    tp1->send_size;
3867 
3868 						/* update RTO too? */
3869 						if (tp1->do_rtt) {
3870 							if (rto_ok) {
3871 								tp1->whoTo->RTO =
3872 								/* sa_ignore NO_NULL_CHK */
3877 								    sctp_calculate_rto(stcb,
3878 								    asoc, tp1->whoTo,
3879 								    &tp1->sent_rcv_time,
3880 								    sctp_align_safe_nocopy,
3881 								    SCTP_RTT_FROM_DATA);
3882 								rto_ok = 0;
3883 							}
3884 							if (tp1->whoTo->rto_needed == 0) {
3885 								tp1->whoTo->rto_needed = 1;
3886 							}
3887 							tp1->do_rtt = 0;
3888 						}
3889 					}
3890 					/*
3891 					 * CMT: CUCv2 algorithm. From the
3892 					 * cumack'd TSNs, for each TSN being
3893 					 * acked for the first time, set the
3894 					 * following variables for the
3895 					 * corresp destination.
3896 					 * new_pseudo_cumack will trigger a
3897 					 * cwnd update.
3898 					 * find_(rtx_)pseudo_cumack will
3899 					 * trigger search for the next
3900 					 * expected (rtx-)pseudo-cumack.
3901 					 */
3902 					tp1->whoTo->new_pseudo_cumack = 1;
3903 					tp1->whoTo->find_pseudo_cumack = 1;
3904 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3905 
3906 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3907 						/* sa_ignore NO_NULL_CHK */
3908 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3909 					}
3910 				}
3911 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3912 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3913 				}
3914 				if (tp1->rec.data.chunk_was_revoked) {
3915 					/* deflate the cwnd */
3916 					tp1->whoTo->cwnd -= tp1->book_size;
3917 					tp1->rec.data.chunk_was_revoked = 0;
3918 				}
3919 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3920 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3921 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3922 #ifdef INVARIANTS
3923 					} else {
3924 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3925 #endif
3926 					}
3927 				}
3928 				if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3929 				    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3930 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3931 					asoc->trigger_reset = 1;
3932 				}
3933 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3934 				if (tp1->data) {
3935 					/* sa_ignore NO_NULL_CHK */
3936 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3937 					sctp_m_freem(tp1->data);
3938 					tp1->data = NULL;
3939 				}
3940 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3941 					sctp_log_sack(asoc->last_acked_seq,
3942 					    cumack,
3943 					    tp1->rec.data.TSN_seq,
3944 					    0,
3945 					    0,
3946 					    SCTP_LOG_FREE_SENT);
3947 				}
3948 				asoc->sent_queue_cnt--;
3949 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3950 			} else {
3951 				break;
3952 			}
3953 		}
3954 
3955 	}
3956 	/* sa_ignore NO_NULL_CHK */
3957 	if (stcb->sctp_socket) {
3958 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3959 		struct socket *so;
3960 
3961 #endif
3962 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3963 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3964 			/* sa_ignore NO_NULL_CHK */
3965 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3966 		}
3967 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3968 		so = SCTP_INP_SO(stcb->sctp_ep);
3969 		atomic_add_int(&stcb->asoc.refcnt, 1);
3970 		SCTP_TCB_UNLOCK(stcb);
3971 		SCTP_SOCKET_LOCK(so, 1);
3972 		SCTP_TCB_LOCK(stcb);
3973 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3974 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3975 			/* assoc was freed while we were unlocked */
3976 			SCTP_SOCKET_UNLOCK(so, 1);
3977 			return;
3978 		}
3979 #endif
3980 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3981 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3982 		SCTP_SOCKET_UNLOCK(so, 1);
3983 #endif
3984 	} else {
3985 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3986 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3987 		}
3988 	}
3989 
3990 	/* JRS - Use the congestion control given in the CC module */
3991 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3992 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3993 			if (net->net_ack2 > 0) {
3994 				/*
3995 				 * Karn's rule applies to clearing error
3996 				 * count, this is optional.
3997 				 */
3998 				net->error_count = 0;
3999 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4000 					/* addr came good */
4001 					net->dest_state |= SCTP_ADDR_REACHABLE;
4002 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4003 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4004 				}
4005 				if (net == stcb->asoc.primary_destination) {
4006 					if (stcb->asoc.alternate) {
4007 						/*
4008 						 * release the alternate,
4009 						 * primary is good
4010 						 */
4011 						sctp_free_remote_addr(stcb->asoc.alternate);
4012 						stcb->asoc.alternate = NULL;
4013 					}
4014 				}
4015 				if (net->dest_state & SCTP_ADDR_PF) {
4016 					net->dest_state &= ~SCTP_ADDR_PF;
4017 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4018 					    stcb->sctp_ep, stcb, net,
4019 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4020 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4021 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4022 					/* Done with this net */
4023 					net->net_ack = 0;
4024 				}
4025 				/* restore any doubled timers */
4026 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4027 				if (net->RTO < stcb->asoc.minrto) {
4028 					net->RTO = stcb->asoc.minrto;
4029 				}
4030 				if (net->RTO > stcb->asoc.maxrto) {
4031 					net->RTO = stcb->asoc.maxrto;
4032 				}
4033 			}
4034 		}
4035 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4036 	}
4037 	asoc->last_acked_seq = cumack;
4038 
4039 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4040 		/* nothing left in-flight */
4041 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4042 			net->flight_size = 0;
4043 			net->partial_bytes_acked = 0;
4044 		}
4045 		asoc->total_flight = 0;
4046 		asoc->total_flight_count = 0;
4047 	}
4048 	/* RWND update */
4049 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4050 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4051 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4052 		/* SWS sender side engages */
4053 		asoc->peers_rwnd = 0;
4054 	}
4055 	if (asoc->peers_rwnd > old_rwnd) {
4056 		win_probe_recovery = 1;
4057 	}
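	/*
	 * The rwnd opened up relative to what we last saw, so chunks that
	 * were outstanding only as window probes can be recovered; the
	 * loop below finds the first such chunk per net and runs
	 * sctp_window_probe_recovery() on it.
	 */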
4058 	/* Now assure a timer where data is queued at */
4059 again:
4060 	j = 0;
4061 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4062 		int to_ticks;
4063 
4064 		if (win_probe_recovery && (net->window_probe)) {
4065 			win_probe_recovered = 1;
4066 			/*
4067 			 * Find first chunk that was used with window probe
4068 			 * and clear the sent
4069 			 */
4070 			/* sa_ignore FREED_MEMORY */
4071 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4072 				if (tp1->window_probe) {
4073 					/* move back to data send queue */
4074 					sctp_window_probe_recovery(stcb, asoc, tp1);
4075 					break;
4076 				}
4077 			}
4078 		}
4079 		if (net->RTO == 0) {
4080 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4081 		} else {
4082 			to_ticks = MSEC_TO_TICKS(net->RTO);
4083 		}
4084 		if (net->flight_size) {
4085 			j++;
4086 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4087 			    sctp_timeout_handler, &net->rxt_timer);
4088 			if (net->window_probe) {
4089 				net->window_probe = 0;
4090 			}
4091 		} else {
4092 			if (net->window_probe) {
4093 				/*
4094 				 * In window probes we must assure a timer
4095 				 * is still running there
4096 				 */
4097 				net->window_probe = 0;
4098 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4099 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4100 					    sctp_timeout_handler, &net->rxt_timer);
4101 				}
4102 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4103 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4104 				    stcb, net,
4105 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4106 			}
4107 		}
4108 	}
4109 	if ((j == 0) &&
4110 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4111 	    (asoc->sent_queue_retran_cnt == 0) &&
4112 	    (win_probe_recovered == 0) &&
4113 	    (done_once == 0)) {
4114 		/*
4115 		 * huh, this should not happen unless all packets are
4116 		 * PR-SCTP and marked to skip of course.
4117 		 */
4118 		if (sctp_fs_audit(asoc)) {
4119 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4120 				net->flight_size = 0;
4121 			}
4122 			asoc->total_flight = 0;
4123 			asoc->total_flight_count = 0;
4124 			asoc->sent_queue_retran_cnt = 0;
4125 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4126 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4127 					sctp_flight_size_increase(tp1);
4128 					sctp_total_flight_increase(stcb, tp1);
4129 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4130 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4131 				}
4132 			}
4133 		}
4134 		done_once = 1;
4135 		goto again;
4136 	}
4137 	/**********************************/
4138 	/* Now what about shutdown issues */
4139 	/**********************************/
4140 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4141 		/* nothing left on sendqueue.. consider done */
4142 		/* clean up */
4143 		if ((asoc->stream_queue_cnt == 1) &&
4144 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4145 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4146 		    (asoc->locked_on_sending)
4147 		    ) {
4148 			struct sctp_stream_queue_pending *sp;
4149 
4150 			/*
4151 			 * We may be in a state where we got all across... but
4152 			 * cannot write more due to a shutdown... we abort
4153 			 * since the user did not indicate EOR in this case.
4154 			 * The sp will be cleaned during free of the asoc.
4155 			 */
4156 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4157 			    sctp_streamhead);
4158 			if ((sp) && (sp->length == 0)) {
4159 				/* Let cleanup code purge it */
4160 				if (sp->msg_is_complete) {
4161 					asoc->stream_queue_cnt--;
4162 				} else {
4163 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4164 					asoc->locked_on_sending = NULL;
4165 					asoc->stream_queue_cnt--;
4166 				}
4167 			}
4168 		}
4169 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4170 		    (asoc->stream_queue_cnt == 0)) {
4171 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4172 				/* Need to abort here */
4173 				struct mbuf *op_err;
4174 
4175 		abort_out_now:
4176 				*abort_now = 1;
4177 				/* XXX */
4178 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4179 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4180 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4181 				return;
4182 			} else {
4183 				struct sctp_nets *netp;
4184 
4185 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4186 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4187 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4188 				}
4189 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4190 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4191 				sctp_stop_timers_for_shutdown(stcb);
4192 				if (asoc->alternate) {
4193 					netp = asoc->alternate;
4194 				} else {
4195 					netp = asoc->primary_destination;
4196 				}
4197 				sctp_send_shutdown(stcb, netp);
4198 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4199 				    stcb->sctp_ep, stcb, netp);
4200 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4201 				    stcb->sctp_ep, stcb, netp);
4202 			}
4203 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4204 		    (asoc->stream_queue_cnt == 0)) {
4205 			struct sctp_nets *netp;
4206 
4207 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4208 				goto abort_out_now;
4209 			}
4210 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4211 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4212 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4213 			sctp_stop_timers_for_shutdown(stcb);
4214 			if (asoc->alternate) {
4215 				netp = asoc->alternate;
4216 			} else {
4217 				netp = asoc->primary_destination;
4218 			}
4219 			sctp_send_shutdown_ack(stcb, netp);
4220 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4221 			    stcb->sctp_ep, stcb, netp);
4222 		}
4223 	}
4224 	/*********************************************/
4225 	/* Here we perform PR-SCTP procedures        */
4226 	/* (section 4.2)                             */
4227 	/*********************************************/
4228 	/* C1. update advancedPeerAckPoint */
4229 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4230 		asoc->advanced_peer_ack_point = cumack;
4231 	}
4232 	/* PR-Sctp issues need to be addressed too */
4233 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4234 		struct sctp_tmit_chunk *lchk;
4235 		uint32_t old_adv_peer_ack_point;
4236 
4237 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4238 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4239 		/* C3. See if we need to send a Fwd-TSN */
4240 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4241 			/*
4242 			 * ISSUE with ECN, see FWD-TSN processing.
4243 			 */
4244 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4245 				send_forward_tsn(stcb, asoc);
4246 			} else if (lchk) {
4247 				/* try to FR fwd-tsn's that get lost too */
4248 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4249 					send_forward_tsn(stcb, asoc);
4250 				}
4251 			}
4252 		}
4253 		if (lchk) {
4254 			/* Assure a timer is up */
4255 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4256 			    stcb->sctp_ep, stcb, lchk->whoTo);
4257 		}
4258 	}
4259 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4260 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4261 		    rwnd,
4262 		    stcb->asoc.peers_rwnd,
4263 		    stcb->asoc.total_flight,
4264 		    stcb->asoc.total_output_queue_size);
4265 	}
4266 }
4267 
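/*
 * Slow-path SACK handler. The express path above only moves the cumulative
 * ack; this one additionally walks the gap-ack and duplicate-TSN reports
 * carried in the SACK chunk.
 */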
4268 void
4269 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4270     struct sctp_tcb *stcb,
4271     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4272     int *abort_now, uint8_t flags,
4273     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4274 {
4275 	struct sctp_association *asoc;
4276 	struct sctp_tmit_chunk *tp1, *tp2;
4277 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4278 	uint16_t wake_him = 0;
4279 	uint32_t send_s = 0;
4280 	long j;
4281 	int accum_moved = 0;
4282 	int will_exit_fast_recovery = 0;
4283 	uint32_t a_rwnd, old_rwnd;
4284 	int win_probe_recovery = 0;
4285 	int win_probe_recovered = 0;
4286 	struct sctp_nets *net = NULL;
4287 	int done_once;
4288 	int rto_ok = 1;
4289 	uint8_t reneged_all = 0;
4290 	uint8_t cmt_dac_flag;
4291 
4292 	/*
4293 	 * we take any chance we can to service our queues since we cannot
4294 	 * get awoken when the socket is read from :<
4295 	 */
4296 	/*
4297 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4298 	 * old sack, if so discard. 2) If there is nothing left in the send
4299 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4300 	 * too, update any rwnd change and verify no timers are running.
4301 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4302 	 * moved process these first and note that it moved. 4) Process any
4303 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4304 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4305 	 * sync up flightsizes and things, stop all timers and also check
4306 	 * for shutdown_pending state. If so then go ahead and send off the
4307 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4308 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4309 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4310 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4311 	 * if in shutdown_recv state.
4312 	 */
4313 	SCTP_TCB_LOCK_ASSERT(stcb);
4314 	/* CMT DAC algo */
4315 	this_sack_lowest_newack = 0;
4316 	SCTP_STAT_INCR(sctps_slowpath_sack);
4317 	last_tsn = cum_ack;
4318 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4319 #ifdef SCTP_ASOCLOG_OF_TSNS
4320 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4321 	stcb->asoc.cumack_log_at++;
4322 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4323 		stcb->asoc.cumack_log_at = 0;
4324 	}
4325 #endif
4326 	a_rwnd = rwnd;
4327 
4328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4329 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4330 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4331 	}
4332 	old_rwnd = stcb->asoc.peers_rwnd;
4333 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4334 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4335 		    stcb->asoc.overall_error_count,
4336 		    0,
4337 		    SCTP_FROM_SCTP_INDATA,
4338 		    __LINE__);
4339 	}
4340 	stcb->asoc.overall_error_count = 0;
4341 	asoc = &stcb->asoc;
4342 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4343 		sctp_log_sack(asoc->last_acked_seq,
4344 		    cum_ack,
4345 		    0,
4346 		    num_seg,
4347 		    num_dup,
4348 		    SCTP_LOG_NEW_SACK);
4349 	}
4350 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4351 		uint16_t i;
4352 		uint32_t *dupdata, dblock;
4353 
4354 		for (i = 0; i < num_dup; i++) {
4355 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4356 			    sizeof(uint32_t), (uint8_t *) & dblock);
4357 			if (dupdata == NULL) {
4358 				break;
4359 			}
4360 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4361 		}
4362 	}
4363 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4364 		/* reality check */
4365 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4366 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4367 			    sctpchunk_listhead);
4368 			send_s = tp1->rec.data.TSN_seq + 1;
4369 		} else {
4370 			tp1 = NULL;
4371 			send_s = asoc->sending_seq;
4372 		}
4373 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4374 			struct mbuf *op_err;
4375 			char msg[SCTP_DIAG_INFO_LEN];
4376 
4377 			/*
4378 			 * no way, we have not even sent this TSN out yet.
4379 			 * Peer is hopelessly messed up with us.
4380 			 */
4381 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4382 			    cum_ack, send_s);
4383 			if (tp1) {
4384 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4385 				    tp1->rec.data.TSN_seq, (void *)tp1);
4386 			}
4387 	hopeless_peer:
4388 			*abort_now = 1;
4389 			/* XXX */
4390 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4391 			    cum_ack, send_s);
4392 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4393 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4394 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4395 			return;
4396 		}
4397 	}
4398 	/**********************/
4399 	/* 1) check the range */
4400 	/**********************/
4401 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4402 		/* acking something behind */
4403 		return;
4404 	}
4405 	/* update the Rwnd of the peer */
4406 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4407 	    TAILQ_EMPTY(&asoc->send_queue) &&
4408 	    (asoc->stream_queue_cnt == 0)) {
4409 		/* nothing left on send/sent and strmq */
4410 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4411 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4412 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4413 		}
4414 		asoc->peers_rwnd = a_rwnd;
4415 		if (asoc->sent_queue_retran_cnt) {
4416 			asoc->sent_queue_retran_cnt = 0;
4417 		}
4418 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4419 			/* SWS sender side engages */
4420 			asoc->peers_rwnd = 0;
4421 		}
4422 		/* stop any timers */
4423 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4424 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4425 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4426 			net->partial_bytes_acked = 0;
4427 			net->flight_size = 0;
4428 		}
4429 		asoc->total_flight = 0;
4430 		asoc->total_flight_count = 0;
4431 		return;
4432 	}
4433 	/*
4434 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4435 	 * things. The total byte count acked is tracked in net_ack AND
4436 	 * net_ack2 is used to track the total bytes acked that are
4437 	 * unambiguous and were never retransmitted. We track these on a
4438 	 * per destination address basis.
4439 	 */
4440 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4441 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4442 			/* Drag along the window_tsn for cwr's */
4443 			net->cwr_window_tsn = cum_ack;
4444 		}
4445 		net->prev_cwnd = net->cwnd;
4446 		net->net_ack = 0;
4447 		net->net_ack2 = 0;
4448 
4449 		/*
4450 		 * CMT: Reset CUC and Fast recovery algo variables before
4451 		 * SACK processing
4452 		 */
4453 		net->new_pseudo_cumack = 0;
4454 		net->will_exit_fast_recovery = 0;
4455 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4456 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4457 		}
4458 	}
4459 	/* process the new consecutive TSN first */
4460 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4461 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4462 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4463 				accum_moved = 1;
4464 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4465 					/*
4466 					 * If it is less than ACKED, it is
4467 					 * now no-longer in flight. Higher
4468 					 * values may occur during marking
4469 					 */
4470 					if ((tp1->whoTo->dest_state &
4471 					    SCTP_ADDR_UNCONFIRMED) &&
4472 					    (tp1->snd_count < 2)) {
4473 						/*
4474 						 * If there was no retran
4475 						 * and the address is
4476 						 * un-confirmed and we sent
4477 						 * there and are now
4478 						 * sacked... it's confirmed,
4479 						 * mark it so.
4480 						 */
4481 						tp1->whoTo->dest_state &=
4482 						    ~SCTP_ADDR_UNCONFIRMED;
4483 					}
4484 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4485 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4486 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4487 							    tp1->whoTo->flight_size,
4488 							    tp1->book_size,
4489 							    (uint32_t) (uintptr_t) tp1->whoTo,
4490 							    tp1->rec.data.TSN_seq);
4491 						}
4492 						sctp_flight_size_decrease(tp1);
4493 						sctp_total_flight_decrease(stcb, tp1);
4494 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4495 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4496 							    tp1);
4497 						}
4498 					}
4499 					tp1->whoTo->net_ack += tp1->send_size;
4500 
4501 					/* CMT SFR and DAC algos */
4502 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4503 					tp1->whoTo->saw_newack = 1;
4504 
4505 					if (tp1->snd_count < 2) {
4506 						/*
4507 						 * True non-retransmitted
4508 						 * chunk
4509 						 */
4510 						tp1->whoTo->net_ack2 +=
4511 						    tp1->send_size;
4512 
4513 						/* update RTO too? */
4514 						if (tp1->do_rtt) {
4515 							if (rto_ok) {
4516 								tp1->whoTo->RTO =
4517 								    sctp_calculate_rto(stcb,
4518 								    asoc, tp1->whoTo,
4519 								    &tp1->sent_rcv_time,
4520 								    sctp_align_safe_nocopy,
4521 								    SCTP_RTT_FROM_DATA);
4522 								rto_ok = 0;
4523 							}
4524 							if (tp1->whoTo->rto_needed == 0) {
4525 								tp1->whoTo->rto_needed = 1;
4526 							}
4527 							tp1->do_rtt = 0;
4528 						}
4529 					}
4530 					/*
4531 					 * CMT: CUCv2 algorithm. From the
4532 					 * cumack'd TSNs, for each TSN being
4533 					 * acked for the first time, set the
4534 					 * following variables for the
4535 					 * corresp destination.
4536 					 * new_pseudo_cumack will trigger a
4537 					 * cwnd update.
4538 					 * find_(rtx_)pseudo_cumack will
4539 					 * trigger search for the next
4540 					 * expected (rtx-)pseudo-cumack.
4541 					 */
4542 					tp1->whoTo->new_pseudo_cumack = 1;
4543 					tp1->whoTo->find_pseudo_cumack = 1;
4544 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4545 
4546 
4547 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4548 						sctp_log_sack(asoc->last_acked_seq,
4549 						    cum_ack,
4550 						    tp1->rec.data.TSN_seq,
4551 						    0,
4552 						    0,
4553 						    SCTP_LOG_TSN_ACKED);
4554 					}
4555 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4556 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4557 					}
4558 				}
4559 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4560 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4561 #ifdef SCTP_AUDITING_ENABLED
4562 					sctp_audit_log(0xB3,
4563 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4564 #endif
4565 				}
4566 				if (tp1->rec.data.chunk_was_revoked) {
4567 					/* deflate the cwnd */
4568 					tp1->whoTo->cwnd -= tp1->book_size;
4569 					tp1->rec.data.chunk_was_revoked = 0;
4570 				}
4571 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4572 					tp1->sent = SCTP_DATAGRAM_ACKED;
4573 				}
4574 			}
4575 		} else {
4576 			break;
4577 		}
4578 	}
4579 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4580 	/* always set this up to cum-ack */
4581 	asoc->this_sack_highest_gap = last_tsn;
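	/*
	 * Both start at the cum-ack; sctp_handle_segments() below raises
	 * biggest_tsn_acked to the top of the highest gap block and
	 * biggest_tsn_newly_acked to the highest TSN first acked by this
	 * SACK.
	 */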
4582 
4583 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4584 
4585 		/*
4586 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4587 		 * to be greater than the cumack. Also reset saw_newack to 0
4588 		 * for all dests.
4589 		 */
4590 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4591 			net->saw_newack = 0;
4592 			net->this_sack_highest_newack = last_tsn;
4593 		}
4594 
4595 		/*
4596 		 * thisSackHighestGap will increase while handling NEW
4597 		 * segments this_sack_highest_newack will increase while
4598 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4599 		 * used for CMT DAC algo. saw_newack will also change.
4600 		 */
4601 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4602 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4603 		    num_seg, num_nr_seg, &rto_ok)) {
4604 			wake_him++;
4605 		}
4606 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4607 			/*
4608 			 * validate the biggest_tsn_acked in the gap acks if
4609 			 * strict adherence is wanted.
4610 			 */
4611 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4612 				/*
4613 				 * peer is either confused or we are under
4614 				 * attack. We must abort.
4615 				 */
4616 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4617 				    biggest_tsn_acked, send_s);
4618 				goto hopeless_peer;
4619 			}
4620 		}
4621 	}
4622 	/*******************************************/
4623 	/* cancel ALL T3-send timer if accum moved */
4624 	/*******************************************/
4625 	if (asoc->sctp_cmt_on_off > 0) {
4626 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4627 			if (net->new_pseudo_cumack)
4628 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4629 				    stcb, net,
4630 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4631 
4632 		}
4633 	} else {
4634 		if (accum_moved) {
4635 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4636 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4637 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4638 			}
4639 		}
4640 	}
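	/*
	 * With CMT, a T3 timer is stopped only on a net whose pseudo-cumack
	 * moved; without CMT, a moving cum-ack stops the timer on every
	 * net. Nets that still have data in flight get their timers
	 * restarted once the SACK has been fully processed.
	 */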
4641 	/********************************************/
4642 	/* drop the acked chunks from the sentqueue */
4643 	/********************************************/
4644 	asoc->last_acked_seq = cum_ack;
4645 
4646 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4647 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4648 			break;
4649 		}
4650 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4651 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4652 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4653 #ifdef INVARIANTS
4654 			} else {
4655 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4656 #endif
4657 			}
4658 		}
4659 		if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4660 		    (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4661 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4662 			asoc->trigger_reset = 1;
4663 		}
4664 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4665 		if (PR_SCTP_ENABLED(tp1->flags)) {
4666 			if (asoc->pr_sctp_cnt != 0)
4667 				asoc->pr_sctp_cnt--;
4668 		}
4669 		asoc->sent_queue_cnt--;
4670 		if (tp1->data) {
4671 			/* sa_ignore NO_NULL_CHK */
4672 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4673 			sctp_m_freem(tp1->data);
4674 			tp1->data = NULL;
4675 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4676 				asoc->sent_queue_cnt_removeable--;
4677 			}
4678 		}
4679 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4680 			sctp_log_sack(asoc->last_acked_seq,
4681 			    cum_ack,
4682 			    tp1->rec.data.TSN_seq,
4683 			    0,
4684 			    0,
4685 			    SCTP_LOG_FREE_SENT);
4686 		}
4687 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4688 		wake_him++;
4689 	}
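	/*
	 * Everything at or below the cum-ack has now been freed; wake_him
	 * counts freed chunks so the writer can be woken below to refill
	 * the send window.
	 */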
4690 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4691 #ifdef INVARIANTS
4692 		panic("Warning flight size is positive and should be 0");
4693 #else
4694 		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4695 		    asoc->total_flight);
4696 #endif
4697 		asoc->total_flight = 0;
4698 	}
4699 	/* sa_ignore NO_NULL_CHK */
4700 	if ((wake_him) && (stcb->sctp_socket)) {
4701 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4702 		struct socket *so;
4703 
4704 #endif
4705 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4706 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4707 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4708 		}
4709 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4710 		so = SCTP_INP_SO(stcb->sctp_ep);
4711 		atomic_add_int(&stcb->asoc.refcnt, 1);
4712 		SCTP_TCB_UNLOCK(stcb);
4713 		SCTP_SOCKET_LOCK(so, 1);
4714 		SCTP_TCB_LOCK(stcb);
4715 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4716 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4717 			/* assoc was freed while we were unlocked */
4718 			SCTP_SOCKET_UNLOCK(so, 1);
4719 			return;
4720 		}
4721 #endif
4722 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4723 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4724 		SCTP_SOCKET_UNLOCK(so, 1);
4725 #endif
4726 	} else {
4727 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4728 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4729 		}
4730 	}
4731 
4732 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4733 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4734 			/* Setup so we will exit RFC2582 fast recovery */
4735 			will_exit_fast_recovery = 1;
4736 		}
4737 	}
4738 	/*
4739 	 * Check for revoked fragments:
4740 	 *
4741 	 * If the previous SACK had no frags then we can't have any revoked.
4742 	 * If the previous SACK had frags then: if we now have frags (aka
4743 	 * num_seg > 0) call sctp_check_for_revoked() to tell if the peer
4744 	 * revoked some of them; else the peer revoked all ACKED fragments,
4745 	 * since we had some before and now we have NONE.
4746 	 */
4747 
4748 	if (num_seg) {
4749 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4750 		asoc->saw_sack_with_frags = 1;
4751 	} else if (asoc->saw_sack_with_frags) {
4752 		int cnt_revoked = 0;
4753 
4754 		/* Peer revoked all dg's marked or acked */
4755 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4756 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4757 				tp1->sent = SCTP_DATAGRAM_SENT;
4758 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4759 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4760 					    tp1->whoTo->flight_size,
4761 					    tp1->book_size,
4762 					    (uint32_t) (uintptr_t) tp1->whoTo,
4763 					    tp1->rec.data.TSN_seq);
4764 				}
4765 				sctp_flight_size_increase(tp1);
4766 				sctp_total_flight_increase(stcb, tp1);
4767 				tp1->rec.data.chunk_was_revoked = 1;
4768 				/*
4769 				 * To ensure that this increase in
4770 				 * flightsize, which is artificial, does not
4771 				 * throttle the sender, we also increase the
4772 				 * cwnd artificially.
4773 				 */
4774 				tp1->whoTo->cwnd += tp1->book_size;
4775 				cnt_revoked++;
4776 			}
4777 		}
4778 		if (cnt_revoked) {
4779 			reneged_all = 1;
4780 		}
4781 		asoc->saw_sack_with_frags = 0;
4782 	}
4783 	if (num_nr_seg > 0)
4784 		asoc->saw_sack_with_nr_frags = 1;
4785 	else
4786 		asoc->saw_sack_with_nr_frags = 0;
4787 
4788 	/* JRS - Use the congestion control given in the CC module */
4789 	if (ecne_seen == 0) {
4790 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4791 			if (net->net_ack2 > 0) {
4792 				/*
4793 				 * Karn's rule applies to clearing error
4794 				 * count, this is optional.
4795 				 */
4796 				net->error_count = 0;
4797 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4798 					/* addr came good */
4799 					net->dest_state |= SCTP_ADDR_REACHABLE;
4800 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4801 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4802 				}
4803 				if (net == stcb->asoc.primary_destination) {
4804 					if (stcb->asoc.alternate) {
4805 						/*
4806 						 * release the alternate,
4807 						 * primary is good
4808 						 */
4809 						sctp_free_remote_addr(stcb->asoc.alternate);
4810 						stcb->asoc.alternate = NULL;
4811 					}
4812 				}
4813 				if (net->dest_state & SCTP_ADDR_PF) {
4814 					net->dest_state &= ~SCTP_ADDR_PF;
4815 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4816 					    stcb->sctp_ep, stcb, net,
4817 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4818 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4819 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4820 					/* Done with this net */
4821 					net->net_ack = 0;
4822 				}
4823 				/* restore any doubled timers */
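				/*
				 * A sketch of the computation below: RTO is
				 * rebuilt from the smoothed RTT (lastsa, kept
				 * shifted by SCTP_RTT_SHIFT) plus the RTT
				 * variance (lastsv), then clamped to the
				 * [minrto, maxrto] range, discarding any
				 * exponential backoff from earlier timeouts.
				 */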
4824 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4825 				if (net->RTO < stcb->asoc.minrto) {
4826 					net->RTO = stcb->asoc.minrto;
4827 				}
4828 				if (net->RTO > stcb->asoc.maxrto) {
4829 					net->RTO = stcb->asoc.maxrto;
4830 				}
4831 			}
4832 		}
4833 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4834 	}
4835 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4836 		/* nothing left in-flight */
4837 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4838 			/* stop all timers */
4839 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4840 			    stcb, net,
4841 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4842 			net->flight_size = 0;
4843 			net->partial_bytes_acked = 0;
4844 		}
4845 		asoc->total_flight = 0;
4846 		asoc->total_flight_count = 0;
4847 	}
4848 	/**********************************/
4849 	/* Now what about shutdown issues */
4850 	/**********************************/
4851 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4852 		/* nothing left on the send queue... consider done */
4853 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4854 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4855 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4856 		}
4857 		asoc->peers_rwnd = a_rwnd;
4858 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4859 			/* SWS sender side engages */
4860 			asoc->peers_rwnd = 0;
4861 		}
4862 		/* clean up */
4863 		if ((asoc->stream_queue_cnt == 1) &&
4864 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4865 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4866 		    (asoc->locked_on_sending)
4867 		    ) {
4868 			struct sctp_stream_queue_pending *sp;
4869 
4870 			/*
4871 			 * We may be in a state where everything got across
4872 			 * but we cannot write more due to a shutdown; we
4873 			 * abort since the user did not indicate EOR here.
4874 			 */
4875 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4876 			    sctp_streamhead);
4877 			if ((sp) && (sp->length == 0)) {
4878 				asoc->locked_on_sending = NULL;
4879 				if (sp->msg_is_complete) {
4880 					asoc->stream_queue_cnt--;
4881 				} else {
4882 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4883 					asoc->stream_queue_cnt--;
4884 				}
4885 			}
4886 		}
4887 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4888 		    (asoc->stream_queue_cnt == 0)) {
4889 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4890 				/* Need to abort here */
4891 				struct mbuf *op_err;
4892 
4893 		abort_out_now:
4894 				*abort_now = 1;
4895 				/* XXX */
4896 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4897 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4898 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4899 				return;
4900 			} else {
4901 				struct sctp_nets *netp;
4902 
4903 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4904 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4905 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4906 				}
4907 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4908 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4909 				sctp_stop_timers_for_shutdown(stcb);
4910 				if (asoc->alternate) {
4911 					netp = asoc->alternate;
4912 				} else {
4913 					netp = asoc->primary_destination;
4914 				}
4915 				sctp_send_shutdown(stcb, netp);
4916 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4917 				    stcb->sctp_ep, stcb, netp);
4918 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4919 				    stcb->sctp_ep, stcb, netp);
4920 			}
4921 			return;
4922 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4923 		    (asoc->stream_queue_cnt == 0)) {
4924 			struct sctp_nets *netp;
4925 
4926 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4927 				goto abort_out_now;
4928 			}
4929 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4930 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4931 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4932 			sctp_stop_timers_for_shutdown(stcb);
4933 			if (asoc->alternate) {
4934 				netp = asoc->alternate;
4935 			} else {
4936 				netp = asoc->primary_destination;
4937 			}
4938 			sctp_send_shutdown_ack(stcb, netp);
4939 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4940 			    stcb->sctp_ep, stcb, netp);
4941 			return;
4942 		}
4943 	}
4944 	/*
4945 	 * Now here we are going to recycle net_ack for a different use...
4946 	 * HEADS UP.
4947 	 */
4948 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4949 		net->net_ack = 0;
4950 	}
4951 
4952 	/*
4953 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4954 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4955 	 * automatically ensure that.
4956 	 */
4957 	if ((asoc->sctp_cmt_on_off > 0) &&
4958 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4959 	    (cmt_dac_flag == 0)) {
4960 		this_sack_lowest_newack = cum_ack;
4961 	}
4962 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4963 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4964 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4965 	}
4966 	/* JRS - Use the congestion control given in the CC module */
4967 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4968 
4969 	/* Now are we exiting loss recovery ? */
4970 	if (will_exit_fast_recovery) {
4971 		/* Ok, we must exit fast recovery */
4972 		asoc->fast_retran_loss_recovery = 0;
4973 	}
4974 	if ((asoc->sat_t3_loss_recovery) &&
4975 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4976 		/* end satellite t3 loss recovery */
4977 		asoc->sat_t3_loss_recovery = 0;
4978 	}
4979 	/*
4980 	 * CMT Fast recovery
4981 	 */
4982 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4983 		if (net->will_exit_fast_recovery) {
4984 			/* Ok, we must exit fast recovery */
4985 			net->fast_retran_loss_recovery = 0;
4986 		}
4987 	}
4988 
4989 	/* Adjust and set the new rwnd value */
4990 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4991 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4992 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4993 	}
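	/*
	 * A sketch of the computation below, assuming sctp_peer_chunk_oh
	 * is the assumed per-chunk bookkeeping overhead at the peer:
	 *
	 *   peers_rwnd = a_rwnd - (total_flight +
	 *                total_flight_count * sctp_peer_chunk_oh)
	 *
	 * with sctp_sbspace_sub() flooring the result at zero, since every
	 * byte still in flight presumably occupies peer window space.
	 */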
4994 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4995 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4996 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4997 		/* SWS sender side engages */
4998 		asoc->peers_rwnd = 0;
4999 	}
5000 	if (asoc->peers_rwnd > old_rwnd) {
5001 		win_probe_recovery = 1;
5002 	}
5003 	/*
5004 	 * Now we must setup so we have a timer up for anyone with
5005 	 * outstanding data.
5006 	 */
5007 	done_once = 0;
5008 again:
5009 	j = 0;
5010 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5011 		if (win_probe_recovery && (net->window_probe)) {
5012 			win_probe_recovered = 1;
5013 			/*-
5014 			 * Find first chunk that was used with
5015 			 * window probe and clear the event. Put
5016 			 * it back into the send queue as if it had
5017 			 * not been sent.
5018 			 */
5019 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5020 				if (tp1->window_probe) {
5021 					sctp_window_probe_recovery(stcb, asoc, tp1);
5022 					break;
5023 				}
5024 			}
5025 		}
5026 		if (net->flight_size) {
5027 			j++;
5028 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5029 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5030 				    stcb->sctp_ep, stcb, net);
5031 			}
5032 			if (net->window_probe) {
5033 				net->window_probe = 0;
5034 			}
5035 		} else {
5036 			if (net->window_probe) {
5037 				/*
5038 				 * In window probes we must assure a timer
5039 				 * is still running there
5040 				 */
5041 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5042 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5043 					    stcb->sctp_ep, stcb, net);
5044 
5045 				}
5046 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5047 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5048 				    stcb, net,
5049 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5050 			}
5051 		}
5052 	}
5053 	if ((j == 0) &&
5054 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5055 	    (asoc->sent_queue_retran_cnt == 0) &&
5056 	    (win_probe_recovered == 0) &&
5057 	    (done_once == 0)) {
5058 		/*
5059 		 * Huh, this should not happen unless all packets are
5060 		 * PR-SCTP and marked to be skipped, of course.
5061 		 */
5062 		if (sctp_fs_audit(asoc)) {
5063 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5064 				net->flight_size = 0;
5065 			}
5066 			asoc->total_flight = 0;
5067 			asoc->total_flight_count = 0;
5068 			asoc->sent_queue_retran_cnt = 0;
5069 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5070 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5071 					sctp_flight_size_increase(tp1);
5072 					sctp_total_flight_increase(stcb, tp1);
5073 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5074 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5075 				}
5076 			}
5077 		}
5078 		done_once = 1;
5079 		goto again;
5080 	}
5081 	/*********************************************/
5082 	/* Here we perform PR-SCTP procedures        */
5083 	/* (section 4.2)                             */
5084 	/*********************************************/
5085 	/* C1. update advancedPeerAckPoint */
5086 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5087 		asoc->advanced_peer_ack_point = cum_ack;
5088 	}
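	/*
	 * Worked example: with cum_ack = 100 and an ack point of 95, the
	 * ack point snaps up to 100 here before C2 below tries to walk it
	 * further forward across abandoned PR-SCTP chunks.
	 */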
5089 	/* C2. try to further move advancedPeerAckPoint ahead */
5090 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5091 		struct sctp_tmit_chunk *lchk;
5092 		uint32_t old_adv_peer_ack_point;
5093 
5094 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5095 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5096 		/* C3. See if we need to send a Fwd-TSN */
5097 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5098 			/*
5099 			 * ISSUE with ECN, see FWD-TSN processing.
5100 			 */
5101 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5102 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5103 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5104 				    old_adv_peer_ack_point);
5105 			}
5106 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5107 				send_forward_tsn(stcb, asoc);
5108 			} else if (lchk) {
5109 				/* try to fast retransmit fwd-tsn's that get lost too */
5110 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5111 					send_forward_tsn(stcb, asoc);
5112 				}
5113 			}
5114 		}
5115 		if (lchk) {
5116 			/* Assure a timer is up */
5117 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5118 			    stcb->sctp_ep, stcb, lchk->whoTo);
5119 		}
5120 	}
5121 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5122 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5123 		    a_rwnd,
5124 		    stcb->asoc.peers_rwnd,
5125 		    stcb->asoc.total_flight,
5126 		    stcb->asoc.total_output_queue_size);
5127 	}
5128 }
5129 
5130 void
5131 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5132 {
5133 	/* Copy cum-ack */
5134 	uint32_t cum_ack, a_rwnd;
5135 
5136 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5137 	/* Arrange so a_rwnd does NOT change */
5138 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
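	/*
	 * A sketch of why this works: the express handler will subtract
	 * what is still in flight from a_rwnd when it recomputes
	 * peers_rwnd, so priming a_rwnd with peers_rwnd + total_flight
	 * leaves the stored window roughly where it started.
	 */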
5139 
5140 	/* Now call the express sack handling */
5141 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5142 }
5143 
5144 static void
5145 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5146     struct sctp_stream_in *strmin)
5147 {
5148 	struct sctp_queued_to_read *ctl, *nctl;
5149 	struct sctp_association *asoc;
5150 	uint32_t tt;
5151 	int need_reasm_check = 0, old;
5152 
5153 	asoc = &stcb->asoc;
5154 	tt = strmin->last_sequence_delivered;
5155 	if (asoc->idata_supported) {
5156 		old = 0;
5157 	} else {
5158 		old = 1;
5159 	}
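	/*
	 * Note (an assumption worth stating): `old' steers the
	 * SCTP_MSGID_* comparisons below between 16-bit SSN serial
	 * arithmetic for classic DATA and 32-bit MID arithmetic when
	 * I-DATA is supported.
	 */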
5160 	/*
5161 	 * First deliver anything prior to and including the sequence
5162 	 * number that came in.
5163 	 */
5164 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5165 		if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) {
5166 			/* this is deliverable now */
5167 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5168 				if (ctl->on_strm_q) {
5169 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5170 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5171 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5172 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5173 #ifdef INVARIANTS
5174 					} else {
5175 						panic("strmin: %p ctl: %p unknown %d",
5176 						    strmin, ctl, ctl->on_strm_q);
5177 #endif
5178 					}
5179 					ctl->on_strm_q = 0;
5180 				}
5181 				/* subtract pending on streams */
5182 				asoc->size_on_all_streams -= ctl->length;
5183 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5184 				/* deliver it to at least the delivery-q */
5185 				if (stcb->sctp_socket) {
5186 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5187 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5188 					    ctl,
5189 					    &stcb->sctp_socket->so_rcv,
5190 					    1, SCTP_READ_LOCK_HELD,
5191 					    SCTP_SO_NOT_LOCKED);
5192 				}
5193 			} else {
5194 				/* It's a fragmented message */
5195 				if (ctl->first_frag_seen) {
5196 					/*
5197 					 * Make it so this is next to
5198 					 * deliver; we restore it later
5199 					 */
5200 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5201 					need_reasm_check = 1;
5202 					break;
5203 				}
5204 			}
5205 		} else {
5206 			/* no more delivery now. */
5207 			break;
5208 		}
5209 	}
5210 	if (need_reasm_check) {
5211 		int ret;
5212 
5213 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5214 		if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) {
5215 			/* Restore the next to deliver unless we are ahead */
5216 			strmin->last_sequence_delivered = tt;
5217 		}
5218 		if (ret == 0) {
5219 			/* Left the front Partial one on */
5220 			return;
5221 		}
5222 		need_reasm_check = 0;
5223 	}
5224 	/*
5225 	 * Now we must deliver things in the queue the normal way, if
5226 	 * any are now ready.
5227 	 */
5228 	tt = strmin->last_sequence_delivered + 1;
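	/*
	 * From here delivery is strictly in order: only the message whose
	 * sequence equals tt may go up, and tt advances by one as each
	 * message is handed to the read queue.
	 */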
5229 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5230 		if (tt == ctl->sinfo_ssn) {
5231 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5232 				/* this is deliverable now */
5233 				if (ctl->on_strm_q) {
5234 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5235 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5236 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5237 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5238 #ifdef INVARIANTS
5239 					} else {
5240 						panic("strmin: %p ctl: %p unknown %d",
5241 						    strmin, ctl, ctl->on_strm_q);
5242 #endif
5243 					}
5244 					ctl->on_strm_q = 0;
5245 				}
5246 				/* subtract pending on streams */
5247 				asoc->size_on_all_streams -= ctl->length;
5248 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5249 				/* deliver it to at least the delivery-q */
5250 				strmin->last_sequence_delivered = ctl->sinfo_ssn;
5251 				if (stcb->sctp_socket) {
5252 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5253 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5254 					    ctl,
5255 					    &stcb->sctp_socket->so_rcv, 1,
5256 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5257 
5258 				}
5259 				tt = strmin->last_sequence_delivered + 1;
5260 			} else {
5261 				/* It's a fragmented message */
5262 				if (ctl->first_frag_seen) {
5263 					/*
5264 					 * Make it so this is next to
5265 					 * deliver
5266 					 */
5267 					strmin->last_sequence_delivered = ctl->sinfo_ssn - 1;
5268 					need_reasm_check = 1;
5269 					break;
5270 				}
5271 			}
5272 		} else {
5273 			break;
5274 		}
5275 	}
5276 	if (need_reasm_check) {
5277 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin);
5278 	}
5279 }
5280 
5281 static void
5282 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5283     struct sctp_association *asoc,
5284     uint16_t stream, uint32_t seq)
5285 {
5286 	struct sctp_queued_to_read *control;
5287 	struct sctp_stream_in *strm;
5288 	struct sctp_tmit_chunk *chk, *nchk;
5289 
5290 	/*
5291 	 * For now, large messages held on the stream reassembly queue that
5292 	 * are complete will be tossed too. We could in theory do more work:
5293 	 * spin through and stop after dumping one msg, i.e. on seeing the
5294 	 * start of a new msg at the head, and call the delivery function
5295 	 * to see if it can be delivered. But for now we just dump
5296 	 * everything on the queue.
5297 	 */
5298 	strm = &asoc->strmin[stream];
5299 	control = find_reasm_entry(strm, (uint32_t) seq, 0, 0);
5300 	if (control == NULL) {
5301 		/* Not found */
5302 		return;
5303 	}
5304 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5305 		/* Purge hanging chunks */
5306 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5307 		asoc->size_on_reasm_queue -= chk->send_size;
5308 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5309 		if (chk->data) {
5310 			sctp_m_freem(chk->data);
5311 			chk->data = NULL;
5312 		}
5313 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5314 	}
5315 	TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5316 	if (control->on_read_q == 0) {
5317 		sctp_free_remote_addr(control->whoFrom);
5318 		if (control->data) {
5319 			sctp_m_freem(control->data);
5320 			control->data = NULL;
5321 		}
5322 		sctp_free_a_readq(stcb, control);
5323 	}
5324 }
5325 
5326 
5327 void
5328 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5329     struct sctp_forward_tsn_chunk *fwd,
5330     int *abort_flag, struct mbuf *m, int offset)
5331 {
5332 	/* The pr-sctp fwd tsn */
5333 	/*
5334 	 * Here we will perform all the data receiver side steps for
5335 	 * processing FwdTSN, as required by the PR-SCTP draft:
5336 	 *
5337 	 * Assume we get FwdTSN(x):
5338 	 * 1) update local cumTSN to x
5339 	 * 2) try to further advance cumTSN to x + others we have
5340 	 * 3) examine and update re-ordering queue on pr-in-streams
5341 	 * 4) clean up re-assembly queue
5342 	 * 5) Send a sack to report where we are.
5343 	 */
5344 	struct sctp_association *asoc;
5345 	uint32_t new_cum_tsn, gap;
5346 	unsigned int i, fwd_sz, m_size;
5347 	uint32_t str_seq;
5348 	struct sctp_stream_in *strm;
5349 	struct sctp_queued_to_read *ctl, *sv;
5350 
5351 	asoc = &stcb->asoc;
5352 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5353 		SCTPDBG(SCTP_DEBUG_INDATA1,
5354 		    "Bad size, too small fwd-tsn\n");
5355 		return;
5356 	}
5357 	m_size = (stcb->asoc.mapping_array_size << 3);
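	/*
	 * mapping_array_size is in bytes; shifting left by 3 yields the
	 * number of TSN bits the mapping window can track.
	 */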
5358 	/*************************************************************/
5359 	/* 1. Here we update local cumTSN and shift the bitmap array */
5360 	/*************************************************************/
5361 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5362 
5363 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5364 		/* Already got there ... */
5365 		return;
5366 	}
5367 	/*
5368 	 * now we know the new TSN is more advanced, let's find the actual
5369 	 * gap
5370 	 */
5371 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5372 	asoc->cumulative_tsn = new_cum_tsn;
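	/*
	 * Two cases below: if the gap runs past the end of the mapping
	 * window (gap >= m_size), the jump covers the whole map, so both
	 * maps are cleared and re-based at new_cum_tsn + 1; otherwise each
	 * TSN up to the gap is simply marked present in nr_mapping_array.
	 */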
5373 	if (gap >= m_size) {
5374 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5375 			struct mbuf *op_err;
5376 			char msg[SCTP_DIAG_INFO_LEN];
5377 
5378 			/*
5379 			 * out of range (of single-byte chunks in the rwnd I
5380 			 * give out). This must be an attacker.
5381 			 */
5382 			*abort_flag = 1;
5383 			snprintf(msg, sizeof(msg),
5384 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5385 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5386 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5387 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5388 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5389 			return;
5390 		}
5391 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5392 
5393 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5394 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5395 		asoc->highest_tsn_inside_map = new_cum_tsn;
5396 
5397 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5398 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5399 
5400 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5401 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5402 		}
5403 	} else {
5404 		SCTP_TCB_LOCK_ASSERT(stcb);
5405 		for (i = 0; i <= gap; i++) {
5406 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5407 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5408 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5409 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5410 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5411 				}
5412 			}
5413 		}
5414 	}
5415 	/*************************************************************/
5416 	/* 2. Clear up re-assembly queue                             */
5417 	/*************************************************************/
5418 
5419 	/* This is now done as part of clearing up the stream/seq */
5420 
5421 	/*******************************************************/
5422 	/* 3. Update the PR-stream re-ordering queues and fix  */
5423 	/* delivery issues as needed.                          */
5424 	/*******************************************************/
5425 	fwd_sz -= sizeof(*fwd);
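	/*
	 * What remains of the chunk is a list of per-message entries,
	 * each naming a (stream, sequence) pair being skipped: struct
	 * sctp_strseq for classic DATA, struct sctp_strseq_mid when
	 * I-DATA is in use.
	 */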
5426 	if (m && fwd_sz) {
5427 		/* New method. */
5428 		unsigned int num_str;
5429 		uint32_t sequence;
5430 		uint16_t stream;
5431 		int old;
5432 		struct sctp_strseq *stseq, strseqbuf;
5433 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5434 
5435 		offset += sizeof(*fwd);
5436 
5437 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5438 		if (asoc->idata_supported) {
5439 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5440 			old = 0;
5441 		} else {
5442 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5443 			old = 1;
5444 		}
5445 		for (i = 0; i < num_str; i++) {
5446 			if (asoc->idata_supported) {
5447 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5448 				    sizeof(struct sctp_strseq_mid),
5449 				    (uint8_t *) & strseqbuf_m);
5450 				offset += sizeof(struct sctp_strseq_mid);
5451 				if (stseq_m == NULL) {
5452 					break;
5453 				}
5454 				stream = ntohs(stseq_m->stream);
5455 				sequence = ntohl(stseq_m->msg_id);
5456 			} else {
5457 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5458 				    sizeof(struct sctp_strseq),
5459 				    (uint8_t *) & strseqbuf);
5460 				offset += sizeof(struct sctp_strseq);
5461 				if (stseq == NULL) {
5462 					break;
5463 				}
5464 				stream = ntohs(stseq->stream);
5465 				sequence = (uint32_t) ntohs(stseq->sequence);
5466 			}
5467 			/* Convert */
5468 
5469 			/* now process */
5470 
5471 			/*
5472 			 * OK, we now look for the stream/seq on the read
5473 			 * queue where it's not all delivered. If we find it,
5474 			 * we transmute the read entry into a PDI_ABORTED.
5475 			 */
5476 			if (stream >= asoc->streamincnt) {
5477 				/* screwed up streams, stop!  */
5478 				break;
5479 			}
5480 			if ((asoc->str_of_pdapi == stream) &&
5481 			    (asoc->ssn_of_pdapi == sequence)) {
5482 				/*
5483 				 * If this is the one we were partially
5484 				 * delivering now then we no longer are.
5485 				 * Note this will change with the reassembly
5486 				 * re-write.
5487 				 */
5488 				asoc->fragmented_delivery_inprogress = 0;
5489 			}
5490 			strm = &asoc->strmin[stream];
5491 			sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
5492 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5493 				if ((ctl->sinfo_stream == stream) &&
5494 				    (ctl->sinfo_ssn == sequence)) {
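					/*
					 * The ULP notification carries the
					 * pair packed into one 32-bit value:
					 * stream in the upper 16 bits, the
					 * 16-bit sequence in the lower 16.
					 */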
5495 					str_seq = (stream << 16) | (0x0000ffff & sequence);
5496 					ctl->pdapi_aborted = 1;
5497 					sv = stcb->asoc.control_pdapi;
5498 					ctl->end_added = 1;
5499 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5500 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5501 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5502 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5503 #ifdef INVARIANTS
5504 					} else if (ctl->on_strm_q) {
5505 						panic("strm: %p ctl: %p unknown %d",
5506 						    strm, ctl, ctl->on_strm_q);
5507 #endif
5508 					}
5509 					ctl->on_strm_q = 0;
5510 					stcb->asoc.control_pdapi = ctl;
5511 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5512 					    stcb,
5513 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5514 					    (void *)&str_seq,
5515 					    SCTP_SO_NOT_LOCKED);
5516 					stcb->asoc.control_pdapi = sv;
5517 					break;
5518 				} else if ((ctl->sinfo_stream == stream) &&
5519 				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
5520 					/* We are past our victim SSN */
5521 					break;
5522 				}
5523 			}
5524 			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
5525 				/* Update the sequence number */
5526 				strm->last_sequence_delivered = sequence;
5527 			}
5528 			/* now kick the stream the new way */
5529 			/* sa_ignore NO_NULL_CHK */
5530 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5531 		}
5532 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5533 	}
5534 	/*
5535 	 * Now slide things forward.
5536 	 */
5537 	sctp_slide_mapping_arrays(stcb);
5538 }
5539