xref: /freebsd/sys/netinet/sctp_indata.c (revision 6c925b9c81036a86db387f75a32b423420eadf6c)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (that is, if I have
 * data to send and will be sending it, so the SACK can be bundled).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* take out the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. This avoids silly window syndrome (SWS).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
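
/*
 * Worked example for the window math above, using hypothetical numbers:
 * assume sctp_sbspace() reports 200000 bytes, 3000 bytes sit on the
 * reassembly queue in 2 chunks, 5000 bytes sit on the stream queues in 4
 * chunks, MSIZE is 256 and my_rwnd_control_len is 1024.  Then
 * calc = 200000 - (3000 + 2 * 256) - (5000 + 4 * 256) - 1024 = 189440,
 * which is well above my_rwnd_control_len, so no SWS clamp to 1 applies.
 */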



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}
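
/*
 * Note: sinfo_flags carries the DATA chunk flags in its upper byte
 * ((flags << 8) above); consumers later in this file recover them with
 * (sinfo_flags >> 8).
 */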

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
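
/*
 * Layout sketch of the ancillary-data mbuf built above when all three
 * features are enabled (each slot is CMSG_SPACE()-padded, so exact byte
 * counts are platform dependent):
 *
 *   [ cmsghdr | sctp_rcvinfo    ]  level IPPROTO_SCTP, type SCTP_RCVINFO
 *   [ cmsghdr | sctp_nxtinfo    ]  level IPPROTO_SCTP, type SCTP_NXTINFO
 *   [ cmsghdr | sctp_sndrcvinfo ]  type SCTP_SNDRCV (or sctp_extrcvinfo
 *                                  with type SCTP_EXTRCV when extended)
 */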


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
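
/*
 * In the two bitmaps above, mapping_array tracks received TSNs that are
 * still renegable (could be dropped again under memory pressure), while
 * nr_mapping_array tracks TSNs reported as non-renegable.  This function
 * moves a TSN from the former to the latter and, if that TSN was the
 * highest bit in the renegable map, walks backwards to find the new
 * highest.
 */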

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one message can be here in old
				 * style -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, the peer sent a duplicate msg id
				 * number; return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
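
/*
 * The insertion sort above relies on SCTP_MID_GT()/SCTP_MID_EQ() doing
 * serial-number comparison at the proper width (16-bit SSNs for DATA,
 * 32-bit MIDs for I-DATA), so the ordering stays correct across a
 * message-id wrap.
 */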

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t) chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we run out of in-order data, as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    control->mid, strm->last_mid_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t) strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t) control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
		goto protocol_error;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}


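/*
 * Walk control->data, pruning zero-length mbufs, recomputing
 * control->length, and leaving control->tail_mbuf pointing at the last
 * mbuf so later appends are O(1).
 */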
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

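/*
 * Append the mbuf chain m at control->tail_mbuf, again skipping
 * zero-length mbufs and updating length, tail pointer and (when the
 * control is already on the read queue) the socket-buffer accounting.
 */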
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If we return 1, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the peer is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok let's add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						asoc->size_on_reasm_queue -= tchk->send_size;
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN-wise,
			 * i.e. FSN-wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy case: the start of the new message
				 * is beyond the lowest
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed. Yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered message
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}


void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	int do_wakeup, unordered;

	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
			/*
			 * Ok we created this control and now let's
			 * validate that it's legal, i.e. there is a B bit
			 * set; if not, and we have up to the cum-ack, then
			 * it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if it's
	 * the first it goes to the control mbuf. o if it's not first but
	 * the next in sequence it goes to the control, and each succeeding
	 * one in order also goes. o if it's not in order we place it on the
	 * list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on sender's part: they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen the
			 * last one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, the peer sent a duplicate str seq
				 * number
				 */
				/*
				 * I guess I will just free this new guy;
				 * should we abort too? FIX ME MAYBE? Or it
				 * COULD be that the SSN's have wrapped.
				 * Maybe I should compare to TSN somehow...
				 * sigh, for now just blow away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok, let's see if we can pull any of the chunks that are now in
	 * sequence up into the control structure.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
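
/*
 * Note the asymmetry above: without I-DATA support, old unordered DATA
 * carries no message id to match on, so all of it shares the single
 * control at the head of uno_inqueue (see
 * sctp_handle_old_unordered_data()).
 */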
1564 
1565 static int
1566 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1567     struct mbuf **m, int offset, int chk_length,
1568     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1569     int *break_flag, int last_chunk, uint8_t chk_type)
1570 {
1571 	/* Process a data chunk */
1572 	/* struct sctp_tmit_chunk *chk; */
1573 	struct sctp_tmit_chunk *chk;
1574 	uint32_t tsn, fsn, gap, mid;
1575 	struct mbuf *dmbuf;
1576 	int the_len;
1577 	int need_reasm_check = 0;
1578 	uint16_t sid;
1579 	struct mbuf *op_err;
1580 	char msg[SCTP_DIAG_INFO_LEN];
1581 	struct sctp_queued_to_read *control = NULL;
1582 	uint32_t ppid;
1583 	uint8_t chk_flags;
1584 	struct sctp_stream_reset_list *liste;
1585 	struct sctp_stream_in *strm;
1586 	int ordered;
1587 	size_t clen;
1588 	int created_control = 0;
1589 
1590 	if (chk_type == SCTP_IDATA) {
1591 		struct sctp_idata_chunk *chunk, chunk_buf;
1592 
1593 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1594 		    sizeof(struct sctp_idata_chunk), (uint8_t *) & chunk_buf);
1595 		chk_flags = chunk->ch.chunk_flags;
1596 		clen = sizeof(struct sctp_idata_chunk);
1597 		tsn = ntohl(chunk->dp.tsn);
1598 		sid = ntohs(chunk->dp.sid);
1599 		mid = ntohl(chunk->dp.mid);
1600 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1601 			fsn = 0;
1602 			ppid = chunk->dp.ppid_fsn.ppid;
1603 		} else {
1604 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1605 			ppid = 0xffffffff;	/* Use as an invalid value. */
1606 		}
1607 	} else {
1608 		struct sctp_data_chunk *chunk, chunk_buf;
1609 
1610 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1611 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1612 		chk_flags = chunk->ch.chunk_flags;
1613 		clen = sizeof(struct sctp_data_chunk);
1614 		tsn = ntohl(chunk->dp.tsn);
1615 		sid = ntohs(chunk->dp.sid);
1616 		mid = (uint32_t) (ntohs(chunk->dp.ssn));
1617 		fsn = tsn;
1618 		ppid = chunk->dp.ppid;
1619 	}
1620 	if ((size_t)chk_length == clen) {
1621 		/*
1622 		 * Need to send an abort since we had an empty data chunk.
1623 		 */
1624 		op_err = sctp_generate_no_user_data_cause(tsn);
1625 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1626 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1627 		*abort_flag = 1;
1628 		return (0);
1629 	}
1630 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1631 		asoc->send_sack = 1;
1632 	}
1633 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1634 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1635 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1636 	}
1637 	if (stcb == NULL) {
1638 		return (0);
1639 	}
1640 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1641 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1642 		/* It is a duplicate */
1643 		SCTP_STAT_INCR(sctps_recvdupdata);
1644 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1645 			/* Record a dup for the next outbound sack */
1646 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1647 			asoc->numduptsns++;
1648 		}
1649 		asoc->send_sack = 1;
1650 		return (0);
1651 	}
1652 	/* Calculate the number of TSNs between the base and this TSN */
1653 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1654 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1655 		/* Can't hold the bit beyond the mapping array's maximum size, toss it */
1656 		return (0);
1657 	}
1658 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1659 		SCTP_TCB_LOCK_ASSERT(stcb);
1660 		if (sctp_expand_mapping_array(asoc, gap)) {
1661 			/* Can't expand, drop it */
1662 			return (0);
1663 		}
1664 	}
1665 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1666 		*high_tsn = tsn;
1667 	}
1668 	/* See if we have received this one already */
1669 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1670 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1671 		SCTP_STAT_INCR(sctps_recvdupdata);
1672 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1673 			/* Record a dup for the next outbound sack */
1674 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1675 			asoc->numduptsns++;
1676 		}
1677 		asoc->send_sack = 1;
1678 		return (0);
1679 	}
1680 	/*
1681 	 * Check the GONE flag; duplicates would have caused a SACK to be
1682 	 * sent up above.
1683 	 */
1684 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1685 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1686 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1687 		/*
1688 		 * wait a minute, this guy is gone, there is no longer a
1689 		 * receiver. Send peer an ABORT!
1690 		 */
1691 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1692 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1693 		*abort_flag = 1;
1694 		return (0);
1695 	}
1696 	/*
1697 	 * Now before going further we see if there is room. If NOT then we
1698 	 * MAY let one through only IF this TSN is the one we are waiting
1699 	 * for on a partial delivery API.
1700 	 */
1701 
1702 	/* Is the stream valid? */
1703 	if (sid >= asoc->streamincnt) {
1704 		struct sctp_error_invalid_stream *cause;
1705 
1706 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1707 		    0, M_NOWAIT, 1, MT_DATA);
1708 		if (op_err != NULL) {
1709 			/* add some space up front so prepend will work well */
1710 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1711 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1712 			/*
1713 			 * Error causes are just parameters, and this one has two
1714 			 * back-to-back parameter headers: one with the error type
1715 			 * and size, the other with the stream id and a reserved field.
1716 			 */
1717 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1718 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1719 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1720 			cause->stream_id = htons(sid);
1721 			cause->reserved = htons(0);
1722 			sctp_queue_op_err(stcb, op_err);
1723 		}
1724 		SCTP_STAT_INCR(sctps_badsid);
1725 		SCTP_TCB_LOCK_ASSERT(stcb);
1726 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1727 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1728 			asoc->highest_tsn_inside_nr_map = tsn;
1729 		}
1730 		if (tsn == (asoc->cumulative_tsn + 1)) {
1731 			/* Update cum-ack */
1732 			asoc->cumulative_tsn = tsn;
1733 		}
1734 		return (0);
1735 	}
1736 	strm = &asoc->strmin[sid];
1737 	/*
1738 	 * If it's a fragmented message, let's see if we can find the control
1739 	 * on the reassembly queues.
1740 	 */
1741 	if ((chk_type == SCTP_IDATA) &&
1742 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1743 	    (fsn == 0)) {
1744 		/*
1745 		 * The first *must* be fsn 0, and other (middle/end) pieces
1746 		 * can *not* be fsn 0. XXX: This can happen in case of a
1747 		 * wrap around. Ignore it for now.
1748 		 */
1749 		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1750 		    mid, chk_flags);
1751 		goto err_out;
1752 	}
1753 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
1754 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1755 	    chk_flags, control);
1756 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1757 		/* See if we can find the re-assembly entity */
1758 		if (control != NULL) {
1759 			/* We found something, does it belong? */
1760 			if (ordered && (mid != control->mid)) {
1761 				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1762 		err_out:
1763 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1765 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1766 				*abort_flag = 1;
1767 				return (0);
1768 			}
1769 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1770 				/* We can't have a switched order with an
1771 				 * unordered chunk */
1772 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1773 				    tsn);
1774 				goto err_out;
1775 			}
1776 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1777 				/* We can't have a switched unordered with an
1778 				 * ordered chunk */
1779 				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1780 				    tsn);
1781 				goto err_out;
1782 			}
1783 		}
1784 	} else {
1785 		/*
1786 		 * It's a complete segment. Let's validate we don't have a
1787 		 * re-assembly going on with the same Stream/Seq (for
1788 		 * ordered) or in the same Stream for unordered.
1789 		 */
1790 		if (control != NULL) {
1791 			if (ordered || asoc->idata_supported) {
1792 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1793 				    chk_flags, mid);
1794 				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1795 				goto err_out;
1796 			} else {
1797 				if ((tsn == control->fsn_included + 1) &&
1798 				    (control->end_added == 0)) {
1799 					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
1800 					goto err_out;
1801 				} else {
1802 					control = NULL;
1803 				}
1804 			}
1805 		}
1806 	}
1807 	/* now do the tests */
1808 	if (((asoc->cnt_on_all_streams +
1809 	    asoc->cnt_on_reasm_queue +
1810 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1811 	    (((int)asoc->my_rwnd) <= 0)) {
1812 		/*
1813 		 * When we have NO room in the rwnd we check to make sure
1814 		 * the reader is doing its job...
1815 		 */
1816 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1817 			/* some to read, wake-up */
1818 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1819 			struct socket *so;
1820 
1821 			so = SCTP_INP_SO(stcb->sctp_ep);
1822 			atomic_add_int(&stcb->asoc.refcnt, 1);
1823 			SCTP_TCB_UNLOCK(stcb);
1824 			SCTP_SOCKET_LOCK(so, 1);
1825 			SCTP_TCB_LOCK(stcb);
1826 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1827 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1828 				/* assoc was freed while we were unlocked */
1829 				SCTP_SOCKET_UNLOCK(so, 1);
1830 				return (0);
1831 			}
1832 #endif
1833 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1834 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1835 			SCTP_SOCKET_UNLOCK(so, 1);
1836 #endif
1837 		}
1838 		/* now is it in the mapping array of what we have accepted? */
1839 		if (chk_type == SCTP_DATA) {
1840 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1841 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1842 				/* Nope, not in the valid range; dump it */
1843 		dump_packet:
1844 				sctp_set_rwnd(stcb, asoc);
1845 				if ((asoc->cnt_on_all_streams +
1846 				    asoc->cnt_on_reasm_queue +
1847 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1848 					SCTP_STAT_INCR(sctps_datadropchklmt);
1849 				} else {
1850 					SCTP_STAT_INCR(sctps_datadroprwnd);
1851 				}
1852 				*break_flag = 1;
1853 				return (0);
1854 			}
1855 		} else {
1856 			if (control == NULL) {
1857 				goto dump_packet;
1858 			}
1859 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1860 				goto dump_packet;
1861 			}
1862 		}
1863 	}
1864 #ifdef SCTP_ASOCLOG_OF_TSNS
1865 	SCTP_TCB_LOCK_ASSERT(stcb);
1866 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1867 		asoc->tsn_in_at = 0;
1868 		asoc->tsn_in_wrapped = 1;
1869 	}
1870 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1871 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1872 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1873 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1874 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1875 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1876 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1877 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1878 	asoc->tsn_in_at++;
1879 #endif
1880 	/*
1881 	 * Before we continue, let's validate that we are not being fooled by
1882 	 * an evil attacker. We can only have a bounded number of chunks,
1883 	 * based on the TSN spread allowed by the mapping array (N * 8 bits),
1884 	 * so there is no way our stream sequence numbers could have wrapped.
1885 	 * We of course only validate the FIRST fragment, so the bit must be set.
1886 	 */
1887 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1888 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1889 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1890 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1891 		/* The incoming sseq is behind where we last delivered? */
1892 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1893 		    mid, asoc->strmin[sid].last_mid_delivered);
1894 
1895 		if (asoc->idata_supported) {
1896 			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1897 			    asoc->strmin[sid].last_mid_delivered,
1898 			    tsn,
1899 			    sid,
1900 			    mid);
1901 		} else {
1902 			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1903 			    (uint16_t) asoc->strmin[sid].last_mid_delivered,
1904 			    tsn,
1905 			    sid,
1906 			    (uint16_t) mid);
1907 		}
1908 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1909 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1910 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1911 		*abort_flag = 1;
1912 		return (0);
1913 	}
1914 	if (chk_type == SCTP_IDATA) {
1915 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
1916 	} else {
1917 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
1918 	}
1919 	if (last_chunk == 0) {
1920 		if (chk_type == SCTP_IDATA) {
1921 			dmbuf = SCTP_M_COPYM(*m,
1922 			    (offset + sizeof(struct sctp_idata_chunk)),
1923 			    the_len, M_NOWAIT);
1924 		} else {
1925 			dmbuf = SCTP_M_COPYM(*m,
1926 			    (offset + sizeof(struct sctp_data_chunk)),
1927 			    the_len, M_NOWAIT);
1928 		}
1929 #ifdef SCTP_MBUF_LOGGING
1930 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1931 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1932 		}
1933 #endif
1934 	} else {
1935 		/* We can steal the last chunk */
1936 		int l_len;
1937 
1938 		dmbuf = *m;
1939 		/* lop off the top part */
1940 		if (chk_type == SCTP_IDATA) {
1941 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
1942 		} else {
1943 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1944 		}
1945 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1946 			l_len = SCTP_BUF_LEN(dmbuf);
1947 		} else {
1948 			/*
1949 			 * need to count up the size; hopefully we do not hit
1950 			 * this too often :-0
1951 			 */
1952 			struct mbuf *lat;
1953 
1954 			l_len = 0;
1955 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1956 				l_len += SCTP_BUF_LEN(lat);
1957 			}
1958 		}
1959 		if (l_len > the_len) {
1960 			/* Trim the rounding (padding) bytes off the end too */
1961 			m_adj(dmbuf, -(l_len - the_len));
1962 		}
1963 	}
1964 	if (dmbuf == NULL) {
1965 		SCTP_STAT_INCR(sctps_nomem);
1966 		return (0);
1967 	}
1968 	/*
1969 	 * Now no matter what, we need a control; get one if we don't have
1970 	 * one (we may have gotten it above when we found the message was
1971 	 * fragmented).
1972 	 */
1973 	if (control == NULL) {
1974 		sctp_alloc_a_readq(stcb, control);
1975 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1976 		    ppid,
1977 		    sid,
1978 		    chk_flags,
1979 		    NULL, fsn, mid);
1980 		if (control == NULL) {
1981 			SCTP_STAT_INCR(sctps_nomem);
1982 			return (0);
1983 		}
1984 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1985 			control->data = dmbuf;
1986 			control->tail_mbuf = NULL;
1987 			control->end_added = 1;
1988 			control->last_frag_seen = 1;
1989 			control->first_frag_seen = 1;
1990 			control->fsn_included = fsn;
1991 			control->top_fsn = fsn;
1992 		}
1993 		created_control = 1;
1994 	}
1995 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
1996 	    chk_flags, ordered, mid, control);
1997 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1998 	    TAILQ_EMPTY(&asoc->resetHead) &&
1999 	    ((ordered == 0) ||
2000 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2001 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2002 		/* Candidate for express delivery */
2003 		/*
2004 		 * It's not fragmented, no PD-API is up, nothing is in the
2005 		 * delivery queue, it's un-ordered OR ordered and the next to
2006 		 * deliver AND nothing else is stuck on the stream queue,
2007 		 * and there is room for it in the socket buffer. Let's just
2008 		 * stuff it into the buffer....
2009 		 */
2010 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2011 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2012 			asoc->highest_tsn_inside_nr_map = tsn;
2013 		}
2014 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2015 		    control, mid);
2016 
2017 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2018 		    control, &stcb->sctp_socket->so_rcv,
2019 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2020 
2021 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2022 			/* for ordered, bump what we delivered */
2023 			strm->last_mid_delivered++;
2024 		}
2025 		SCTP_STAT_INCR(sctps_recvexpress);
2026 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2027 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2028 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2029 		}
2030 		control = NULL;
2031 		goto finish_express_del;
2032 	}
2033 	/* Now will we need a chunk too? */
2034 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2035 		sctp_alloc_a_chunk(stcb, chk);
2036 		if (chk == NULL) {
2037 			/* No memory so we drop the chunk */
2038 			SCTP_STAT_INCR(sctps_nomem);
2039 			if (last_chunk == 0) {
2040 				/* we copied it, free the copy */
2041 				sctp_m_freem(dmbuf);
2042 			}
2043 			return (0);
2044 		}
2045 		chk->rec.data.tsn = tsn;
2046 		chk->no_fr_allowed = 0;
2047 		chk->rec.data.fsn = fsn;
2048 		chk->rec.data.mid = mid;
2049 		chk->rec.data.sid = sid;
2050 		chk->rec.data.ppid = ppid;
2051 		chk->rec.data.context = stcb->asoc.context;
2052 		chk->rec.data.doing_fast_retransmit = 0;
2053 		chk->rec.data.rcv_flags = chk_flags;
2054 		chk->asoc = asoc;
2055 		chk->send_size = the_len;
2056 		chk->whoTo = net;
2057 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2058 		    chk,
2059 		    control, mid);
2060 		atomic_add_int(&net->ref_count, 1);
2061 		chk->data = dmbuf;
2062 	}
2063 	/* Set the appropriate TSN mark */
2064 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2065 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2066 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2067 			asoc->highest_tsn_inside_nr_map = tsn;
2068 		}
2069 	} else {
2070 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2071 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2072 			asoc->highest_tsn_inside_map = tsn;
2073 		}
2074 	}
2075 	/* Now is it complete (i.e. not fragmented)? */
2076 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2077 		/*
2078 		 * Special check for when streams are resetting. We could be
2079 		 * smarter about this and check the actual stream to see
2080 		 * whether it is being reset... That way we would not create
2081 		 * head-of-line blocking amongst streams being reset and
2082 		 * those not being reset.
2083 		 *
2084 		 */
2085 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2086 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2087 			/*
2088 			 * yep, it's past where we need to reset... go ahead
2089 			 * and queue it.
2090 			 */
2091 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2092 				/* first one on */
2093 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2094 			} else {
2095 				struct sctp_queued_to_read *ctlOn, *nctlOn;
2096 				unsigned char inserted = 0;
2097 
2098 				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2099 					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2100 
2101 						continue;
2102 					} else {
2103 						/* found it */
2104 						TAILQ_INSERT_BEFORE(ctlOn, control, next);
2105 						inserted = 1;
2106 						break;
2107 					}
2108 				}
2109 				if (inserted == 0) {
2110 					/*
2111 					 * Its TSN is larger than everything
2112 					 * on the queue, so it must be put
2113 					 * at the end.
2114 					 */
2115 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2116 				}
2117 			}
2118 			goto finish_express_del;
2119 		}
2120 		if (chk_flags & SCTP_DATA_UNORDERED) {
2121 			/* queue directly into socket buffer */
2122 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2123 			    control, mid);
2124 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2125 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2126 			    control,
2127 			    &stcb->sctp_socket->so_rcv, 1,
2128 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2129 
2130 		} else {
2131 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2132 			    mid);
2133 			sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check);
2134 			if (*abort_flag) {
2135 				if (last_chunk) {
2136 					*m = NULL;
2137 				}
2138 				return (0);
2139 			}
2140 		}
2141 		goto finish_express_del;
2142 	}
2143 	/* If we reach here it's a reassembly */
2144 	need_reasm_check = 1;
2145 	SCTPDBG(SCTP_DEBUG_XXX,
2146 	    "Queue data to stream for reasm control: %p MID: %u\n",
2147 	    control, mid);
2148 	sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn);
2149 	if (*abort_flag) {
2150 		/*
2151 		 * the assoc is now gone and chk was put onto the reasm
2152 		 * queue, which has all been freed.
2153 		 */
2154 		if (last_chunk) {
2155 			*m = NULL;
2156 		}
2157 		return (0);
2158 	}
2159 finish_express_del:
2160 	/* Here we tidy up things */
2161 	if (tsn == (asoc->cumulative_tsn + 1)) {
2162 		/* Update cum-ack */
2163 		asoc->cumulative_tsn = tsn;
2164 	}
2165 	if (last_chunk) {
2166 		*m = NULL;
2167 	}
2168 	if (ordered) {
2169 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2170 	} else {
2171 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2172 	}
2173 	SCTP_STAT_INCR(sctps_recvdata);
2174 	/* Set it present please */
2175 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2176 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2177 	}
2178 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2179 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2180 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2181 	}
2182 	/* check the special flag for stream resets */
2183 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2184 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2185 		/*
2186 		 * We have finished working through the backlogged TSNs; now
2187 		 * it is time to reset streams. 1: call the reset function, 2:
2188 		 * free the pending_reply space, 3: distribute any chunks in
2189 		 * the pending_reply_queue.
2190 		 */
2191 		struct sctp_queued_to_read *ctl, *nctl;
2192 
2193 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2194 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2195 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2196 		SCTP_FREE(liste, SCTP_M_STRESET);
2197 		/* sa_ignore FREED_MEMORY */
2198 		liste = TAILQ_FIRST(&asoc->resetHead);
2199 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2200 			/* All can be removed */
2201 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2202 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2203 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2204 				if (*abort_flag) {
2205 					return (0);
2206 				}
2207 			}
2208 		} else {
2209 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2210 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2211 					break;
2212 				}
2213 				/*
2214 				 * If ctl->sinfo_tsn <= liste->tsn we can
2215 				 * process it, which is the negation of the
2216 				 * break condition ctl->sinfo_tsn > liste->tsn.
2217 				 */
2218 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2219 				sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check);
2220 				if (*abort_flag) {
2221 					return (0);
2222 				}
2223 			}
2224 		}
2225 		/*
2226 		 * Now service re-assembly to pick up anything that has been
2227 		 * held on the reassembly queue.
2228 		 */
2229 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2230 		need_reasm_check = 0;
2231 	}
2232 	if (need_reasm_check) {
2233 		/* Another one waits? */
2234 		(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2235 	}
2236 	return (1);
2237 }
2238 
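/*
 * Illustrative sketch (not compiled): the gap used above to index the
 * mapping arrays is the serial distance of a TSN from the array base.
 * Unsigned 32-bit subtraction handles TSN wrap-around for free, which
 * is the result SCTP_CALC_TSN_TO_GAP() produces.
 */
#if 0
static uint32_t
tsn_to_gap_sketch(uint32_t tsn, uint32_t mapping_array_base_tsn)
{
	return (tsn - mapping_array_base_tsn);	/* mod 2^32 */
}
#endif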
2239 static const int8_t sctp_map_lookup_tab[256] = {
2240 	0, 1, 0, 2, 0, 1, 0, 3,
2241 	0, 1, 0, 2, 0, 1, 0, 4,
2242 	0, 1, 0, 2, 0, 1, 0, 3,
2243 	0, 1, 0, 2, 0, 1, 0, 5,
2244 	0, 1, 0, 2, 0, 1, 0, 3,
2245 	0, 1, 0, 2, 0, 1, 0, 4,
2246 	0, 1, 0, 2, 0, 1, 0, 3,
2247 	0, 1, 0, 2, 0, 1, 0, 6,
2248 	0, 1, 0, 2, 0, 1, 0, 3,
2249 	0, 1, 0, 2, 0, 1, 0, 4,
2250 	0, 1, 0, 2, 0, 1, 0, 3,
2251 	0, 1, 0, 2, 0, 1, 0, 5,
2252 	0, 1, 0, 2, 0, 1, 0, 3,
2253 	0, 1, 0, 2, 0, 1, 0, 4,
2254 	0, 1, 0, 2, 0, 1, 0, 3,
2255 	0, 1, 0, 2, 0, 1, 0, 7,
2256 	0, 1, 0, 2, 0, 1, 0, 3,
2257 	0, 1, 0, 2, 0, 1, 0, 4,
2258 	0, 1, 0, 2, 0, 1, 0, 3,
2259 	0, 1, 0, 2, 0, 1, 0, 5,
2260 	0, 1, 0, 2, 0, 1, 0, 3,
2261 	0, 1, 0, 2, 0, 1, 0, 4,
2262 	0, 1, 0, 2, 0, 1, 0, 3,
2263 	0, 1, 0, 2, 0, 1, 0, 6,
2264 	0, 1, 0, 2, 0, 1, 0, 3,
2265 	0, 1, 0, 2, 0, 1, 0, 4,
2266 	0, 1, 0, 2, 0, 1, 0, 3,
2267 	0, 1, 0, 2, 0, 1, 0, 5,
2268 	0, 1, 0, 2, 0, 1, 0, 3,
2269 	0, 1, 0, 2, 0, 1, 0, 4,
2270 	0, 1, 0, 2, 0, 1, 0, 3,
2271 	0, 1, 0, 2, 0, 1, 0, 8
2272 };
2273 
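/*
 * Illustrative sketch (not compiled): the table above maps a byte to
 * the number of consecutive 1-bits starting at bit 0, so a run of
 * received TSNs can be counted eight at a time and the final partial
 * byte finished with a single lookup. The direct computation:
 */
#if 0
static int
trailing_ones_sketch(uint8_t val)
{
	int n = 0;
	uint8_t v = val;

	while (v & 1) {
		n++;
		v >>= 1;
	}
	return (n);	/* equals sctp_map_lookup_tab[val] */
}
#endif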
2274 
2275 void
2276 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2277 {
2278 	/*
2279 	 * Now we also need to check the mapping array in a couple of ways.
2280 	 * 1) Did we move the cum-ack point?
2281 	 *
2282 	 * When you first glance at this you might think that all entries
2283 	 * that make up the position of the cum-ack would be in the
2284 	 * nr-mapping array only... i.e. things up to the cum-ack are always
2285 	 * deliverable. That's true with one exception: when it's a fragmented
2286 	 * message we may not deliver the data until some threshold (or all
2287 	 * of it) is in place. So we must OR the nr_mapping_array and
2288 	 * mapping_array to get a true picture of the cum-ack.
2289 	 */
2290 	struct sctp_association *asoc;
2291 	int at;
2292 	uint8_t val;
2293 	int slide_from, slide_end, lgap, distance;
2294 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2295 
2296 	asoc = &stcb->asoc;
2297 
2298 	old_cumack = asoc->cumulative_tsn;
2299 	old_base = asoc->mapping_array_base_tsn;
2300 	old_highest = asoc->highest_tsn_inside_map;
2301 	/*
2302 	 * We could probably improve this a small bit by calculating the
2303 	 * offset of the current cum-ack as the starting point.
2304 	 */
2305 	at = 0;
2306 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2307 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2308 		if (val == 0xff) {
2309 			at += 8;
2310 		} else {
2311 			/* there is a 0 bit */
2312 			at += sctp_map_lookup_tab[val];
2313 			break;
2314 		}
2315 	}
2316 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2317 
2318 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2319 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2320 #ifdef INVARIANTS
2321 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2322 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2323 #else
2324 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2325 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2326 		sctp_print_mapping_array(asoc);
2327 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2328 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2329 		}
2330 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2331 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2332 #endif
2333 	}
2334 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2335 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2336 	} else {
2337 		highest_tsn = asoc->highest_tsn_inside_map;
2338 	}
2339 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2340 		/* The complete array was completed by a single FR */
2341 		/* highest becomes the cum-ack */
2342 		int clr;
2343 #ifdef INVARIANTS
2344 		unsigned int i;
2345 #endif
2346 
2347 		/* clear the array */
2348 		clr = ((at + 7) >> 3);
2349 		if (clr > asoc->mapping_array_size) {
2350 			clr = asoc->mapping_array_size;
2351 		}
2352 		memset(asoc->mapping_array, 0, clr);
2353 		memset(asoc->nr_mapping_array, 0, clr);
2354 #ifdef INVARIANTS
2355 		for (i = 0; i < asoc->mapping_array_size; i++) {
2356 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2357 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2358 				sctp_print_mapping_array(asoc);
2359 			}
2360 		}
2361 #endif
2362 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2363 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2364 	} else if (at >= 8) {
2365 		/* we can slide the mapping array down */
2366 		/* slide_from holds where we hit the first NON 0xff byte */
2367 
2368 		/*
2369 		 * now calculate the ceiling of the move using our highest
2370 		 * TSN value
2371 		 */
2372 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2373 		slide_end = (lgap >> 3);
2374 		if (slide_end < slide_from) {
2375 			sctp_print_mapping_array(asoc);
2376 #ifdef INVARIANTS
2377 			panic("impossible slide");
2378 #else
2379 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2380 			    lgap, slide_end, slide_from, at);
2381 			return;
2382 #endif
2383 		}
2384 		if (slide_end > asoc->mapping_array_size) {
2385 #ifdef INVARIANTS
2386 			panic("would overrun buffer");
2387 #else
2388 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2389 			    asoc->mapping_array_size, slide_end);
2390 			slide_end = asoc->mapping_array_size;
2391 #endif
2392 		}
2393 		distance = (slide_end - slide_from) + 1;
2394 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2395 			sctp_log_map(old_base, old_cumack, old_highest,
2396 			    SCTP_MAP_PREPARE_SLIDE);
2397 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2398 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2399 		}
2400 		if (distance + slide_from > asoc->mapping_array_size ||
2401 		    distance < 0) {
2402 			/*
2403 			 * Here we do NOT slide forward the array; hopefully
2404 			 * when more data comes in to fill it up we will be
2405 			 * able to slide it forward. Really I don't think
2406 			 * this should happen :-0
2407 			 */
2408 
2409 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2410 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2411 				    (uint32_t) asoc->mapping_array_size,
2412 				    SCTP_MAP_SLIDE_NONE);
2413 			}
2414 		} else {
2415 			int ii;
2416 
2417 			for (ii = 0; ii < distance; ii++) {
2418 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2419 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2420 
2421 			}
2422 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2423 				asoc->mapping_array[ii] = 0;
2424 				asoc->nr_mapping_array[ii] = 0;
2425 			}
2426 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2427 				asoc->highest_tsn_inside_map += (slide_from << 3);
2428 			}
2429 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2430 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2431 			}
2432 			asoc->mapping_array_base_tsn += (slide_from << 3);
2433 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2434 				sctp_log_map(asoc->mapping_array_base_tsn,
2435 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2436 				    SCTP_MAP_SLIDE_RESULT);
2437 			}
2438 		}
2439 	}
2440 }
2441 
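/*
 * Illustrative sketch (not compiled): the cum-ack advance above,
 * reduced to its core. The two maps are OR'ed byte by byte; a 0xff
 * byte advances the count by eight, and the first partial byte is
 * finished via sctp_map_lookup_tab before breaking out.
 */
#if 0
static uint32_t
cumack_from_maps_sketch(const uint8_t *map, const uint8_t *nr_map,
    int size, uint32_t base_tsn)
{
	int at = 0, i;
	uint8_t val;

	for (i = 0; i < size; i++) {
		val = map[i] | nr_map[i];
		if (val == 0xff) {
			at += 8;	/* whole byte received */
		} else {
			at += sctp_map_lookup_tab[val];
			break;		/* first hole found */
		}
	}
	return (base_tsn + (at - 1));	/* TSN of last in-order chunk */
}
#endif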
2442 void
2443 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2444 {
2445 	struct sctp_association *asoc;
2446 	uint32_t highest_tsn;
2447 	int is_a_gap;
2448 
2449 	sctp_slide_mapping_arrays(stcb);
2450 	asoc = &stcb->asoc;
2451 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2452 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2453 	} else {
2454 		highest_tsn = asoc->highest_tsn_inside_map;
2455 	}
2456 	/* Is there a gap now? */
2457 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2458 
2459 	/*
2460 	 * Now we need to see if we need to queue a sack or just start the
2461 	 * timer (if allowed).
2462 	 */
2463 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2464 		/*
2465 		 * OK, special case: in the SHUTDOWN-SENT state we make
2466 		 * sure the SACK timer is off and instead send a SHUTDOWN
2467 		 * and a SACK.
2468 		 */
2469 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2470 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2471 			    stcb->sctp_ep, stcb, NULL,
2472 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2473 		}
2474 		sctp_send_shutdown(stcb,
2475 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2476 		if (is_a_gap) {
2477 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2478 		}
2479 	} else {
2480 		/*
2481 		 * CMT DAC algorithm: increase number of packets received
2482 		 * since last ack
2483 		 */
2484 		stcb->asoc.cmt_dac_pkts_rcvd++;
2485 
2486 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2487 							 * SACK */
2488 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2489 							 * longer is one */
2490 		    (stcb->asoc.numduptsns) ||	/* we have dups */
2491 		    (is_a_gap) ||	/* is still a gap */
2492 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2493 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ ) {
2494 
2495 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2496 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2497 			    (stcb->asoc.send_sack == 0) &&
2498 			    (stcb->asoc.numduptsns == 0) &&
2499 			    (stcb->asoc.delayed_ack) &&
2500 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2501 
2502 				/*
2503 				 * CMT DAC algorithm: With CMT, delay acks
2504 				 * even in the face of reordering.
2505 				 *
2506 				 * Therefore, acks that do not have to be
2507 				 * sent because of the above reasons will be
2508 				 * delayed. That is, acks that would have
2509 				 * been sent due to gap reports will be
2510 				 * delayed with DAC. Start the delayed ack
2511 				 * timer.
2512 				 */
2513 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2514 				    stcb->sctp_ep, stcb, NULL);
2515 			} else {
2516 				/*
2517 				 * OK, we must build a SACK since the timer
2518 				 * is pending, we got our first packet, OR
2519 				 * there are gaps or duplicates.
2520 				 */
2521 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2522 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2523 			}
2524 		} else {
2525 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2526 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2527 				    stcb->sctp_ep, stcb, NULL);
2528 			}
2529 		}
2530 	}
2531 }
2532 
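/*
 * Illustrative sketch (not compiled): in the non-SHUTDOWN-SENT branch
 * above, a SACK is built right away when this predicate holds (modulo
 * the CMT DAC delayed-ack exception); otherwise the delayed-ack timer
 * is started.
 */
#if 0
static int
sack_needed_now_sketch(const struct sctp_association *a, int was_a_gap,
    int is_a_gap)
{
	return ((a->send_sack == 1) ||			/* explicitly requested */
	    ((was_a_gap) && (is_a_gap == 0)) ||		/* a gap just closed */
	    (a->numduptsns != 0) ||			/* duplicates to report */
	    (is_a_gap) ||				/* still a gap */
	    (a->delayed_ack == 0) ||			/* delayed SACK disabled */
	    (a->data_pkts_seen >= a->sack_freq));	/* packet-count limit */
}
#endif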
2533 int
2534 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2535     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2536     struct sctp_nets *net, uint32_t * high_tsn)
2537 {
2538 	struct sctp_chunkhdr *ch, chunk_buf;
2539 	struct sctp_association *asoc;
2540 	int num_chunks = 0;	/* number of control chunks processed */
2541 	int stop_proc = 0;
2542 	int chk_length, break_flag, last_chunk;
2543 	int abort_flag = 0, was_a_gap;
2544 	struct mbuf *m;
2545 	uint32_t highest_tsn;
2546 
2547 	/* set the rwnd */
2548 	sctp_set_rwnd(stcb, &stcb->asoc);
2549 
2550 	m = *mm;
2551 	SCTP_TCB_LOCK_ASSERT(stcb);
2552 	asoc = &stcb->asoc;
2553 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2554 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2555 	} else {
2556 		highest_tsn = asoc->highest_tsn_inside_map;
2557 	}
2558 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2559 	/*
2560 	 * setup where we got the last DATA packet from for any SACK that
2561 	 * may need to go out. Don't bump the net. This is done ONLY when a
2562 	 * chunk is assigned.
2563 	 */
2564 	asoc->last_data_chunk_from = net;
2565 
2566 	/*-
2567 	 * Now before we proceed we must figure out if this is a wasted
2568 	 * cluster... i.e. it is a small packet sent in and yet the driver
2569 	 * underneath allocated a full cluster for it. If so we must copy it
2570 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2571 	 * with cluster starvation. Note for __Panda__ we don't do this
2572 	 * since it has clusters all the way down to 64 bytes.
2573 	 */
2574 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2575 		/* we only handle mbufs that are singletons, not chains */
2576 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2577 		if (m) {
2578 			/* ok lets see if we can copy the data up */
2579 			caddr_t *from, *to;
2580 
2581 			/* get the pointers and copy */
2582 			to = mtod(m, caddr_t *);
2583 			from = mtod((*mm), caddr_t *);
2584 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2585 			/* copy the length and free up the old */
2586 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2587 			sctp_m_freem(*mm);
2588 			/* success, back copy */
2589 			*mm = m;
2590 		} else {
2591 			/* We are in trouble in the mbuf world .. yikes */
2592 			m = *mm;
2593 		}
2594 	}
2595 	/* get pointer to the first chunk header */
2596 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2597 	    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2598 	if (ch == NULL) {
2599 		return (1);
2600 	}
2601 	/*
2602 	 * process all DATA chunks...
2603 	 */
2604 	*high_tsn = asoc->cumulative_tsn;
2605 	break_flag = 0;
2606 	asoc->data_pkts_seen++;
2607 	while (stop_proc == 0) {
2608 		/* validate chunk length */
2609 		chk_length = ntohs(ch->chunk_length);
2610 		if (length - *offset < chk_length) {
2611 			/* all done, mutilated chunk */
2612 			stop_proc = 1;
2613 			continue;
2614 		}
2615 		if ((asoc->idata_supported == 1) &&
2616 		    (ch->chunk_type == SCTP_DATA)) {
2617 			struct mbuf *op_err;
2618 			char msg[SCTP_DIAG_INFO_LEN];
2619 
2620 			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2621 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2622 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2623 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2624 			return (2);
2625 		}
2626 		if ((asoc->idata_supported == 0) &&
2627 		    (ch->chunk_type == SCTP_IDATA)) {
2628 			struct mbuf *op_err;
2629 			char msg[SCTP_DIAG_INFO_LEN];
2630 
2631 			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2632 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2633 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2634 			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2635 			return (2);
2636 		}
2637 		if ((ch->chunk_type == SCTP_DATA) ||
2638 		    (ch->chunk_type == SCTP_IDATA)) {
2639 			int clen;
2640 
2641 			if (ch->chunk_type == SCTP_DATA) {
2642 				clen = sizeof(struct sctp_data_chunk);
2643 			} else {
2644 				clen = sizeof(struct sctp_idata_chunk);
2645 			}
2646 			if (chk_length < clen) {
2647 				/*
2648 				 * Need to send an abort since we had an
2649 				 * invalid data chunk.
2650 				 */
2651 				struct mbuf *op_err;
2652 				char msg[SCTP_DIAG_INFO_LEN];
2653 
2654 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2655 				    chk_length);
2656 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2657 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2658 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2659 				return (2);
2660 			}
2661 #ifdef SCTP_AUDITING_ENABLED
2662 			sctp_audit_log(0xB1, 0);
2663 #endif
2664 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2665 				last_chunk = 1;
2666 			} else {
2667 				last_chunk = 0;
2668 			}
2669 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2670 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 			    last_chunk, ch->chunk_type)) {
2672 				num_chunks++;
2673 			}
2674 			if (abort_flag)
2675 				return (2);
2676 
2677 			if (break_flag) {
2678 				/*
2679 				 * Set because we are out of rwnd space and
2680 				 * have no drop-report space left.
2681 				 */
2682 				stop_proc = 1;
2683 				continue;
2684 			}
2685 		} else {
2686 			/* not a data chunk in the data region */
2687 			switch (ch->chunk_type) {
2688 			case SCTP_INITIATION:
2689 			case SCTP_INITIATION_ACK:
2690 			case SCTP_SELECTIVE_ACK:
2691 			case SCTP_NR_SELECTIVE_ACK:
2692 			case SCTP_HEARTBEAT_REQUEST:
2693 			case SCTP_HEARTBEAT_ACK:
2694 			case SCTP_ABORT_ASSOCIATION:
2695 			case SCTP_SHUTDOWN:
2696 			case SCTP_SHUTDOWN_ACK:
2697 			case SCTP_OPERATION_ERROR:
2698 			case SCTP_COOKIE_ECHO:
2699 			case SCTP_COOKIE_ACK:
2700 			case SCTP_ECN_ECHO:
2701 			case SCTP_ECN_CWR:
2702 			case SCTP_SHUTDOWN_COMPLETE:
2703 			case SCTP_AUTHENTICATION:
2704 			case SCTP_ASCONF_ACK:
2705 			case SCTP_PACKET_DROPPED:
2706 			case SCTP_STREAM_RESET:
2707 			case SCTP_FORWARD_CUM_TSN:
2708 			case SCTP_ASCONF:
2709 				{
2710 					/*
2711 					 * Now, what do we do with KNOWN
2712 					 * chunks that are NOT in the right
2713 					 * place?
2714 					 *
2715 					 * This implementation treats any
2716 					 * known chunk type that follows a
2717 					 * DATA chunk in the packet as a
2718 					 * protocol violation and aborts
2719 					 * the association.
2720 					 */
2721 					struct mbuf *op_err;
2722 					char msg[SCTP_DIAG_INFO_LEN];
2723 
2724 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2725 					    ch->chunk_type);
2726 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2727 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2728 					return (2);
2729 				}
2730 			default:
2731 				/* unknown chunk type, use bit rules */
2732 					/* Add an error report to the queue */
2733 					/* Add a error report to the queue */
2734 					struct mbuf *op_err;
2735 					struct sctp_gen_error_cause *cause;
2736 
2737 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2738 					    0, M_NOWAIT, 1, MT_DATA);
2739 					if (op_err != NULL) {
2740 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2741 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2742 						cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause)));
2743 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2744 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2745 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2746 							sctp_queue_op_err(stcb, op_err);
2747 						} else {
2748 							sctp_m_freem(op_err);
2749 						}
2750 					}
2751 				}
2752 				if ((ch->chunk_type & 0x80) == 0) {
2753 					/* discard the rest of this packet */
2754 					stop_proc = 1;
2755 				}	/* else skip this bad chunk and continue */
2756 				break;
2757 			}	/* switch of chunk type */
2758 		}
2759 		*offset += SCTP_SIZE32(chk_length);
2760 		if ((*offset >= length) || stop_proc) {
2761 			/* no more data left in the mbuf chain */
2762 			stop_proc = 1;
2763 			continue;
2764 		}
2765 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2766 		    sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
2767 		if (ch == NULL) {
2768 			*offset = length;
2769 			stop_proc = 1;
2770 			continue;
2771 		}
2772 	}
2773 	if (break_flag) {
2774 		/*
2775 		 * we need to report rwnd overrun drops.
2776 		 */
2777 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2778 	}
2779 	if (num_chunks) {
2780 		/*
2781 		 * Did we get data? If so, update the time for auto-close and
2782 		 * give the peer credit for being alive.
2783 		 */
2784 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2785 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2786 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2787 			    stcb->asoc.overall_error_count,
2788 			    0,
2789 			    SCTP_FROM_SCTP_INDATA,
2790 			    __LINE__);
2791 		}
2792 		stcb->asoc.overall_error_count = 0;
2793 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2794 	}
2795 	/* now service all of the reassm queue if needed */
2796 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2797 		/* Assure that we ack right away */
2798 		stcb->asoc.send_sack = 1;
2799 	}
2800 	/* Start a sack timer or QUEUE a SACK for sending */
2801 	sctp_sack_check(stcb, was_a_gap);
2802 	return (0);
2803 }
2804 
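/*
 * Illustrative sketch (not compiled): bundled chunks are walked by
 * rounding each chunk length up to a 4-byte boundary, the SCTP
 * padding rule that SCTP_SIZE32() implements above.
 */
#if 0
static int
next_chunk_offset_sketch(int offset, uint16_t chunk_length)
{
	return (offset + ((chunk_length + 3) & ~3));
}
#endif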
2805 static int
2806 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2807     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2808     int *num_frs,
2809     uint32_t * biggest_newly_acked_tsn,
2810     uint32_t * this_sack_lowest_newack,
2811     int *rto_ok)
2812 {
2813 	struct sctp_tmit_chunk *tp1;
2814 	unsigned int theTSN;
2815 	int j, wake_him = 0, circled = 0;
2816 
2817 	/* Recover the tp1 we last saw */
2818 	tp1 = *p_tp1;
2819 	if (tp1 == NULL) {
2820 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2821 	}
2822 	for (j = frag_strt; j <= frag_end; j++) {
2823 		theTSN = j + last_tsn;
2824 		while (tp1) {
2825 			if (tp1->rec.data.doing_fast_retransmit)
2826 				(*num_frs) += 1;
2827 
2828 			/*-
2829 			 * CMT: CUCv2 algorithm. For each TSN being
2830 			 * processed from the sent queue, track the
2831 			 * next expected pseudo-cumack, or
2832 			 * rtx_pseudo_cumack, if required. Separate
2833 			 * cumack trackers for first transmissions,
2834 			 * and retransmissions.
2835 			 */
2836 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2837 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2838 			    (tp1->snd_count == 1)) {
2839 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2840 				tp1->whoTo->find_pseudo_cumack = 0;
2841 			}
2842 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2843 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2844 			    (tp1->snd_count > 1)) {
2845 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2846 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2847 			}
2848 			if (tp1->rec.data.tsn == theTSN) {
2849 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2850 					/*-
2851 					 * must be held until
2852 					 * cum-ack passes
2853 					 */
2854 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2855 						/*-
2856 						 * If it is less than RESEND, it is
2857 						 * now no longer in flight.
2858 						 * Higher values may already be set
2859 						 * via previous Gap Ack Blocks...
2860 						 * i.e. ACKED or RESEND.
2861 						 */
2862 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2863 						    *biggest_newly_acked_tsn)) {
2864 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2865 						}
2866 						/*-
2867 						 * CMT: SFR algo (and HTNA) - set
2868 						 * saw_newack to 1 for dest being
2869 						 * newly acked. update
2870 						 * this_sack_highest_newack if
2871 						 * appropriate.
2872 						 */
2873 						if (tp1->rec.data.chunk_was_revoked == 0)
2874 							tp1->whoTo->saw_newack = 1;
2875 
2876 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2877 						    tp1->whoTo->this_sack_highest_newack)) {
2878 							tp1->whoTo->this_sack_highest_newack =
2879 							    tp1->rec.data.tsn;
2880 						}
2881 						/*-
2882 						 * CMT DAC algo: also update
2883 						 * this_sack_lowest_newack
2884 						 */
2885 						if (*this_sack_lowest_newack == 0) {
2886 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2887 								sctp_log_sack(*this_sack_lowest_newack,
2888 								    last_tsn,
2889 								    tp1->rec.data.tsn,
2890 								    0,
2891 								    0,
2892 								    SCTP_LOG_TSN_ACKED);
2893 							}
2894 							*this_sack_lowest_newack = tp1->rec.data.tsn;
2895 						}
2896 						/*-
2897 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2898 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2899 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2900 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2901 						 * Separate pseudo_cumack trackers for first transmissions and
2902 						 * retransmissions.
2903 						 */
2904 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
2905 							if (tp1->rec.data.chunk_was_revoked == 0) {
2906 								tp1->whoTo->new_pseudo_cumack = 1;
2907 							}
2908 							tp1->whoTo->find_pseudo_cumack = 1;
2909 						}
2910 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2911 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
2912 						}
2913 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
2914 							if (tp1->rec.data.chunk_was_revoked == 0) {
2915 								tp1->whoTo->new_pseudo_cumack = 1;
2916 							}
2917 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2918 						}
2919 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2920 							sctp_log_sack(*biggest_newly_acked_tsn,
2921 							    last_tsn,
2922 							    tp1->rec.data.tsn,
2923 							    frag_strt,
2924 							    frag_end,
2925 							    SCTP_LOG_TSN_ACKED);
2926 						}
2927 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2928 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2929 							    tp1->whoTo->flight_size,
2930 							    tp1->book_size,
2931 							    (uint32_t) (uintptr_t) tp1->whoTo,
2932 							    tp1->rec.data.tsn);
2933 						}
2934 						sctp_flight_size_decrease(tp1);
2935 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2936 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2937 							    tp1);
2938 						}
2939 						sctp_total_flight_decrease(stcb, tp1);
2940 
2941 						tp1->whoTo->net_ack += tp1->send_size;
2942 						if (tp1->snd_count < 2) {
2943 							/*-
2944 							 * True non-retransmitted chunk
2945 							 */
2946 							tp1->whoTo->net_ack2 += tp1->send_size;
2947 
2948 							/*-
2949 							 * update RTO too?
2950 							 */
2951 							if (tp1->do_rtt) {
2952 								if (*rto_ok) {
2953 									tp1->whoTo->RTO =
2954 									    sctp_calculate_rto(stcb,
2955 									    &stcb->asoc,
2956 									    tp1->whoTo,
2957 									    &tp1->sent_rcv_time,
2958 									    sctp_align_safe_nocopy,
2959 									    SCTP_RTT_FROM_DATA);
2960 									*rto_ok = 0;
2961 								}
2962 								if (tp1->whoTo->rto_needed == 0) {
2963 									tp1->whoTo->rto_needed = 1;
2964 								}
2965 								tp1->do_rtt = 0;
2966 							}
2967 						}
2968 					}
2969 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2970 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2971 						    stcb->asoc.this_sack_highest_gap)) {
2972 							stcb->asoc.this_sack_highest_gap =
2973 							    tp1->rec.data.tsn;
2974 						}
2975 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2976 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2977 #ifdef SCTP_AUDITING_ENABLED
2978 							sctp_audit_log(0xB2,
2979 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2980 #endif
2981 						}
2982 					}
2983 					/*-
2984 					 * All chunks NOT UNSENT fall through here and are marked
2985 					 * (leave PR-SCTP ones that are to skip alone though)
2986 					 */
2987 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2988 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2989 						tp1->sent = SCTP_DATAGRAM_MARKED;
2990 					}
2991 					if (tp1->rec.data.chunk_was_revoked) {
2992 						/* deflate the cwnd */
2993 						tp1->whoTo->cwnd -= tp1->book_size;
2994 						tp1->rec.data.chunk_was_revoked = 0;
2995 					}
2996 					/* NR Sack code here */
2997 					if (nr_sacking &&
2998 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2999 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3000 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3001 #ifdef INVARIANTS
3002 						} else {
3003 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3004 #endif
3005 						}
3006 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3007 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3008 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3009 							stcb->asoc.trigger_reset = 1;
3010 						}
3011 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3012 						if (tp1->data) {
3013 							/* sa_ignore
3014 							 * NO_NULL_CHK */
3015 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3016 							sctp_m_freem(tp1->data);
3017 							tp1->data = NULL;
3018 						}
3019 						wake_him++;
3020 					}
3021 				}
3022 				break;
3023 			} /* if (tp1->tsn == theTSN) */ if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3024 				break;
3025 			}
3026 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3027 			if ((tp1 == NULL) && (circled == 0)) {
3028 				circled++;
3029 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3030 			}
3031 		}		/* end while (tp1) */
3032 		if (tp1 == NULL) {
3033 			circled = 0;
3034 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3035 		}
3036 		/* In case the fragments were not in order we must reset */
3037 	}			/* end for (j = fragStart */
3038 	*p_tp1 = tp1;
3039 	return (wake_him);	/* Return value only used for nr-sack */
3040 }
3041 
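/*
 * Illustrative sketch (not compiled): a gap-ack block is a pair of
 * 16-bit offsets relative to the cumulative TSN carried in the SACK;
 * the block acknowledges every TSN in
 * [last_tsn + frag_strt, last_tsn + frag_end], which is exactly the
 * range the j-loop above visits.
 */
#if 0
static void
gap_block_to_tsns_sketch(uint32_t last_tsn, uint16_t frag_strt,
    uint16_t frag_end, uint32_t *first_tsn, uint32_t *last_tsn_acked)
{
	*first_tsn = last_tsn + frag_strt;
	*last_tsn_acked = last_tsn + frag_end;
}
#endif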
3042 
3043 static int
3044 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3045     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3046     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3047     int num_seg, int num_nr_seg, int *rto_ok)
3048 {
3049 	struct sctp_gap_ack_block *frag, block;
3050 	struct sctp_tmit_chunk *tp1;
3051 	int i;
3052 	int num_frs = 0;
3053 	int chunk_freed;
3054 	int non_revocable;
3055 	uint16_t frag_strt, frag_end, prev_frag_end;
3056 
3057 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3058 	prev_frag_end = 0;
3059 	chunk_freed = 0;
3060 
3061 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3062 		if (i == num_seg) {
3063 			prev_frag_end = 0;
3064 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3065 		}
3066 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3067 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3068 		*offset += sizeof(block);
3069 		if (frag == NULL) {
3070 			return (chunk_freed);
3071 		}
3072 		frag_strt = ntohs(frag->start);
3073 		frag_end = ntohs(frag->end);
3074 
3075 		if (frag_strt > frag_end) {
3076 			/* This gap report is malformed, skip it. */
3077 			continue;
3078 		}
3079 		if (frag_strt <= prev_frag_end) {
3080 			/* This gap report is not in order, so restart. */
3081 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3082 		}
3083 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3084 			*biggest_tsn_acked = last_tsn + frag_end;
3085 		}
3086 		if (i < num_seg) {
3087 			non_revocable = 0;
3088 		} else {
3089 			non_revocable = 1;
3090 		}
3091 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3092 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3093 		    this_sack_lowest_newack, rto_ok)) {
3094 			chunk_freed = 1;
3095 		}
3096 		prev_frag_end = frag_end;
3097 	}
3098 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3099 		if (num_frs)
3100 			sctp_log_fr(*biggest_tsn_acked,
3101 			    *biggest_newly_acked_tsn,
3102 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3103 	}
3104 	return (chunk_freed);
3105 }
3106 
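/*
 * Illustrative sketch (not compiled): the per-block sanity rules used
 * above. A block with start > end is malformed and skipped; a block
 * that does not begin past the previous block's end is out of order
 * and forces a rescan from the head of the sent queue.
 */
#if 0
static int
gap_block_check_sketch(uint16_t frag_strt, uint16_t frag_end,
    uint16_t prev_frag_end)
{
	if (frag_strt > frag_end)
		return (-1);	/* malformed, skip the block */
	if (frag_strt <= prev_frag_end)
		return (0);	/* out of order, restart the walk */
	return (1);		/* usable as-is */
}
#endif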
3107 static void
3108 sctp_check_for_revoked(struct sctp_tcb *stcb,
3109     struct sctp_association *asoc, uint32_t cumack,
3110     uint32_t biggest_tsn_acked)
3111 {
3112 	struct sctp_tmit_chunk *tp1;
3113 
3114 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3115 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3116 			/*
3117 			 * OK, this one is either ACKED or MARKED. If it is
3118 			 * ACKED it has been previously acked but not this
3119 			 * time, i.e. revoked. If it is MARKED it was ACKed
3120 			 * again.
3121 			 */
3122 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3123 				break;
3124 			}
3125 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3126 				/* it has been revoked */
3127 				tp1->sent = SCTP_DATAGRAM_SENT;
3128 				tp1->rec.data.chunk_was_revoked = 1;
3129 				/*
3130 				 * We must add this stuff back in to assure
3131 				 * timers and such get started.
3132 				 */
3133 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3134 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3135 					    tp1->whoTo->flight_size,
3136 					    tp1->book_size,
3137 					    (uint32_t) (uintptr_t) tp1->whoTo,
3138 					    tp1->rec.data.tsn);
3139 				}
3140 				sctp_flight_size_increase(tp1);
3141 				sctp_total_flight_increase(stcb, tp1);
3142 				/*
3143 				 * We inflate the cwnd to compensate for our
3144 				 * artificial inflation of the flight_size.
3145 				 */
3146 				tp1->whoTo->cwnd += tp1->book_size;
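				/*
				 * The matching deflation happens later, when
				 * the revoked chunk is finally cum-acked and
				 * chunk_was_revoked is cleared (see the
				 * "deflate the cwnd" handling in the SACK
				 * processing paths).
				 */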
3147 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3148 					sctp_log_sack(asoc->last_acked_seq,
3149 					    cumack,
3150 					    tp1->rec.data.tsn,
3151 					    0,
3152 					    0,
3153 					    SCTP_LOG_TSN_REVOKED);
3154 				}
3155 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3156 				/* it has been re-acked in this SACK */
3157 				tp1->sent = SCTP_DATAGRAM_ACKED;
3158 			}
3159 		}
3160 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3161 			break;
3162 	}
3163 }
3164 
3165 
3166 static void
3167 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3168     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3169 {
3170 	struct sctp_tmit_chunk *tp1;
3171 	int strike_flag = 0;
3172 	struct timeval now;
3173 	int tot_retrans = 0;
3174 	uint32_t sending_seq;
3175 	struct sctp_nets *net;
3176 	int num_dests_sacked = 0;
3177 
3178 	/*
3179 	 * Select the sending_seq: this is either the next thing ready to
3180 	 * be sent but not yet transmitted, OR the next seq we will assign.
3181 	 */
3182 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3183 	if (tp1 == NULL) {
3184 		sending_seq = asoc->sending_seq;
3185 	} else {
3186 		sending_seq = tp1->rec.data.tsn;
3187 	}
3188 
3189 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3190 	if ((asoc->sctp_cmt_on_off > 0) &&
3191 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3192 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3193 			if (net->saw_newack)
3194 				num_dests_sacked++;
3195 		}
3196 	}
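	/*
	 * Note: the DAC checks below treat num_dests_sacked == 1 as a
	 * non-mixed SACK (new acks arrived for a single destination
	 * only); only then is a missing TSN allowed the extra strike.
	 */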
3197 	if (stcb->asoc.prsctp_supported) {
3198 		(void)SCTP_GETTIME_TIMEVAL(&now);
3199 	}
3200 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3201 		strike_flag = 0;
3202 		if (tp1->no_fr_allowed) {
3203 			/* this one had a timeout or something */
3204 			continue;
3205 		}
3206 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3207 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3208 				sctp_log_fr(biggest_tsn_newly_acked,
3209 				    tp1->rec.data.tsn,
3210 				    tp1->sent,
3211 				    SCTP_FR_LOG_CHECK_STRIKE);
3212 		}
3213 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3214 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3215 			/* done */
3216 			break;
3217 		}
3218 		if (stcb->asoc.prsctp_supported) {
3219 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3220 				/* Is it expired? */
3221 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3222 					/* Yes so drop it */
3223 					if (tp1->data != NULL) {
3224 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3225 						    SCTP_SO_NOT_LOCKED);
3226 					}
3227 					continue;
3228 				}
3229 			}
3230 		}
3231 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
3232 			/* we are beyond the tsn in the sack  */
3233 			break;
3234 		}
3235 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3236 			/* either a RESEND, ACKED, or MARKED */
3237 			/* skip */
3238 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3239 				/* Continue striking FWD-TSN chunks */
3240 				tp1->rec.data.fwd_tsn_cnt++;
3241 			}
3242 			continue;
3243 		}
3244 		/*
3245 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3246 		 */
3247 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3248 			/*
3249 			 * No new acks were received for data sent to this
3250 			 * dest. Therefore, according to the SFR algo for
3251 			 * CMT, no data sent to this dest can be marked for
3252 			 * FR using this SACK.
3253 			 */
3254 			continue;
3255 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
3256 		    tp1->whoTo->this_sack_highest_newack)) {
3257 			/*
3258 			 * CMT: New acks were received for data sent to
3259 			 * this dest. But no new acks were seen for data
3260 			 * sent after tp1. Therefore, according to the SFR
3261 			 * algo for CMT, tp1 cannot be marked for FR using
3262 			 * this SACK. This step covers part of the DAC algo
3263 			 * and the HTNA algo as well.
3264 			 */
3265 			continue;
3266 		}
3267 		/*
3268 		 * Here we check to see if we have already done a FR and,
3269 		 * if so, whether the biggest TSN we saw in the SACK is
3270 		 * smaller than the recovery point. If so we don't strike
3271 		 * the TSN; otherwise we CAN strike the TSN.
3272 		 */
3273 		/*
3274 		 * @@@ JRI: Check for CMT if (accum_moved &&
3275 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3276 		 * 0)) {
3277 		 */
3278 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3279 			/*
3280 			 * Strike the TSN if in fast-recovery and cum-ack
3281 			 * moved.
3282 			 */
3283 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 				sctp_log_fr(biggest_tsn_newly_acked,
3285 				    tp1->rec.data.tsn,
3286 				    tp1->sent,
3287 				    SCTP_FR_LOG_STRIKE_CHUNK);
3288 			}
3289 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3290 				tp1->sent++;
3291 			}
3292 			if ((asoc->sctp_cmt_on_off > 0) &&
3293 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3294 				/*
3295 				 * CMT DAC algorithm: If the SACK flag is set
3296 				 * to 0, the lowest_newack test will not pass
3297 				 * because it would have been set to the
3298 				 * cumack earlier. If the chunk is not already
3299 				 * marked for rtx, the SACK is not a mixed
3300 				 * SACK, and tp1 is not between two sacked
3301 				 * TSNs, then mark it one more time. NOTE we
3302 				 * mark one additional time since the SACK
3303 				 * DAC flag indicates that two packets have
3304 				 * been received after this missing TSN.
3305 				 */
3306 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3307 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3308 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3309 						sctp_log_fr(16 + num_dests_sacked,
3310 						    tp1->rec.data.tsn,
3311 						    tp1->sent,
3312 						    SCTP_FR_LOG_STRIKE_CHUNK);
3313 					}
3314 					tp1->sent++;
3315 				}
3316 			}
3317 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3318 		    (asoc->sctp_cmt_on_off == 0)) {
3319 			/*
3320 			 * For those that have done a FR we must take
3321 			 * special consideration if we strike. I.e., the
3322 			 * biggest_newly_acked must be higher than the
3323 			 * sending_seq at the time we did the FR.
3324 			 */
3325 			if (
3326 #ifdef SCTP_FR_TO_ALTERNATE
3327 			/*
3328 			 * If FR's go to new networks, then we must only do
3329 			 * this for singly homed asoc's. However if the FR's
3330 			 * go to the same network (Armando's work) then it's
3331 			 * ok to FR multiple times.
3332 			 */
3333 			    (asoc->numnets < 2)
3334 #else
3335 			    (1)
3336 #endif
3337 			    ) {
3338 
3339 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3340 				    tp1->rec.data.fast_retran_tsn)) {
3341 					/*
3342 					 * Strike the TSN, since this ack is
3343 					 * beyond where things were when we
3344 					 * did a FR.
3345 					 */
3346 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 						sctp_log_fr(biggest_tsn_newly_acked,
3348 						    tp1->rec.data.tsn,
3349 						    tp1->sent,
3350 						    SCTP_FR_LOG_STRIKE_CHUNK);
3351 					}
3352 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3353 						tp1->sent++;
3354 					}
3355 					strike_flag = 1;
3356 					if ((asoc->sctp_cmt_on_off > 0) &&
3357 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3358 						/*
3359 						 * CMT DAC algorithm: If
3360 						 * the SACK flag is set to
3361 						 * 0, the lowest_newack
3362 						 * test will not pass
3363 						 * because it would have
3364 						 * been set to the cumack
3365 						 * earlier. If not already
3366 						 * marked for rtx, the
3367 						 * SACK is not mixed, and
3368 						 * tp1 is not between two
3369 						 * sacked TSNs, mark one
3370 						 * more time. NOTE we mark
3371 						 * one additional time
3372 						 * since the SACK DAC flag
3373 						 * indicates two packets
3374 						 * arrived after this
3375 						 * missing TSN.
3376 						 */
3377 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3378 						    (num_dests_sacked == 1) &&
3379 						    SCTP_TSN_GT(this_sack_lowest_newack,
3380 						    tp1->rec.data.tsn)) {
3381 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3382 								sctp_log_fr(32 + num_dests_sacked,
3383 								    tp1->rec.data.tsn,
3384 								    tp1->sent,
3385 								    SCTP_FR_LOG_STRIKE_CHUNK);
3386 							}
3387 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3388 								tp1->sent++;
3389 							}
3390 						}
3391 					}
3392 				}
3393 			}
3394 			/*
3395 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3396 			 * algo covers HTNA.
3397 			 */
3398 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3399 		    biggest_tsn_newly_acked)) {
3400 			/*
3401 			 * We don't strike these: this is the HTNA
3402 			 * algorithm, i.e. we don't strike if our TSN is
3403 			 * larger than the Highest TSN Newly Acked.
3404 			 */
3405 			;
3406 		} else {
3407 			/* Strike the TSN */
3408 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3409 				sctp_log_fr(biggest_tsn_newly_acked,
3410 				    tp1->rec.data.tsn,
3411 				    tp1->sent,
3412 				    SCTP_FR_LOG_STRIKE_CHUNK);
3413 			}
3414 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3415 				tp1->sent++;
3416 			}
3417 			if ((asoc->sctp_cmt_on_off > 0) &&
3418 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3419 				/*
3420 				 * CMT DAC algorithm: If the SACK flag is set
3421 				 * to 0, the lowest_newack test will not pass
3422 				 * because it would have been set to the
3423 				 * cumack earlier. If the chunk is not already
3424 				 * marked for rtx, the SACK is not a mixed
3425 				 * SACK, and tp1 is not between two sacked
3426 				 * TSNs, then mark it one more time. NOTE we
3427 				 * mark one additional time since the SACK
3428 				 * DAC flag indicates that two packets have
3429 				 * been received after this missing TSN.
3430 				 */
3431 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3432 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3433 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3434 						sctp_log_fr(48 + num_dests_sacked,
3435 						    tp1->rec.data.tsn,
3436 						    tp1->sent,
3437 						    SCTP_FR_LOG_STRIKE_CHUNK);
3438 					}
3439 					tp1->sent++;
3440 				}
3441 			}
3442 		}
3443 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3444 			struct sctp_nets *alt;
3445 
3446 			/* fix counts and things */
3447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3448 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3449 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3450 				    tp1->book_size,
3451 				    (uint32_t) (uintptr_t) tp1->whoTo,
3452 				    tp1->rec.data.tsn);
3453 			}
3454 			if (tp1->whoTo) {
3455 				tp1->whoTo->net_ack++;
3456 				sctp_flight_size_decrease(tp1);
3457 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3458 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3459 					    tp1);
3460 				}
3461 			}
3462 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3463 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3464 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3465 			}
3466 			/* add back to the rwnd */
3467 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3468 
3469 			/* remove from the total flight */
3470 			sctp_total_flight_decrease(stcb, tp1);
3471 
3472 			if ((stcb->asoc.prsctp_supported) &&
3473 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3474 				/* Has it been retransmitted tv_sec times? -
3475 				 * we store the retran count there. */
3476 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3477 					/* Yes, so drop it */
3478 					if (tp1->data != NULL) {
3479 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3480 						    SCTP_SO_NOT_LOCKED);
3481 					}
3482 					/* Make sure to flag we had a FR */
3483 					tp1->whoTo->net_ack++;
3484 					continue;
3485 				}
3486 			}
3487 			/* SCTP_PRINTF("OK, we are now ready to FR this
3488 			 * guy\n"); */
3489 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3490 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3491 				    0, SCTP_FR_MARKED);
3492 			}
3493 			if (strike_flag) {
3494 				/* This is a subsequent FR */
3495 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3496 			}
3497 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3498 			if (asoc->sctp_cmt_on_off > 0) {
3499 				/*
3500 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3501 				 * If CMT is being used, then pick dest with
3502 				 * largest ssthresh for any retransmission.
3503 				 */
3504 				tp1->no_fr_allowed = 1;
3505 				alt = tp1->whoTo;
3506 				/* sa_ignore NO_NULL_CHK */
3507 				if (asoc->sctp_cmt_pf > 0) {
3508 					/* JRS 5/18/07 - If CMT PF is on,
3509 					 * use the PF version of
3510 					 * find_alt_net() */
3511 					alt = sctp_find_alternate_net(stcb, alt, 2);
3512 				} else {
3513 					/* JRS 5/18/07 - If only CMT is on,
3514 					 * use the CMT version of
3515 					 * find_alt_net() */
3516 					/* sa_ignore NO_NULL_CHK */
3517 					alt = sctp_find_alternate_net(stcb, alt, 1);
3518 				}
3519 				if (alt == NULL) {
3520 					alt = tp1->whoTo;
3521 				}
3522 				/*
3523 				 * CUCv2: If a different dest is picked for
3524 				 * the retransmission, then new
3525 				 * (rtx-)pseudo_cumack needs to be tracked
3526 				 * for orig dest. Let CUCv2 track new (rtx-)
3527 				 * pseudo-cumack always.
3528 				 */
3529 				if (tp1->whoTo) {
3530 					tp1->whoTo->find_pseudo_cumack = 1;
3531 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3532 				}
3533 			} else {/* CMT is OFF */
3534 
3535 #ifdef SCTP_FR_TO_ALTERNATE
3536 				/* Can we find an alternate? */
3537 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3538 #else
3539 				/*
3540 				 * default behavior is to NOT retransmit
3541 				 * FR's to an alternate. Armando Caro's
3542 				 * paper details why.
3543 				 */
3544 				alt = tp1->whoTo;
3545 #endif
3546 			}
3547 
3548 			tp1->rec.data.doing_fast_retransmit = 1;
3549 			tot_retrans++;
3550 			/* mark the sending seq for possible subsequent FR's */
3551 			/*
3552 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3553 			 * (uint32_t)tp1->rec.data.tsn);
3554 			 */
3555 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3556 				/*
3557 				 * If the send queue is empty then it's the
3558 				 * next sequence number that will be
3559 				 * assigned, so we subtract one from this to
3560 				 * get the one we last sent.
3561 				 */
3562 				tp1->rec.data.fast_retran_tsn = sending_seq;
3563 			} else {
3564 				/*
3565 				 * If there are chunks on the send queue
3566 				 * (unsent data that has made it from the
3567 				 * stream queues but not out the door), we
3568 				 * take the first one (which will have the
3569 				 * lowest TSN) and subtract one to get the
3570 				 * one we last sent.
3571 				 */
3572 				struct sctp_tmit_chunk *ttt;
3573 
3574 				ttt = TAILQ_FIRST(&asoc->send_queue);
3575 				tp1->rec.data.fast_retran_tsn =
3576 				    ttt->rec.data.tsn;
3577 			}
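			/*
			 * Illustrative example: if sending_seq is 5000 and
			 * the send queue is empty, fast_retran_tsn becomes
			 * 5000, so a later SACK must newly ack a TSN at or
			 * beyond 5000 (the SCTP_TSN_GE check above) before
			 * this chunk may be struck again.
			 */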
3578 
3579 			if (tp1->do_rtt) {
3580 				/*
3581 				 * this guy had a RTO calculation pending on
3582 				 * it, cancel it
3583 				 */
3584 				if ((tp1->whoTo != NULL) &&
3585 				    (tp1->whoTo->rto_needed == 0)) {
3586 					tp1->whoTo->rto_needed = 1;
3587 				}
3588 				tp1->do_rtt = 0;
3589 			}
3590 			if (alt != tp1->whoTo) {
3591 				/* yes, there is an alternate. */
3592 				sctp_free_remote_addr(tp1->whoTo);
3593 				/* sa_ignore FREED_MEMORY */
3594 				tp1->whoTo = alt;
3595 				atomic_add_int(&alt->ref_count, 1);
3596 			}
3597 		}
3598 	}
3599 }
3600 
3601 struct sctp_tmit_chunk *
3602 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3603     struct sctp_association *asoc)
3604 {
3605 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3606 	struct timeval now;
3607 	int now_filled = 0;
3608 
3609 	if (asoc->prsctp_supported == 0) {
3610 		return (NULL);
3611 	}
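	/*
	 * Walk the sent queue from the front: the advanced peer ack
	 * point may move across chunks marked SKIP or NR_ACKED (and
	 * across RESEND chunks whose PR-SCTP lifetime has expired), but
	 * must stop at the first reliable or still-viable chunk.
	 */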
3612 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3613 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3614 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3615 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3616 			/* no chance to advance, out of here */
3617 			break;
3618 		}
3619 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3620 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3621 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3622 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3623 				    asoc->advanced_peer_ack_point,
3624 				    tp1->rec.data.tsn, 0, 0);
3625 			}
3626 		}
3627 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3628 			/*
3629 			 * We can't fwd-tsn past any that are reliable, i.e.
3630 			 * retransmitted until the asoc fails.
3631 			 */
3632 			break;
3633 		}
3634 		if (!now_filled) {
3635 			(void)SCTP_GETTIME_TIMEVAL(&now);
3636 			now_filled = 1;
3637 		}
3638 		/*
3639 		 * Now we have a chunk which is marked for another
3640 		 * retransmission to a PR-stream but may have run out of its
3641 		 * chances already OR has been marked to skip now. Can we
3642 		 * skip it if it's a resend?
3643 		 */
3644 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3645 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3646 			/*
3647 			 * Now is this one marked for resend and its time is
3648 			 * now up?
3649 			 */
3650 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3651 				/* Yes so drop it */
3652 				if (tp1->data) {
3653 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3654 					    1, SCTP_SO_NOT_LOCKED);
3655 				}
3656 			} else {
3657 				/*
3658 				 * No, we are done when we hit one marked for
3659 				 * resend whose time has not expired.
3660 				 */
3661 				break;
3662 			}
3663 		}
3664 		/*
3665 		 * Ok now if this chunk is marked to drop it we can clean up
3666 		 * the chunk, advance our peer ack point and we can check
3667 		 * the next chunk.
3668 		 */
3669 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3670 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3671 			/* advance PeerAckPoint goes forward */
3672 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3673 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3674 				a_adv = tp1;
3675 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3676 				/* No update but we do save the chk */
3677 				a_adv = tp1;
3678 			}
3679 		} else {
3680 			/*
3681 			 * If it is still in RESEND we can advance no
3682 			 * further
3683 			 */
3684 			break;
3685 		}
3686 	}
3687 	return (a_adv);
3688 }
3689 
3690 static int
3691 sctp_fs_audit(struct sctp_association *asoc)
3692 {
3693 	struct sctp_tmit_chunk *chk;
3694 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3695 	int ret;
3696 #ifndef INVARIANTS
3697 	int entry_flight, entry_cnt;
3698 #endif
3699 
3700 	ret = 0;
3701 #ifndef INVARIANTS
3702 	entry_flight = asoc->total_flight;
3703 	entry_cnt = asoc->total_flight_count;
3704 #endif
3705 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3706 		return (0);
3707 
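	/*
	 * Classification used below (assuming the usual ordering of the
	 * sent states): sent < SCTP_DATAGRAM_RESEND is still in flight,
	 * == RESEND is queued for retransmission, values between RESEND
	 * and ACKED are in-between marking states, == ACKED is acked,
	 * and anything above ACKED (e.g. PR-SCTP skip states) counts as
	 * "above". In-flight and in-between chunks must not be present
	 * for the audit to pass.
	 */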
3708 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3709 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3710 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3711 			    chk->rec.data.tsn,
3712 			    chk->send_size,
3713 			    chk->snd_count);
3714 			inflight++;
3715 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3716 			resend++;
3717 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3718 			inbetween++;
3719 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3720 			above++;
3721 		} else {
3722 			acked++;
3723 		}
3724 	}
3725 
3726 	if ((inflight > 0) || (inbetween > 0)) {
3727 #ifdef INVARIANTS
3728 		panic("Flight size-express incorrect?\n");
3729 #else
3730 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3731 		    entry_flight, entry_cnt);
3732 
3733 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3734 		    inflight, inbetween, resend, above, acked);
3735 		ret = 1;
3736 #endif
3737 	}
3738 	return (ret);
3739 }
3740 
3741 
3742 static void
3743 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3744     struct sctp_association *asoc,
3745     struct sctp_tmit_chunk *tp1)
3746 {
3747 	tp1->window_probe = 0;
3748 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3749 		/* TSN's were skipped; we do NOT move back. */
3750 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3751 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3752 		    tp1->book_size,
3753 		    (uint32_t) (uintptr_t) tp1->whoTo,
3754 		    tp1->rec.data.tsn);
3755 		return;
3756 	}
3757 	/* First setup this by shrinking flight */
3758 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3759 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3760 		    tp1);
3761 	}
3762 	sctp_flight_size_decrease(tp1);
3763 	sctp_total_flight_decrease(stcb, tp1);
3764 	/* Now mark for resend */
3765 	tp1->sent = SCTP_DATAGRAM_RESEND;
3766 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3767 
3768 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3769 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3770 		    tp1->whoTo->flight_size,
3771 		    tp1->book_size,
3772 		    (uint32_t) (uintptr_t) tp1->whoTo,
3773 		    tp1->rec.data.tsn);
3774 	}
3775 }
3776 
3777 void
3778 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3779     uint32_t rwnd, int *abort_now, int ecne_seen)
3780 {
3781 	struct sctp_nets *net;
3782 	struct sctp_association *asoc;
3783 	struct sctp_tmit_chunk *tp1, *tp2;
3784 	uint32_t old_rwnd;
3785 	int win_probe_recovery = 0;
3786 	int win_probe_recovered = 0;
3787 	int j, done_once = 0;
3788 	int rto_ok = 1;
3789 	uint32_t send_s;
3790 
3791 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3792 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3793 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3794 	}
3795 	SCTP_TCB_LOCK_ASSERT(stcb);
3796 #ifdef SCTP_ASOCLOG_OF_TSNS
3797 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3798 	stcb->asoc.cumack_log_at++;
3799 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3800 		stcb->asoc.cumack_log_at = 0;
3801 	}
3802 #endif
3803 	asoc = &stcb->asoc;
3804 	old_rwnd = asoc->peers_rwnd;
3805 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3806 		/* old ack */
3807 		return;
3808 	} else if (asoc->last_acked_seq == cumack) {
3809 		/* Window update sack */
3810 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3811 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3812 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3813 			/* SWS sender side engages */
3814 			asoc->peers_rwnd = 0;
3815 		}
3816 		if (asoc->peers_rwnd > old_rwnd) {
3817 			goto again;
3818 		}
3819 		return;
3820 	}
3821 	/* First setup for CC stuff */
3822 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3823 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3824 			/* Drag along the window_tsn for cwr's */
3825 			net->cwr_window_tsn = cumack;
3826 		}
3827 		net->prev_cwnd = net->cwnd;
3828 		net->net_ack = 0;
3829 		net->net_ack2 = 0;
3830 
3831 		/*
3832 		 * CMT: Reset CUC and Fast recovery algo variables before
3833 		 * SACK processing
3834 		 */
3835 		net->new_pseudo_cumack = 0;
3836 		net->will_exit_fast_recovery = 0;
3837 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3838 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3839 		}
3840 	}
3841 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3842 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3843 		    sctpchunk_listhead);
3844 		send_s = tp1->rec.data.tsn + 1;
3845 	} else {
3846 		send_s = asoc->sending_seq;
3847 	}
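	/*
	 * send_s is one past the highest TSN we have ever handed to the
	 * peer, so a cum-ack at or beyond send_s acks data that was never
	 * sent; that is a protocol violation and aborts the association
	 * below.
	 */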
3848 	if (SCTP_TSN_GE(cumack, send_s)) {
3849 		struct mbuf *op_err;
3850 		char msg[SCTP_DIAG_INFO_LEN];
3851 
3852 		*abort_now = 1;
3853 		/* XXX */
3854 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3855 		    cumack, send_s);
3856 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3857 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
3858 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3859 		return;
3860 	}
3861 	asoc->this_sack_highest_gap = cumack;
3862 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3863 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3864 		    stcb->asoc.overall_error_count,
3865 		    0,
3866 		    SCTP_FROM_SCTP_INDATA,
3867 		    __LINE__);
3868 	}
3869 	stcb->asoc.overall_error_count = 0;
3870 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3871 		/* process the new consecutive TSN first */
3872 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3873 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
3874 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3875 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3876 				}
3877 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3878 					/*
3879 					 * If it is less than ACKED, it is
3880 					 * now no-longer in flight. Higher
3881 					 * values may occur during marking
3882 					 */
3883 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3884 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3885 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3886 							    tp1->whoTo->flight_size,
3887 							    tp1->book_size,
3888 							    (uint32_t) (uintptr_t) tp1->whoTo,
3889 							    tp1->rec.data.tsn);
3890 						}
3891 						sctp_flight_size_decrease(tp1);
3892 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3893 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3894 							    tp1);
3895 						}
3896 						/* sa_ignore NO_NULL_CHK */
3897 						sctp_total_flight_decrease(stcb, tp1);
3898 					}
3899 					tp1->whoTo->net_ack += tp1->send_size;
3900 					if (tp1->snd_count < 2) {
3901 						/*
3902 						 * True non-retransmitted
3903 						 * chunk
3904 						 */
3905 						tp1->whoTo->net_ack2 +=
3906 						    tp1->send_size;
3907 
3908 						/* update RTO too? */
3909 						if (tp1->do_rtt) {
3910 							if (rto_ok) {
3911 								tp1->whoTo->RTO =
3912 								/*
3913 								 * sa_ignore
3914 								 * NO_NULL_CHK
3915 								 */
3916 								    sctp_calculate_rto(stcb,
3917 								    asoc, tp1->whoTo,
3918 								    &tp1->sent_rcv_time,
3919 								    sctp_align_safe_nocopy,
3920 								    SCTP_RTT_FROM_DATA);
3921 								rto_ok = 0;
3922 							}
3923 							if (tp1->whoTo->rto_needed == 0) {
3924 								tp1->whoTo->rto_needed = 1;
3925 							}
3926 							tp1->do_rtt = 0;
3927 						}
3928 					}
3929 					/*
3930 					 * CMT: CUCv2 algorithm. From the
3931 					 * cumack'd TSNs, for each TSN being
3932 					 * acked for the first time, set the
3933 					 * following variables for the
3934 					 * corresp destination.
3935 					 * new_pseudo_cumack will trigger a
3936 					 * cwnd update.
3937 					 * find_(rtx_)pseudo_cumack will
3938 					 * trigger search for the next
3939 					 * expected (rtx-)pseudo-cumack.
3940 					 */
3941 					tp1->whoTo->new_pseudo_cumack = 1;
3942 					tp1->whoTo->find_pseudo_cumack = 1;
3943 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3944 
3945 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3946 						/* sa_ignore NO_NULL_CHK */
3947 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3948 					}
3949 				}
3950 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3951 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3952 				}
3953 				if (tp1->rec.data.chunk_was_revoked) {
3954 					/* deflate the cwnd */
3955 					tp1->whoTo->cwnd -= tp1->book_size;
3956 					tp1->rec.data.chunk_was_revoked = 0;
3957 				}
3958 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3959 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3960 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
3961 #ifdef INVARIANTS
3962 					} else {
3963 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3964 #endif
3965 					}
3966 				}
3967 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3968 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3969 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
3970 					asoc->trigger_reset = 1;
3971 				}
3972 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3973 				if (tp1->data) {
3974 					/* sa_ignore NO_NULL_CHK */
3975 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3976 					sctp_m_freem(tp1->data);
3977 					tp1->data = NULL;
3978 				}
3979 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3980 					sctp_log_sack(asoc->last_acked_seq,
3981 					    cumack,
3982 					    tp1->rec.data.tsn,
3983 					    0,
3984 					    0,
3985 					    SCTP_LOG_FREE_SENT);
3986 				}
3987 				asoc->sent_queue_cnt--;
3988 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3989 			} else {
3990 				break;
3991 			}
3992 		}
3993 
3994 	}
3995 	/* sa_ignore NO_NULL_CHK */
3996 	if (stcb->sctp_socket) {
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 		struct socket *so;
3999 
4000 #endif
4001 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4002 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4003 			/* sa_ignore NO_NULL_CHK */
4004 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4005 		}
4006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4007 		so = SCTP_INP_SO(stcb->sctp_ep);
4008 		atomic_add_int(&stcb->asoc.refcnt, 1);
4009 		SCTP_TCB_UNLOCK(stcb);
4010 		SCTP_SOCKET_LOCK(so, 1);
4011 		SCTP_TCB_LOCK(stcb);
4012 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4013 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4014 			/* assoc was freed while we were unlocked */
4015 			SCTP_SOCKET_UNLOCK(so, 1);
4016 			return;
4017 		}
4018 #endif
4019 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4020 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4021 		SCTP_SOCKET_UNLOCK(so, 1);
4022 #endif
4023 	} else {
4024 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4025 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4026 		}
4027 	}
4028 
4029 	/* JRS - Use the congestion control given in the CC module */
4030 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4031 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4032 			if (net->net_ack2 > 0) {
4033 				/*
4034 				 * Karn's rule applies to clearing error
4035 				 * count; this is optional.
4036 				 */
4037 				net->error_count = 0;
4038 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4039 					/* addr came good */
4040 					net->dest_state |= SCTP_ADDR_REACHABLE;
4041 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4042 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4043 				}
4044 				if (net == stcb->asoc.primary_destination) {
4045 					if (stcb->asoc.alternate) {
4046 						/* release the alternate,
4047 						 * primary is good */
4048 						sctp_free_remote_addr(stcb->asoc.alternate);
4049 						stcb->asoc.alternate = NULL;
4050 					}
4051 				}
4052 				if (net->dest_state & SCTP_ADDR_PF) {
4053 					net->dest_state &= ~SCTP_ADDR_PF;
4054 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4055 					    stcb->sctp_ep, stcb, net,
4056 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4057 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4058 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4059 					/* Done with this net */
4060 					net->net_ack = 0;
4061 				}
4062 				/* restore any doubled timers */
4063 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4064 				if (net->RTO < stcb->asoc.minrto) {
4065 					net->RTO = stcb->asoc.minrto;
4066 				}
4067 				if (net->RTO > stcb->asoc.maxrto) {
4068 					net->RTO = stcb->asoc.maxrto;
4069 				}
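				/*
				 * Illustrative numbers only: with a scaled
				 * lastsa of 800, SCTP_RTT_SHIFT of 3 and
				 * lastsv of 50 this gives RTO = 100 + 50 =
				 * 150 ms before the [minrto, maxrto] clamp.
				 */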
4070 			}
4071 		}
4072 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4073 	}
4074 	asoc->last_acked_seq = cumack;
4075 
4076 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4077 		/* nothing left in-flight */
4078 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4079 			net->flight_size = 0;
4080 			net->partial_bytes_acked = 0;
4081 		}
4082 		asoc->total_flight = 0;
4083 		asoc->total_flight_count = 0;
4084 	}
4085 	/* RWND update */
4086 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4087 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4088 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4089 		/* SWS sender side engages */
4090 		asoc->peers_rwnd = 0;
4091 	}
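	/*
	 * Worked example (illustrative values): an advertised rwnd of
	 * 60000 with total_flight = 10000 bytes over 5 chunks and a
	 * per-chunk overhead (sctp_peer_chunk_oh) of 256 yields
	 * peers_rwnd = 60000 - (10000 + 5 * 256) = 48720. Results below
	 * sctp_sws_sender are forced to 0 to avoid silly window syndrome
	 * transmissions.
	 */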
4092 	if (asoc->peers_rwnd > old_rwnd) {
4093 		win_probe_recovery = 1;
4094 	}
4095 	/* Now assure a timer where data is queued at */
4096 again:
4097 	j = 0;
4098 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4099 		int to_ticks;
4100 
4101 		if (win_probe_recovery && (net->window_probe)) {
4102 			win_probe_recovered = 1;
4103 			/*
4104 			 * Find the first chunk that was used with the window
4105 			 * probe and move it back for resend
4106 			 */
4107 			/* sa_ignore FREED_MEMORY */
4108 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4109 				if (tp1->window_probe) {
4110 					/* move back to data send queue */
4111 					sctp_window_probe_recovery(stcb, asoc, tp1);
4112 					break;
4113 				}
4114 			}
4115 		}
4116 		if (net->RTO == 0) {
4117 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4118 		} else {
4119 			to_ticks = MSEC_TO_TICKS(net->RTO);
4120 		}
4121 		if (net->flight_size) {
4122 			j++;
4123 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4124 			    sctp_timeout_handler, &net->rxt_timer);
4125 			if (net->window_probe) {
4126 				net->window_probe = 0;
4127 			}
4128 		} else {
4129 			if (net->window_probe) {
4130 				/* In window probes we must assure a timer
4131 				 * is still running there */
4132 				net->window_probe = 0;
4133 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4134 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4135 					    sctp_timeout_handler, &net->rxt_timer);
4136 				}
4137 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4138 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4139 				    stcb, net,
4140 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4141 			}
4142 		}
4143 	}
4144 	if ((j == 0) &&
4145 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4146 	    (asoc->sent_queue_retran_cnt == 0) &&
4147 	    (win_probe_recovered == 0) &&
4148 	    (done_once == 0)) {
4149 		/*
4150 		 * huh, this should not happen unless all packets are
4151 		 * PR-SCTP and marked to skip of course.
4152 		 */
4153 		if (sctp_fs_audit(asoc)) {
4154 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4155 				net->flight_size = 0;
4156 			}
4157 			asoc->total_flight = 0;
4158 			asoc->total_flight_count = 0;
4159 			asoc->sent_queue_retran_cnt = 0;
4160 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4161 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4162 					sctp_flight_size_increase(tp1);
4163 					sctp_total_flight_increase(stcb, tp1);
4164 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4165 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4166 				}
4167 			}
4168 		}
4169 		done_once = 1;
4170 		goto again;
4171 	}
4172 	/**********************************/
4173 	/* Now what about shutdown issues */
4174 	/**********************************/
4175 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4176 		/* nothing left on sendqueue.. consider done */
4177 		/* clean up */
4178 		if ((asoc->stream_queue_cnt == 1) &&
4179 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4180 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4181 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4182 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4183 		}
4184 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4185 		    (asoc->stream_queue_cnt == 0)) {
4186 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4187 				/* Need to abort here */
4188 				struct mbuf *op_err;
4189 
4190 		abort_out_now:
4191 				*abort_now = 1;
4192 				/* XXX */
4193 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4194 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4195 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4196 				return;
4197 			} else {
4198 				struct sctp_nets *netp;
4199 
4200 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4201 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4202 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4203 				}
4204 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4205 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4206 				sctp_stop_timers_for_shutdown(stcb);
4207 				if (asoc->alternate) {
4208 					netp = asoc->alternate;
4209 				} else {
4210 					netp = asoc->primary_destination;
4211 				}
4212 				sctp_send_shutdown(stcb, netp);
4213 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4214 				    stcb->sctp_ep, stcb, netp);
4215 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4216 				    stcb->sctp_ep, stcb, netp);
4217 			}
4218 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4219 		    (asoc->stream_queue_cnt == 0)) {
4220 			struct sctp_nets *netp;
4221 
4222 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4223 				goto abort_out_now;
4224 			}
4225 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4226 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4227 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4228 			sctp_stop_timers_for_shutdown(stcb);
4229 			if (asoc->alternate) {
4230 				netp = asoc->alternate;
4231 			} else {
4232 				netp = asoc->primary_destination;
4233 			}
4234 			sctp_send_shutdown_ack(stcb, netp);
4235 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4236 			    stcb->sctp_ep, stcb, netp);
4237 		}
4238 	}
4239 	/*********************************************/
4240 	/* Here we perform PR-SCTP procedures        */
4241 	/* (section 4.2)                             */
4242 	/*********************************************/
4243 	/* C1. update advancedPeerAckPoint */
4244 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4245 		asoc->advanced_peer_ack_point = cumack;
4246 	}
4247 	/* PR-Sctp issues need to be addressed too */
4248 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4249 		struct sctp_tmit_chunk *lchk;
4250 		uint32_t old_adv_peer_ack_point;
4251 
4252 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4253 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4254 		/* C3. See if we need to send a Fwd-TSN */
4255 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4256 			/*
4257 			 * ISSUE with ECN, see FWD-TSN processing.
4258 			 */
4259 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4260 				send_forward_tsn(stcb, asoc);
4261 			} else if (lchk) {
4262 				/* try to FR fwd-tsn's that get lost too */
4263 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4264 					send_forward_tsn(stcb, asoc);
4265 				}
4266 			}
4267 		}
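		/*
		 * The fwd_tsn_cnt >= 3 test above mirrors the usual
		 * three-strike fast-retransmit heuristic: each SACK that
		 * strikes a FORWARD_TSN_SKIP chunk bumps fwd_tsn_cnt (see
		 * sctp_strike_gap_ack_chunks()), so after three such
		 * misses the FWD-TSN itself is presumed lost and re-sent.
		 */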
4268 		if (lchk) {
4269 			/* Assure a timer is up */
4270 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4271 			    stcb->sctp_ep, stcb, lchk->whoTo);
4272 		}
4273 	}
4274 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4275 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4276 		    rwnd,
4277 		    stcb->asoc.peers_rwnd,
4278 		    stcb->asoc.total_flight,
4279 		    stcb->asoc.total_output_queue_size);
4280 	}
4281 }
4282 
4283 void
4284 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4285     struct sctp_tcb *stcb,
4286     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4287     int *abort_now, uint8_t flags,
4288     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4289 {
4290 	struct sctp_association *asoc;
4291 	struct sctp_tmit_chunk *tp1, *tp2;
4292 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4293 	uint16_t wake_him = 0;
4294 	uint32_t send_s = 0;
4295 	long j;
4296 	int accum_moved = 0;
4297 	int will_exit_fast_recovery = 0;
4298 	uint32_t a_rwnd, old_rwnd;
4299 	int win_probe_recovery = 0;
4300 	int win_probe_recovered = 0;
4301 	struct sctp_nets *net = NULL;
4302 	int done_once;
4303 	int rto_ok = 1;
4304 	uint8_t reneged_all = 0;
4305 	uint8_t cmt_dac_flag;
4306 
4307 	/*
4308 	 * we take any chance we can to service our queues since we cannot
4309 	 * get awoken when the socket is read from :<
4310 	 */
4311 	/*
4312 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4313 	 * old sack, if so discard. 2) If there is nothing left in the send
4314 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4315 	 * too, update any rwnd change and verify no timers are running.
4316 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4317 	 * moved process these first and note that it moved. 4) Process any
4318 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4319 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4320 	 * sync up flightsizes and things, stop all timers and also check
4321 	 * for shutdown_pending state. If so then go ahead and send off the
4322 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4323 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4324 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4325 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4326 	 * if in shutdown_recv state.
4327 	 */
4328 	SCTP_TCB_LOCK_ASSERT(stcb);
4329 	/* CMT DAC algo */
4330 	this_sack_lowest_newack = 0;
4331 	SCTP_STAT_INCR(sctps_slowpath_sack);
4332 	last_tsn = cum_ack;
4333 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4334 #ifdef SCTP_ASOCLOG_OF_TSNS
4335 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4336 	stcb->asoc.cumack_log_at++;
4337 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4338 		stcb->asoc.cumack_log_at = 0;
4339 	}
4340 #endif
4341 	a_rwnd = rwnd;
4342 
4343 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4344 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4345 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4346 	}
4347 	old_rwnd = stcb->asoc.peers_rwnd;
4348 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4349 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4350 		    stcb->asoc.overall_error_count,
4351 		    0,
4352 		    SCTP_FROM_SCTP_INDATA,
4353 		    __LINE__);
4354 	}
4355 	stcb->asoc.overall_error_count = 0;
4356 	asoc = &stcb->asoc;
4357 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4358 		sctp_log_sack(asoc->last_acked_seq,
4359 		    cum_ack,
4360 		    0,
4361 		    num_seg,
4362 		    num_dup,
4363 		    SCTP_LOG_NEW_SACK);
4364 	}
4365 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4366 		uint16_t i;
4367 		uint32_t *dupdata, dblock;
4368 
4369 		for (i = 0; i < num_dup; i++) {
4370 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4371 			    sizeof(uint32_t), (uint8_t *) & dblock);
4372 			if (dupdata == NULL) {
4373 				break;
4374 			}
4375 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4376 		}
4377 	}
4378 	/* reality check */
4379 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4380 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4381 		    sctpchunk_listhead);
4382 		send_s = tp1->rec.data.tsn + 1;
4383 	} else {
4384 		tp1 = NULL;
4385 		send_s = asoc->sending_seq;
4386 	}
4387 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4388 		struct mbuf *op_err;
4389 		char msg[SCTP_DIAG_INFO_LEN];
4390 
4391 		/*
4392 		 * no way, we have not even sent this TSN out yet. Peer is
4393 		 * hopelessly messed up with us.
4394 		 */
4395 		SCTP_PRINTF("NEW cum_ack:%x is greater than or equal to send_s:%x\n",
4396 		    cum_ack, send_s);
4397 		if (tp1) {
4398 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4399 			    tp1->rec.data.tsn, (void *)tp1);
4400 		}
4401 hopeless_peer:
4402 		*abort_now = 1;
4403 		/* XXX */
4404 		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4405 		    cum_ack, send_s);
4406 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4407 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4408 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4409 		return;
4410 	}
4411 	/**********************/
4412 	/* 1) check the range */
4413 	/**********************/
4414 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4415 		/* acking something behind */
4416 		return;
4417 	}
4418 	/* update the Rwnd of the peer */
4419 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4420 	    TAILQ_EMPTY(&asoc->send_queue) &&
4421 	    (asoc->stream_queue_cnt == 0)) {
4422 		/* nothing left on send/sent and strmq */
4423 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4424 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4425 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4426 		}
4427 		asoc->peers_rwnd = a_rwnd;
4428 		if (asoc->sent_queue_retran_cnt) {
4429 			asoc->sent_queue_retran_cnt = 0;
4430 		}
4431 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4432 			/* SWS sender side engages */
4433 			asoc->peers_rwnd = 0;
4434 		}
4435 		/* stop any timers */
4436 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4437 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4438 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4439 			net->partial_bytes_acked = 0;
4440 			net->flight_size = 0;
4441 		}
4442 		asoc->total_flight = 0;
4443 		asoc->total_flight_count = 0;
4444 		return;
4445 	}
4446 	/*
4447 	 * We init net_ack and net_ack2 to 0. These are used to track two
4448 	 * things. The total byte count acked is tracked in net_ack AND
4449 	 * net_ack2 is used to track the total bytes acked that are
4450 	 * unambiguous and were never retransmitted. We track these on a
4451 	 * per destination address basis.
4452 	 */
4453 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4454 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4455 			/* Drag along the window_tsn for cwr's */
4456 			net->cwr_window_tsn = cum_ack;
4457 		}
4458 		net->prev_cwnd = net->cwnd;
4459 		net->net_ack = 0;
4460 		net->net_ack2 = 0;
4461 
4462 		/*
4463 		 * CMT: Reset CUC and Fast recovery algo variables before
4464 		 * SACK processing
4465 		 */
4466 		net->new_pseudo_cumack = 0;
4467 		net->will_exit_fast_recovery = 0;
4468 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4469 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4470 		}
4471 	}
4472 	/* process the new consecutive TSN first */
4473 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4474 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4475 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4476 				accum_moved = 1;
4477 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4478 					/*
4479 					 * If it is less than ACKED, it is
4480 					 * now no-longer in flight. Higher
4481 					 * values may occur during marking
4482 					 */
4483 					if ((tp1->whoTo->dest_state &
4484 					    SCTP_ADDR_UNCONFIRMED) &&
4485 					    (tp1->snd_count < 2)) {
4486 						/*
4487 						 * If there was no retran
4488 						 * and the address is
4489 						 * un-confirmed and we sent
4490 						 * there and are now
4491 						 * sacked... it's confirmed,
4492 						 * mark it so.
4493 						 */
4494 						tp1->whoTo->dest_state &=
4495 						    ~SCTP_ADDR_UNCONFIRMED;
4496 					}
4497 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4498 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4499 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4500 							    tp1->whoTo->flight_size,
4501 							    tp1->book_size,
4502 							    (uint32_t) (uintptr_t) tp1->whoTo,
4503 							    tp1->rec.data.tsn);
4504 						}
4505 						sctp_flight_size_decrease(tp1);
4506 						sctp_total_flight_decrease(stcb, tp1);
4507 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4508 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4509 							    tp1);
4510 						}
4511 					}
4512 					tp1->whoTo->net_ack += tp1->send_size;
4513 
4514 					/* CMT SFR and DAC algos */
4515 					this_sack_lowest_newack = tp1->rec.data.tsn;
4516 					tp1->whoTo->saw_newack = 1;
4517 
4518 					if (tp1->snd_count < 2) {
4519 						/*
4520 						 * True non-retransmitted
4521 						 * chunk
4522 						 */
4523 						tp1->whoTo->net_ack2 +=
4524 						    tp1->send_size;
4525 
4526 						/* update RTO too? */
4527 						if (tp1->do_rtt) {
4528 							if (rto_ok) {
4529 								tp1->whoTo->RTO =
4530 								    sctp_calculate_rto(stcb,
4531 								    asoc, tp1->whoTo,
4532 								    &tp1->sent_rcv_time,
4533 								    sctp_align_safe_nocopy,
4534 								    SCTP_RTT_FROM_DATA);
4535 								rto_ok = 0;
4536 							}
4537 							if (tp1->whoTo->rto_needed == 0) {
4538 								tp1->whoTo->rto_needed = 1;
4539 							}
4540 							tp1->do_rtt = 0;
4541 						}
4542 					}
4543 					/*
4544 					 * CMT: CUCv2 algorithm. From the
4545 					 * cumack'd TSNs, for each TSN being
4546 					 * acked for the first time, set the
4547 					 * following variables for the
4548 					 * corresp destination.
4549 					 * new_pseudo_cumack will trigger a
4550 					 * cwnd update.
4551 					 * find_(rtx_)pseudo_cumack will
4552 					 * trigger search for the next
4553 					 * expected (rtx-)pseudo-cumack.
4554 					 */
4555 					tp1->whoTo->new_pseudo_cumack = 1;
4556 					tp1->whoTo->find_pseudo_cumack = 1;
4557 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4558 
4559 
4560 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4561 						sctp_log_sack(asoc->last_acked_seq,
4562 						    cum_ack,
4563 						    tp1->rec.data.tsn,
4564 						    0,
4565 						    0,
4566 						    SCTP_LOG_TSN_ACKED);
4567 					}
4568 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4569 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4570 					}
4571 				}
4572 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4573 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4574 #ifdef SCTP_AUDITING_ENABLED
4575 					sctp_audit_log(0xB3,
4576 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4577 #endif
4578 				}
4579 				if (tp1->rec.data.chunk_was_revoked) {
4580 					/* deflate the cwnd */
4581 					tp1->whoTo->cwnd -= tp1->book_size;
4582 					tp1->rec.data.chunk_was_revoked = 0;
4583 				}
4584 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4585 					tp1->sent = SCTP_DATAGRAM_ACKED;
4586 				}
4587 			}
4588 		} else {
4589 			break;
4590 		}
4591 	}
4592 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4593 	/* always set this up to cum-ack */
4594 	asoc->this_sack_highest_gap = last_tsn;
4595 
4596 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4597 
4598 		/*
4599 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4600 		 * to be greater than the cumack. Also reset saw_newack to 0
4601 		 * for all dests.
4602 		 */
4603 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4604 			net->saw_newack = 0;
4605 			net->this_sack_highest_newack = last_tsn;
4606 		}
4607 
4608 		/*
4609 		 * this_sack_highest_gap will increase while handling NEW
4610 		 * segments; this_sack_highest_newack will increase while
4611 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4612 		 * used for the CMT DAC algo. saw_newack will also change.
4613 		 */
4614 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4615 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4616 		    num_seg, num_nr_seg, &rto_ok)) {
4617 			wake_him++;
4618 		}
4619 		/*
4620 		 * validate the biggest_tsn_acked in the gap acks if strict
4621 		 * adherence is wanted.
4622 		 */
4623 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4624 			/*
4625 			 * peer is either confused or we are under attack.
4626 			 * We must abort.
4627 			 */
4628 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4629 			    biggest_tsn_acked, send_s);
4630 			goto hopeless_peer;
4631 		}
4632 	}
4633 	/*******************************************/
4634 	/* cancel ALL T3-send timer if accum moved */
4635 	/*******************************************/
4636 	if (asoc->sctp_cmt_on_off > 0) {
4637 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 			if (net->new_pseudo_cumack)
4639 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4640 				    stcb, net,
4641 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4642 
4643 		}
4644 	} else {
4645 		if (accum_moved) {
4646 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4647 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4648 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4649 			}
4650 		}
4651 	}
4652 	/********************************************/
4653 	/* drop the acked chunks from the sentqueue */
4654 	/********************************************/
4655 	asoc->last_acked_seq = cum_ack;
4656 
4657 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4658 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4659 			break;
4660 		}
4661 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4662 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4663 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4664 #ifdef INVARIANTS
4665 			} else {
4666 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4667 #endif
4668 			}
4669 		}
4670 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4671 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4672 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4673 			asoc->trigger_reset = 1;
4674 		}
4675 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4676 		if (PR_SCTP_ENABLED(tp1->flags)) {
4677 			if (asoc->pr_sctp_cnt != 0)
4678 				asoc->pr_sctp_cnt--;
4679 		}
4680 		asoc->sent_queue_cnt--;
4681 		if (tp1->data) {
4682 			/* sa_ignore NO_NULL_CHK */
4683 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4684 			sctp_m_freem(tp1->data);
4685 			tp1->data = NULL;
4686 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4687 				asoc->sent_queue_cnt_removeable--;
4688 			}
4689 		}
4690 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4691 			sctp_log_sack(asoc->last_acked_seq,
4692 			    cum_ack,
4693 			    tp1->rec.data.tsn,
4694 			    0,
4695 			    0,
4696 			    SCTP_LOG_FREE_SENT);
4697 		}
4698 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4699 		wake_him++;
4700 	}
4701 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4702 #ifdef INVARIANTS
4703 		panic("Warning flight size is positive and should be 0");
4704 #else
4705 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4706 		    asoc->total_flight);
4707 #endif
4708 		asoc->total_flight = 0;
4709 	}
4710 	/* sa_ignore NO_NULL_CHK */
4711 	if ((wake_him) && (stcb->sctp_socket)) {
4712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4713 		struct socket *so;
4714 
4715 #endif
4716 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4717 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4718 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4719 		}
4720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4721 		so = SCTP_INP_SO(stcb->sctp_ep);
4722 		atomic_add_int(&stcb->asoc.refcnt, 1);
4723 		SCTP_TCB_UNLOCK(stcb);
4724 		SCTP_SOCKET_LOCK(so, 1);
4725 		SCTP_TCB_LOCK(stcb);
4726 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4727 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4728 			/* assoc was freed while we were unlocked */
4729 			SCTP_SOCKET_UNLOCK(so, 1);
4730 			return;
4731 		}
4732 #endif
4733 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4734 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4735 		SCTP_SOCKET_UNLOCK(so, 1);
4736 #endif
4737 	} else {
4738 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4739 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4740 		}
4741 	}
4742 
4743 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4744 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4745 			/* Setup so we will exit RFC2582 fast recovery */
4746 			will_exit_fast_recovery = 1;
4747 		}
4748 	}
4749 	/*
4750 	 * Check for revoked fragments:
4751 	 *
4752 	 * - Previous SACK had no gap reports: nothing can be revoked.
4753 	 * - Previous SACK had gap reports and this one does too
4754 	 *   (num_seg > 0): sctp_check_for_revoked() tells what was revoked.
4755 	 * - Otherwise the peer revoked all ACKED fragments; we had some
4756 	 *   before and now have none.
4757 	 */
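	/*
	 * For example: if the previous SACK gap-acked TSNs 7-8 and this
	 * SACK reports no gaps at all, TSNs 7-8 are treated as revoked
	 * below, marked SCTP_DATAGRAM_SENT again, and put back into the
	 * flight.
	 */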
4758 
4759 	if (num_seg) {
4760 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4761 		asoc->saw_sack_with_frags = 1;
4762 	} else if (asoc->saw_sack_with_frags) {
4763 		int cnt_revoked = 0;
4764 
4765 		/* Peer revoked all dg's marked or acked */
4766 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4767 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4768 				tp1->sent = SCTP_DATAGRAM_SENT;
4769 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4770 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4771 					    tp1->whoTo->flight_size,
4772 					    tp1->book_size,
4773 					    (uint32_t) (uintptr_t) tp1->whoTo,
4774 					    tp1->rec.data.tsn);
4775 				}
4776 				sctp_flight_size_increase(tp1);
4777 				sctp_total_flight_increase(stcb, tp1);
4778 				tp1->rec.data.chunk_was_revoked = 1;
4779 				/*
4780 				 * To ensure that this increase in
4781 				 * flightsize, which is artificial, does not
4782 				 * throttle the sender, we also increase the
4783 				 * cwnd artificially.
4784 				 */
4785 				tp1->whoTo->cwnd += tp1->book_size;
4786 				cnt_revoked++;
4787 			}
4788 		}
4789 		if (cnt_revoked) {
4790 			reneged_all = 1;
4791 		}
4792 		asoc->saw_sack_with_frags = 0;
4793 	}
4794 	if (num_nr_seg > 0)
4795 		asoc->saw_sack_with_nr_frags = 1;
4796 	else
4797 		asoc->saw_sack_with_nr_frags = 0;
4798 
4799 	/* JRS - Use the congestion control given in the CC module */
4800 	if (ecne_seen == 0) {
4801 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4802 			if (net->net_ack2 > 0) {
4803 				/*
4804 				 * Karn's rule applies to clearing error
4805 				 * Karn's rule applies to clearing the error
4806 				 * count; this is optional.
4807 				net->error_count = 0;
4808 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4809 					/* addr came good */
4810 					net->dest_state |= SCTP_ADDR_REACHABLE;
4811 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4812 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4813 				}
4814 				if (net == stcb->asoc.primary_destination) {
4815 					if (stcb->asoc.alternate) {
4816 					/* release the alternate;
4817 					 * the primary is good */
4818 						sctp_free_remote_addr(stcb->asoc.alternate);
4819 						stcb->asoc.alternate = NULL;
4820 					}
4821 				}
4822 				if (net->dest_state & SCTP_ADDR_PF) {
4823 					net->dest_state &= ~SCTP_ADDR_PF;
4824 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4825 					    stcb->sctp_ep, stcb, net,
4826 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4827 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4828 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4829 					/* Done with this net */
4830 					net->net_ack = 0;
4831 				}
4832 				/* restore any doubled timers */
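				/*
				 * net->RTO is rebuilt from the smoothed RTT
				 * (lastsa, kept left-shifted by
				 * SCTP_RTT_SHIFT) plus the variance term
				 * (lastsv), which discards any exponential
				 * backoff; it is then clamped to the
				 * association's [minrto, maxrto] range below.
				 */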
4833 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4834 				if (net->RTO < stcb->asoc.minrto) {
4835 					net->RTO = stcb->asoc.minrto;
4836 				}
4837 				if (net->RTO > stcb->asoc.maxrto) {
4838 					net->RTO = stcb->asoc.maxrto;
4839 				}
4840 			}
4841 		}
4842 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4843 	}
4844 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4845 		/* nothing left in-flight */
4846 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4847 			/* stop all timers */
4848 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4849 			    stcb, net,
4850 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4851 			net->flight_size = 0;
4852 			net->partial_bytes_acked = 0;
4853 		}
4854 		asoc->total_flight = 0;
4855 		asoc->total_flight_count = 0;
4856 	}
4857 	/**********************************/
4858 	/* Now what about shutdown issues */
4859 	/**********************************/
4860 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4861 		/* nothing left on the send queue; consider it done */
4862 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4863 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4864 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4865 		}
4866 		asoc->peers_rwnd = a_rwnd;
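		/*
		 * Sender-side silly window avoidance: if the peer's window
		 * falls below the sctp_sws_sender threshold, treat it as
		 * zero rather than dribbling out tiny chunks.
		 */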
4867 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4868 			/* SWS sender side engages */
4869 			asoc->peers_rwnd = 0;
4870 		}
4871 		/* clean up */
4872 		if ((asoc->stream_queue_cnt == 1) &&
4873 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4874 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4875 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4876 			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4877 		}
4878 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4879 		    (asoc->stream_queue_cnt == 0)) {
4880 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4881 				/* Need to abort here */
4882 				struct mbuf *op_err;
4883 
4884 		abort_out_now:
4885 				*abort_now = 1;
4886 				/* XXX */
4887 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4888 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4889 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4890 				return;
4891 			} else {
4892 				struct sctp_nets *netp;
4893 
4894 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4895 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4896 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4897 				}
4898 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4899 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4900 				sctp_stop_timers_for_shutdown(stcb);
4901 				if (asoc->alternate) {
4902 					netp = asoc->alternate;
4903 				} else {
4904 					netp = asoc->primary_destination;
4905 				}
4906 				sctp_send_shutdown(stcb, netp);
4907 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4908 				    stcb->sctp_ep, stcb, netp);
4909 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4910 				    stcb->sctp_ep, stcb, netp);
4911 			}
4912 			return;
4913 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4914 		    (asoc->stream_queue_cnt == 0)) {
4915 			struct sctp_nets *netp;
4916 
4917 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4918 				goto abort_out_now;
4919 			}
4920 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4921 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4922 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4923 			sctp_stop_timers_for_shutdown(stcb);
4924 			if (asoc->alternate) {
4925 				netp = asoc->alternate;
4926 			} else {
4927 				netp = asoc->primary_destination;
4928 			}
4929 			sctp_send_shutdown_ack(stcb, netp);
4930 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4931 			    stcb->sctp_ep, stcb, netp);
4932 			return;
4933 		}
4934 	}
4935 	/*
4936 	 * Now here we are going to recycle net_ack for a different use...
4937 	 * HEADS UP.
4938 	 */
4939 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4940 		net->net_ack = 0;
4941 	}
4942 
4943 	/*
4944 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4945 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4946 	 * automatically ensure that.
4947 	 */
4948 	if ((asoc->sctp_cmt_on_off > 0) &&
4949 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4950 	    (cmt_dac_flag == 0)) {
4951 		this_sack_lowest_newack = cum_ack;
4952 	}
4953 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4954 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4955 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4956 	}
4957 	/* JRS - Use the congestion control given in the CC module */
4958 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4959 
4960 	/* Now are we exiting loss recovery ? */
4961 	if (will_exit_fast_recovery) {
4962 		/* Ok, we must exit fast recovery */
4963 		asoc->fast_retran_loss_recovery = 0;
4964 	}
4965 	if ((asoc->sat_t3_loss_recovery) &&
4966 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4967 		/* end satellite t3 loss recovery */
4968 		asoc->sat_t3_loss_recovery = 0;
4969 	}
4970 	/*
4971 	 * CMT Fast recovery
4972 	 */
4973 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4974 		if (net->will_exit_fast_recovery) {
4975 			/* Ok, we must exit fast recovery */
4976 			net->fast_retran_loss_recovery = 0;
4977 		}
4978 	}
4979 
4980 	/* Adjust and set the new rwnd value */
4981 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4982 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4983 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4984 	}
4985 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4986 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
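	/*
	 * The usable peer window is what the peer advertised minus what is
	 * still in flight, charging sctp_peer_chunk_oh bytes of assumed
	 * per-chunk overhead.  For example, a_rwnd = 10000, total_flight =
	 * 3000 and 4 chunks outstanding at 256 bytes of overhead each gives
	 * 10000 - (3000 + 1024) = 5976; sctp_sbspace_sub() clamps at zero.
	 */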
4987 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4988 		/* SWS sender side engages */
4989 		asoc->peers_rwnd = 0;
4990 	}
4991 	if (asoc->peers_rwnd > old_rwnd) {
4992 		win_probe_recovery = 1;
4993 	}
4994 	/*
4995 	 * Now we must setup so we have a timer up for anyone with
4996 	 * outstanding data.
4997 	 */
4998 	done_once = 0;
4999 again:
5000 	j = 0;
5001 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5002 		if (win_probe_recovery && (net->window_probe)) {
5003 			win_probe_recovered = 1;
5004 			/*-
5005 			 * Find the first chunk that was used for a
5006 			 * window probe and clear the event. Put it
5007 			 * back into the send queue as if it had
5008 			 * not been sent.
5009 			 */
5010 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5011 				if (tp1->window_probe) {
5012 					sctp_window_probe_recovery(stcb, asoc, tp1);
5013 					break;
5014 				}
5015 			}
5016 		}
5017 		if (net->flight_size) {
5018 			j++;
5019 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5020 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5021 				    stcb->sctp_ep, stcb, net);
5022 			}
5023 			if (net->window_probe) {
5024 				net->window_probe = 0;
5025 			}
5026 		} else {
5027 			if (net->window_probe) {
5028 				/* For window probes we must ensure that
5029 				 * a timer is still running there */
5030 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5031 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5032 					    stcb->sctp_ep, stcb, net);
5033 
5034 				}
5035 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5036 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5037 				    stcb, net,
5038 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5039 			}
5040 		}
5041 	}
5042 	if ((j == 0) &&
5043 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5044 	    (asoc->sent_queue_retran_cnt == 0) &&
5045 	    (win_probe_recovered == 0) &&
5046 	    (done_once == 0)) {
5047 		/*
5048 		 * This should not happen unless all packets are
5049 		 * PR-SCTP and marked to be skipped, of course.
5050 		 */
5051 		if (sctp_fs_audit(asoc)) {
5052 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5053 				net->flight_size = 0;
5054 			}
5055 			asoc->total_flight = 0;
5056 			asoc->total_flight_count = 0;
5057 			asoc->sent_queue_retran_cnt = 0;
5058 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5059 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5060 					sctp_flight_size_increase(tp1);
5061 					sctp_total_flight_increase(stcb, tp1);
5062 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5063 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5064 				}
5065 			}
5066 		}
5067 		done_once = 1;
5068 		goto again;
5069 	}
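	/*
	 * Note that the done_once latch above allows exactly one recovery
	 * pass: if no destination has data in flight, the sent queue is not
	 * empty, and nothing else explains it, sctp_fs_audit() rebuilds the
	 * flight accounting from the sent queue and the timer scan is
	 * retried once.
	 */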
5070 	/*********************************************/
5071 	/* Here we perform PR-SCTP procedures        */
5072 	/* (section 4.2)                             */
5073 	/*********************************************/
5074 	/* C1. update advancedPeerAckPoint */
5075 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5076 		asoc->advanced_peer_ack_point = cum_ack;
5077 	}
5078 	/* C2. try to further move advancedPeerAckPoint ahead */
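	/*
	 * A sketch of the PR-SCTP case: with cum_ack = 10 and abandoned
	 * PR-SCTP chunks 11-13 still on the sent queue,
	 * sctp_try_advance_peer_ack_point() moves advanced_peer_ack_point
	 * to 13, and since 13 > 10 (and the point actually advanced) a
	 * FORWARD-TSN is sent so the peer can move its cumulative ack past
	 * the abandoned data.
	 */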
5079 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5080 		struct sctp_tmit_chunk *lchk;
5081 		uint32_t old_adv_peer_ack_point;
5082 
5083 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5084 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5085 		/* C3. See if we need to send a Fwd-TSN */
5086 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5087 			/*
5088 			 * ISSUE with ECN, see FWD-TSN processing.
5089 			 */
5090 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5091 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5092 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5093 				    old_adv_peer_ack_point);
5094 			}
5095 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5096 				send_forward_tsn(stcb, asoc);
5097 			} else if (lchk) {
5098 				/* try to fast-retransmit fwd-tsn's that get lost too */
5099 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5100 					send_forward_tsn(stcb, asoc);
5101 				}
5102 			}
5103 		}
5104 		if (lchk) {
5105 			/* Assure a timer is up */
5106 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5107 			    stcb->sctp_ep, stcb, lchk->whoTo);
5108 		}
5109 	}
5110 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5111 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5112 		    a_rwnd,
5113 		    stcb->asoc.peers_rwnd,
5114 		    stcb->asoc.total_flight,
5115 		    stcb->asoc.total_output_queue_size);
5116 	}
5117 }
5118 
5119 void
5120 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5121 {
5122 	/* Copy cum-ack */
5123 	uint32_t cum_ack, a_rwnd;
5124 
5125 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5126 	/* Arrange so a_rwnd does NOT change */
5127 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
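	/*
	 * A SHUTDOWN chunk carries a cumulative TSN ack but no advertised
	 * window, so synthesize one: the SACK handler subtracts the flight
	 * from a_rwnd again, leaving peers_rwnd (roughly) where it was.
	 */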
5128 
5129 	/* Now call the express sack handling */
5130 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5131 }
5132 
5133 static void
5134 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5135     struct sctp_stream_in *strmin)
5136 {
5137 	struct sctp_queued_to_read *ctl, *nctl;
5138 	struct sctp_association *asoc;
5139 	uint32_t mid;
5140 	int need_reasm_check = 0;
5141 
5142 	asoc = &stcb->asoc;
5143 	mid = strmin->last_mid_delivered;
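	/*
	 * Two passes follow: first deliver, or queue for reassembly,
	 * everything at or below the MID the FORWARD-TSN moved us to; then
	 * resume normal in-order delivery for whatever became ready next.
	 */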
5144 	/*
5145 	 * First deliver anything prior to and including the message ID
5146 	 * (MID) that came in.
5147 	 */
5148 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5149 		if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
5150 			/* this is deliverable now */
5151 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5152 				if (ctl->on_strm_q) {
5153 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5154 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5155 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5156 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5157 #ifdef INVARIANTS
5158 					} else {
5159 						panic("strmin: %p ctl: %p unknown %d",
5160 						    strmin, ctl, ctl->on_strm_q);
5161 #endif
5162 					}
5163 					ctl->on_strm_q = 0;
5164 				}
5165 				/* subtract pending on streams */
5166 				asoc->size_on_all_streams -= ctl->length;
5167 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5168 				/* deliver it to at least the delivery-q */
5169 				if (stcb->sctp_socket) {
5170 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5171 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5172 					    ctl,
5173 					    &stcb->sctp_socket->so_rcv,
5174 					    1, SCTP_READ_LOCK_HELD,
5175 					    SCTP_SO_NOT_LOCKED);
5176 				}
5177 			} else {
5178 				/* It's a fragmented message */
5179 				if (ctl->first_frag_seen) {
5180 					/* Make this the next to deliver;
5181 					 * we restore it later */
5182 					strmin->last_mid_delivered = ctl->mid - 1;
5183 					need_reasm_check = 1;
5184 					break;
5185 				}
5186 			}
5187 		} else {
5188 			/* no more delivery now. */
5189 			break;
5190 		}
5191 	}
5192 	if (need_reasm_check) {
5193 		int ret;
5194 
5195 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5196 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5197 			/* Restore the next to deliver unless we are ahead */
5198 			strmin->last_mid_delivered = mid;
5199 		}
5200 		if (ret == 0) {
5201 			/* Left the partially delivered one at the front */
5202 			return;
5203 		}
5204 		need_reasm_check = 0;
5205 	}
5206 	/*
5207 	 * Now we must deliver things in the queue the normal way, if any
5208 	 * are now ready.
5209 	 */
5210 	mid = strmin->last_mid_delivered + 1;
5211 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
5212 		if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
5213 			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5214 				/* this is deliverable now */
5215 				if (ctl->on_strm_q) {
5216 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5217 						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
5218 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5219 						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
5220 #ifdef INVARIANTS
5221 					} else {
5222 						panic("strmin: %p ctl: %p unknown %d",
5223 						    strmin, ctl, ctl->on_strm_q);
5224 #endif
5225 					}
5226 					ctl->on_strm_q = 0;
5227 				}
5228 				/* subtract pending on streams */
5229 				asoc->size_on_all_streams -= ctl->length;
5230 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5231 				/* deliver it to at least the delivery-q */
5232 				strmin->last_mid_delivered = ctl->mid;
5233 				if (stcb->sctp_socket) {
5234 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5235 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5236 					    ctl,
5237 					    &stcb->sctp_socket->so_rcv, 1,
5238 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5239 
5240 				}
5241 				mid = strmin->last_mid_delivered + 1;
5242 			} else {
5243 				/* It's a fragmented message */
5244 				if (ctl->first_frag_seen) {
5245 					/* Make this the next to
5246 					 * deliver */
5247 					strmin->last_mid_delivered = ctl->mid - 1;
5248 					need_reasm_check = 1;
5249 					break;
5250 				}
5251 			}
5252 		} else {
5253 			break;
5254 		}
5255 	}
5256 	if (need_reasm_check) {
5257 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5258 	}
5259 }
5260 
5261 
5262 
5263 static void
5264 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5265     struct sctp_association *asoc,
5266     uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5267 {
5268 	struct sctp_queued_to_read *control;
5269 	struct sctp_stream_in *strm;
5270 	struct sctp_tmit_chunk *chk, *nchk;
5271 	int cnt_removed = 0;
5272 
5273 	/*
5274 	 * For now, large messages held on the stream reassembly queue that
5275 	 * are complete will be tossed too. In theory we could do more work,
5276 	 * spinning through and stopping after dumping one message (i.e. on
5277 	 * seeing the start of a new message at the head) and calling the
5278 	 * delivery function to see if it can be delivered. But for now we
5279 	 * just dump everything on the queue.
5280 	 */
5281 	strm = &asoc->strmin[stream];
5282 	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5283 	if (control == NULL) {
5284 		/* Not found */
5285 		return;
5286 	}
5287 	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5288 		return;
5289 	}
5290 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5291 		/* Purge hanging chunks */
5292 		if (!asoc->idata_supported && (ordered == 0)) {
5293 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5294 				break;
5295 			}
5296 		}
5297 		cnt_removed++;
5298 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5299 		asoc->size_on_reasm_queue -= chk->send_size;
5300 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5301 		if (chk->data) {
5302 			sctp_m_freem(chk->data);
5303 			chk->data = NULL;
5304 		}
5305 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5306 	}
5307 	if (!TAILQ_EMPTY(&control->reasm)) {
5308 		/* This has to be old data, unordered */
5309 		if (control->data) {
5310 			sctp_m_freem(control->data);
5311 			control->data = NULL;
5312 		}
5313 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5314 		chk = TAILQ_FIRST(&control->reasm);
5315 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5316 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5317 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5318 			    chk, SCTP_READ_LOCK_HELD);
5319 		}
5320 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5321 		return;
5322 	}
5323 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5324 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5325 		control->on_strm_q = 0;
5326 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5327 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5328 		control->on_strm_q = 0;
5329 #ifdef INVARIANTS
5330 	} else if (control->on_strm_q) {
5331 		panic("strm: %p ctl: %p unknown %d",
5332 		    strm, control, control->on_strm_q);
5333 #endif
5334 	}
5335 	control->on_strm_q = 0;
5336 	if (control->on_read_q == 0) {
5337 		sctp_free_remote_addr(control->whoFrom);
5338 		if (control->data) {
5339 			sctp_m_freem(control->data);
5340 			control->data = NULL;
5341 		}
5342 		sctp_free_a_readq(stcb, control);
5343 	}
5344 }
5345 
5346 void
5347 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5348     struct sctp_forward_tsn_chunk *fwd,
5349     int *abort_flag, struct mbuf *m, int offset)
5350 {
5351 	/* The pr-sctp fwd tsn */
5352 	/*
5353 	 * Here we perform all the data receiver side steps for
5354 	 * processing FwdTSN, as required by the PR-SCTP draft.
5355 	 *
5356 	 * Assume we get FwdTSN(x):
5357 	 * 1) update local cumTSN to x
5358 	 * 2) try to further advance cumTSN to x + others we have
5359 	 * 3) examine and update re-ordering queue on pr-in-streams
5360 	 * 4) clean up re-assembly queue
5361 	 * 5) send a SACK to report where we are
5362 	 */
5363 	struct sctp_association *asoc;
5364 	uint32_t new_cum_tsn, gap;
5365 	unsigned int i, fwd_sz, m_size;
5366 	uint32_t str_seq;
5367 	struct sctp_stream_in *strm;
5368 	struct sctp_queued_to_read *ctl, *sv;
5369 
5370 	asoc = &stcb->asoc;
5371 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5372 		SCTPDBG(SCTP_DEBUG_INDATA1,
5373 		    "Bad size (too small) for fwd-tsn chunk\n");
5374 		return;
5375 	}
5376 	m_size = (stcb->asoc.mapping_array_size << 3);
5377 	/*************************************************************/
5378 	/* 1. Here we update local cumTSN and shift the bitmap array */
5379 	/*************************************************************/
5380 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5381 
5382 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5383 		/* Already got there ... */
5384 		return;
5385 	}
5386 	/*
5387 	 * now we know the new TSN is more advanced, let's find the actual
5388 	 * gap
5389 	 */
5390 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5391 	asoc->cumulative_tsn = new_cum_tsn;
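	/*
	 * The gap is the serial-arithmetic distance from the base of the
	 * mapping array, e.g. base_tsn = 100 and new_cum_tsn = 130 gives
	 * gap = 30: bit 30 of the map corresponds to TSN 130.
	 */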
5392 	if (gap >= m_size) {
5393 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5394 			struct mbuf *op_err;
5395 			char msg[SCTP_DIAG_INFO_LEN];
5396 
5397 			/*
5398 			 * Out of range (even counting single-byte chunks in
5399 			 * the rwnd we give out); this must be an attacker.
5400 			 */
5401 			*abort_flag = 1;
5402 			snprintf(msg, sizeof(msg),
5403 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5404 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5405 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5406 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5407 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5408 			return;
5409 		}
5410 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5411 
5412 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5413 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5414 		asoc->highest_tsn_inside_map = new_cum_tsn;
5415 
5416 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5417 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5418 
5419 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5420 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5421 		}
5422 	} else {
5423 		SCTP_TCB_LOCK_ASSERT(stcb);
5424 		for (i = 0; i <= gap; i++) {
5425 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5426 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5427 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5428 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5429 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5430 				}
5431 			}
5432 		}
5433 	}
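	/*
	 * Either way, every TSN up to and including new_cum_tsn is now
	 * accounted for: either the maps were cleared and rebased just past
	 * new_cum_tsn, or the intervening bits were set in the nr-map.
	 */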
5434 	/*************************************************************/
5435 	/* 2. Clear up re-assembly queue                             */
5436 	/*************************************************************/
5437 
5438 	/* This is now done as part of clearing up the stream/seq */
5439 	if (asoc->idata_supported == 0) {
5440 		uint16_t sid;
5441 
5442 		/* Flush all the un-ordered data based on cum-tsn */
5443 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5444 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5445 			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5446 		}
5447 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5448 	}
5449 	/*******************************************************/
5450 	/* 3. Update the PR-stream re-ordering queues and fix  */
5451 	/*    delivery issues as needed.                       */
5452 	/*******************************************************/
5453 	fwd_sz -= sizeof(*fwd);
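	/*
	 * What remains of the chunk is an array of per-stream entries:
	 * (sid, ssn) pairs (struct sctp_strseq), or (sid, flags, mid)
	 * triples (struct sctp_strseq_mid) when I-DATA is in use.
	 */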
5454 	if (m && fwd_sz) {
5455 		/* New method. */
5456 		unsigned int num_str;
5457 		uint32_t mid, cur_mid;
5458 		uint16_t sid;
5459 		uint16_t ordered, flags;
5460 		struct sctp_strseq *stseq, strseqbuf;
5461 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5462 
5463 		offset += sizeof(*fwd);
5464 
5465 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5466 		if (asoc->idata_supported) {
5467 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5468 		} else {
5469 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5470 		}
5471 		for (i = 0; i < num_str; i++) {
5472 			if (asoc->idata_supported) {
5473 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5474 				    sizeof(struct sctp_strseq_mid),
5475 				    (uint8_t *) & strseqbuf_m);
5476 				offset += sizeof(struct sctp_strseq_mid);
5477 				if (stseq_m == NULL) {
5478 					break;
5479 				}
5480 				sid = ntohs(stseq_m->sid);
5481 				mid = ntohl(stseq_m->mid);
5482 				flags = ntohs(stseq_m->flags);
5483 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5484 					ordered = 0;
5485 				} else {
5486 					ordered = 1;
5487 				}
5488 			} else {
5489 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5490 				    sizeof(struct sctp_strseq),
5491 				    (uint8_t *) & strseqbuf);
5492 				offset += sizeof(struct sctp_strseq);
5493 				if (stseq == NULL) {
5494 					break;
5495 				}
5496 				sid = ntohs(stseq->sid);
5497 				mid = (uint32_t) ntohs(stseq->ssn);
5498 				ordered = 1;
5499 			}
5500 			/* Now process this entry. */
5501 
5504 			/*
5505 			 * OK, we now look for the stream/seq on the read
5506 			 * queue where it's not all delivered. If we find it,
5507 			 * we transmute the read entry into a PDI_ABORTED.
5508 			 */
5509 			if (sid >= asoc->streamincnt) {
5510 				/* bogus stream id, stop! */
5511 				break;
5512 			}
5513 			if ((asoc->str_of_pdapi == sid) &&
5514 			    (asoc->ssn_of_pdapi == mid)) {
5515 				/*
5516 				 * If this is the one we are currently
5517 				 * partially delivering, then we no longer
5518 				 * are. Note this will change with the
5519 				 * reassembly re-write.
5520 				 */
5521 				asoc->fragmented_delivery_inprogress = 0;
5522 			}
5523 			strm = &asoc->strmin[sid];
5524 			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5525 				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5526 			}
5527 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5528 				if ((ctl->sinfo_stream == sid) &&
5529 				    (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
5530 					str_seq = (sid << 16) | (0x0000ffff & mid);
5531 					ctl->pdapi_aborted = 1;
5532 					sv = stcb->asoc.control_pdapi;
5533 					ctl->end_added = 1;
5534 					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
5535 						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
5536 					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
5537 						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
5538 #ifdef INVARIANTS
5539 					} else if (ctl->on_strm_q) {
5540 						panic("strm: %p ctl: %p unknown %d",
5541 						    strm, ctl, ctl->on_strm_q);
5542 #endif
5543 					}
5544 					ctl->on_strm_q = 0;
5545 					stcb->asoc.control_pdapi = ctl;
5546 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5547 					    stcb,
5548 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5549 					    (void *)&str_seq,
5550 					    SCTP_SO_NOT_LOCKED);
5551 					stcb->asoc.control_pdapi = sv;
5552 					break;
5553 				} else if ((ctl->sinfo_stream == sid) &&
5554 				    SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
5555 					/* We are past our victim SSN */
5556 					break;
5557 				}
5558 			}
5559 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5560 				/* Update the sequence number */
5561 				strm->last_mid_delivered = mid;
5562 			}
5563 			/* now kick the stream the new way */
5564 			/* sa_ignore NO_NULL_CHK */
5565 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5566 		}
5567 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5568 	}
5569 	/*
5570 	 * Now slide the mapping arrays forward.
5571 	 */
5572 	sctp_slide_mapping_arrays(stcb);
5573 }
5574