/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) so the sack can be bundled.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* account for the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it would otherwise be 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
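
/*
 * Worked example of the calculation above (illustrative numbers only):
 * suppose sctp_sbspace() reports 116000 bytes free, the reassembly queue
 * holds 10000 bytes in 8 chunks, and the stream queues hold 2000 bytes in
 * 2 chunks, with MSIZE = 256:
 *
 *	calc  = 116000
 *	calc -= 10000 + 8 * 256   -> 103952  (reasm data + mbuf overhead)
 *	calc -=  2000 + 2 * 256   -> 101440  (stream data + mbuf overhead)
 *
 * my_rwnd_control_len is then deducted as well, and the final clamp to 1
 * keeps a small but non-zero window advertised so that silly window
 * syndrome avoidance stays engaged.
 */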


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no uninitialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
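
/*
 * Sketch of the consumer side (userland, illustrative only -- not part of
 * this file): an application that has enabled SCTP_RECVRCVINFO would walk
 * the control data built above roughly like this:
 *
 *	struct msghdr msg;
 *	struct cmsghdr *c;
 *	struct sctp_rcvinfo rcv;
 *
 *	(set up msg.msg_control/msg.msg_controllen, then recvmsg(fd, &msg, 0))
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == IPPROTO_SCTP &&
 *		    c->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&rcv, CMSG_DATA(c), sizeof(rcv));
 *			printf("sid=%u ssn=%u tsn=%u\n",
 *			    rcv.rcv_sid, rcv.rcv_ssn, rcv.rcv_tsn);
 *		}
 *	}
 */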

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
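
/*
 * Note on the two maps used above (informational): mapping_array tracks
 * received TSNs that may still be revoked (reneged on) under memory
 * pressure by sctp_drain(), while nr_mapping_array tracks TSNs that have
 * been handed to the reader and are therefore non-renegable. The gap is
 * the TSN's bit offset from the array base, computed modulo 2^32; for
 * example, with mapping_array_base_tsn = 0xfffffffe, a TSN of 0x00000001
 * yields gap = 3 even though the TSN has numerically wrapped.
 */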


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and the chunk is ordered, so we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already
				 * on the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
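
/*
 * For reference (RFC 4960, section 3.3): the B (beginning) and E (ending)
 * bits in the DATA chunk flags correspond to the fragment macros used
 * throughout this file:
 *
 *	B E	meaning					macro
 *	1 0	first fragment of a message		SCTP_DATA_FIRST_FRAG
 *	0 0	middle fragment				SCTP_DATA_MIDDLE_FRAG
 *	0 1	last fragment				SCTP_DATA_LAST_FRAG
 *	1 1	complete (unfragmented) message		SCTP_DATA_NOT_FRAG
 */
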
536 
537 /*
538  * Queue the chunk either right into the socket buffer if it is the next one
539  * to go OR put it in the correct place in the delivery queue.  If we do
540  * append to the so_buf, keep doing so until we are out of order. One big
541  * question still remains, what to do when the socket buffer is FULL??
542  */
543 static void
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545     struct sctp_queued_to_read *control, int *abort_flag)
546 {
547 	/*
548 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549 	 * all the data in one stream this could happen quite rapidly. One
550 	 * could use the TSN to keep track of things, but this scheme breaks
551 	 * down in the other type of stream useage that could occur. Send a
552 	 * single msg to stream 0, send 4Billion messages to stream 1, now
553 	 * send a message to stream 0. You have a situation where the TSN
554 	 * has wrapped but not in the stream. Is this worth worrying about
555 	 * or should we just change our queue sort at the bottom to be by
556 	 * TSN.
557 	 *
558 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 	 * assignment this could happen... and I don't see how this would be
561 	 * a violation. So for now I am undecided an will leave the sort by
562 	 * SSN alone. Maybe a hybred approach is the answer
563 	 *
564 	 */
565 	struct sctp_stream_in *strm;
566 	struct sctp_queued_to_read *at;
567 	int queue_needed;
568 	uint16_t nxt_todel;
569 	struct mbuf *op_err;
570 	char msg[SCTP_DIAG_INFO_LEN];
571 
572 	queue_needed = 1;
573 	asoc->size_on_all_streams += control->length;
574 	sctp_ucount_incr(asoc->cnt_on_all_streams);
575 	strm = &asoc->strmin[control->sinfo_stream];
576 	nxt_todel = strm->last_sequence_delivered + 1;
577 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 	}
580 	SCTPDBG(SCTP_DEBUG_INDATA1,
581 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
582 	    (uint32_t) control->sinfo_stream,
583 	    (uint32_t) strm->last_sequence_delivered,
584 	    (uint32_t) nxt_todel);
585 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586 		/* The incoming sseq is behind where we last delivered? */
587 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588 		    control->sinfo_ssn, strm->last_sequence_delivered);
589 protocol_error:
590 		/*
591 		 * throw it in the stream so it gets cleaned up in
592 		 * association destruction
593 		 */
594 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596 		    strm->last_sequence_delivered, control->sinfo_tsn,
597 		    control->sinfo_stream, control->sinfo_ssn);
598 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
601 		*abort_flag = 1;
602 		return;
603 
604 	}
605 	if (nxt_todel == control->sinfo_ssn) {
606 		/* can be delivered right away? */
607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
608 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
609 		}
610 		/* EY it wont be queued if it could be delivered directly */
611 		queue_needed = 0;
612 		asoc->size_on_all_streams -= control->length;
613 		sctp_ucount_decr(asoc->cnt_on_all_streams);
614 		strm->last_sequence_delivered++;
615 
616 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
617 		sctp_add_to_readq(stcb->sctp_ep, stcb,
618 		    control,
619 		    &stcb->sctp_socket->so_rcv, 1,
620 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
621 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
622 			/* all delivered */
623 			nxt_todel = strm->last_sequence_delivered + 1;
624 			if (nxt_todel == control->sinfo_ssn) {
625 				TAILQ_REMOVE(&strm->inqueue, control, next);
626 				asoc->size_on_all_streams -= control->length;
627 				sctp_ucount_decr(asoc->cnt_on_all_streams);
628 				strm->last_sequence_delivered++;
629 				/*
630 				 * We ignore the return of deliver_data here
631 				 * since we always can hold the chunk on the
632 				 * d-queue. And we have a finite number that
633 				 * can be delivered from the strq.
634 				 */
635 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
636 					sctp_log_strm_del(control, NULL,
637 					    SCTP_STR_LOG_FROM_IMMED_DEL);
638 				}
639 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
640 				sctp_add_to_readq(stcb->sctp_ep, stcb,
641 				    control,
642 				    &stcb->sctp_socket->so_rcv, 1,
643 				    SCTP_READ_LOCK_NOT_HELD,
644 				    SCTP_SO_NOT_LOCKED);
645 				continue;
646 			}
647 			break;
648 		}
649 	}
650 	if (queue_needed) {
651 		/*
652 		 * Ok, we did not deliver this guy, find the correct place
653 		 * to put it on the queue.
654 		 */
655 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
656 			goto protocol_error;
657 		}
658 		if (TAILQ_EMPTY(&strm->inqueue)) {
659 			/* Empty queue */
660 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
661 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
662 			}
663 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
664 		} else {
665 			TAILQ_FOREACH(at, &strm->inqueue, next) {
666 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
667 					/*
668 					 * one in queue is bigger than the
669 					 * new one, insert before this one
670 					 */
671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
672 						sctp_log_strm_del(control, at,
673 						    SCTP_STR_LOG_FROM_INSERT_MD);
674 					}
675 					TAILQ_INSERT_BEFORE(at, control, next);
676 					break;
677 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
678 					/*
679 					 * Gak, He sent me a duplicate str
680 					 * seq number
681 					 */
682 					/*
683 					 * foo bar, I guess I will just free
684 					 * this new guy, should we abort
685 					 * too? FIX ME MAYBE? Or it COULD be
686 					 * that the SSN's have wrapped.
687 					 * Maybe I should compare to TSN
688 					 * somehow... sigh for now just blow
689 					 * away the chunk!
690 					 */
691 
692 					if (control->data)
693 						sctp_m_freem(control->data);
694 					control->data = NULL;
695 					asoc->size_on_all_streams -= control->length;
696 					sctp_ucount_decr(asoc->cnt_on_all_streams);
697 					if (control->whoFrom) {
698 						sctp_free_remote_addr(control->whoFrom);
699 						control->whoFrom = NULL;
700 					}
701 					sctp_free_a_readq(stcb, control);
702 					return;
703 				} else {
704 					if (TAILQ_NEXT(at, next) == NULL) {
705 						/*
706 						 * We are at the end, insert
707 						 * it after this one
708 						 */
709 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
710 							sctp_log_strm_del(control, at,
711 							    SCTP_STR_LOG_FROM_INSERT_TL);
712 						}
713 						TAILQ_INSERT_AFTER(&strm->inqueue,
714 						    at, control, next);
715 						break;
716 					}
717 				}
718 			}
719 		}
720 	}
721 }
722 
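/*
 * A note on the SSN comparisons above (informational): stream sequence
 * numbers are 16 bits wide and wrap, so SCTP_SSN_GT()/SCTP_SSN_GE() use
 * serial-number style arithmetic rather than plain integer compares.
 * For example, with last_sequence_delivered = 65535 an incoming SSN of 0
 * is "ahead" (the next one to deliver), while an incoming SSN of 65000
 * is "behind" and takes the protocol_error path above.
 */
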
/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and a flag that is 1
 * if all of the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
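
/*
 * Example (illustrative): if the queue holds TSN 50 (FIRST, 1200 bytes),
 * TSN 51 (MIDDLE, 1200 bytes) and TSN 52 (LAST, 400 bytes), t_size comes
 * back as 2800 and the return value is 1. If TSN 52 had not arrived yet,
 * t_size would be 2400 and the return value 0.
 */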

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to
		 * go back and call itself again... we do that here with
		 * the goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery; could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
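
/*
 * Example of the pd_point logic above (illustrative numbers): pd_point is
 * min(receive buffer limit >> SCTP_PARTIAL_DELIVERY_SHIFT, the endpoint's
 * partial_delivery_point). With a 64000-byte receive buffer, a shift of 1
 * (half the buffer) and partial_delivery_point = 4096, pd_point is 4096:
 * a partially reassembled message starts being pushed to the reader once
 * at least 4096 contiguous bytes are queued, even before its LAST
 * fragment arrives.
 */
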
823 
824 /*
825  * Dump onto the re-assembly queue, in its proper place. After dumping on the
826  * queue, see if anthing can be delivered. If so pull it off (or as much as
827  * we can. If we run out of space then we must dump what we can and set the
828  * appropriate flag to say we queued what we could.
829  */
830 static void
831 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
832     struct sctp_tmit_chunk *chk, int *abort_flag)
833 {
834 	struct mbuf *op_err;
835 	char msg[SCTP_DIAG_INFO_LEN];
836 	uint32_t cum_ackp1, prev_tsn, post_tsn;
837 	struct sctp_tmit_chunk *at, *prev, *next;
838 
839 	prev = next = NULL;
840 	cum_ackp1 = asoc->tsn_last_delivered + 1;
841 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
842 		/* This is the first one on the queue */
843 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
844 		/*
845 		 * we do not check for delivery of anything when only one
846 		 * fragment is here
847 		 */
848 		asoc->size_on_reasm_queue = chk->send_size;
849 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
850 		if (chk->rec.data.TSN_seq == cum_ackp1) {
851 			if (asoc->fragmented_delivery_inprogress == 0 &&
852 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
853 			    SCTP_DATA_FIRST_FRAG) {
854 				/*
855 				 * An empty queue, no delivery inprogress,
856 				 * we hit the next one and it does NOT have
857 				 * a FIRST fragment mark.
858 				 */
859 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
860 				snprintf(msg, sizeof(msg),
861 				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
862 				    chk->rec.data.TSN_seq,
863 				    chk->rec.data.stream_number,
864 				    chk->rec.data.stream_seq);
865 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
866 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
867 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
868 				*abort_flag = 1;
869 			} else if (asoc->fragmented_delivery_inprogress &&
870 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
871 				/*
872 				 * We are doing a partial delivery and the
873 				 * NEXT chunk MUST be either the LAST or
874 				 * MIDDLE fragment NOT a FIRST
875 				 */
876 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
877 				snprintf(msg, sizeof(msg),
878 				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
879 				    chk->rec.data.TSN_seq,
880 				    chk->rec.data.stream_number,
881 				    chk->rec.data.stream_seq);
882 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
883 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
884 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
885 				*abort_flag = 1;
886 			} else if (asoc->fragmented_delivery_inprogress) {
887 				/*
888 				 * Here we are ok with a MIDDLE or LAST
889 				 * piece
890 				 */
891 				if (chk->rec.data.stream_number !=
892 				    asoc->str_of_pdapi) {
893 					/* Got to be the right STR No */
894 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
895 					    chk->rec.data.stream_number,
896 					    asoc->str_of_pdapi);
897 					snprintf(msg, sizeof(msg),
898 					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
899 					    asoc->str_of_pdapi,
900 					    chk->rec.data.TSN_seq,
901 					    chk->rec.data.stream_number,
902 					    chk->rec.data.stream_seq);
903 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
904 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
905 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
906 					*abort_flag = 1;
907 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
908 					    SCTP_DATA_UNORDERED &&
909 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
910 					/* Got to be the right STR Seq */
911 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
912 					    chk->rec.data.stream_seq,
913 					    asoc->ssn_of_pdapi);
914 					snprintf(msg, sizeof(msg),
915 					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
916 					    asoc->ssn_of_pdapi,
917 					    chk->rec.data.TSN_seq,
918 					    chk->rec.data.stream_number,
919 					    chk->rec.data.stream_seq);
920 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
921 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
922 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
923 					*abort_flag = 1;
924 				}
925 			}
926 		}
927 		return;
928 	}
929 	/* Find its place */
930 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
931 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
932 			/*
933 			 * one in queue is bigger than the new one, insert
934 			 * before this one
935 			 */
936 			/* A check */
937 			asoc->size_on_reasm_queue += chk->send_size;
938 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
939 			next = at;
940 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
941 			break;
942 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
943 			/* Gak, He sent me a duplicate str seq number */
944 			/*
945 			 * foo bar, I guess I will just free this new guy,
946 			 * should we abort too? FIX ME MAYBE? Or it COULD be
947 			 * that the SSN's have wrapped. Maybe I should
948 			 * compare to TSN somehow... sigh for now just blow
949 			 * away the chunk!
950 			 */
951 			if (chk->data) {
952 				sctp_m_freem(chk->data);
953 				chk->data = NULL;
954 			}
955 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
956 			return;
957 		} else {
958 			prev = at;
959 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
960 				/*
961 				 * We are at the end, insert it after this
962 				 * one
963 				 */
964 				/* check it first */
965 				asoc->size_on_reasm_queue += chk->send_size;
966 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
967 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
968 				break;
969 			}
970 		}
971 	}
972 	/* Now the audits */
973 	if (prev) {
974 		prev_tsn = chk->rec.data.TSN_seq - 1;
975 		if (prev_tsn == prev->rec.data.TSN_seq) {
976 			/*
977 			 * Ok the one I am dropping onto the end is the
978 			 * NEXT. A bit of valdiation here.
979 			 */
980 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
981 			    SCTP_DATA_FIRST_FRAG ||
982 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
983 			    SCTP_DATA_MIDDLE_FRAG) {
984 				/*
985 				 * Insert chk MUST be a MIDDLE or LAST
986 				 * fragment
987 				 */
988 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
989 				    SCTP_DATA_FIRST_FRAG) {
990 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
991 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
992 					snprintf(msg, sizeof(msg),
993 					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
994 					    chk->rec.data.TSN_seq,
995 					    chk->rec.data.stream_number,
996 					    chk->rec.data.stream_seq);
997 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
998 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
999 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1000 					*abort_flag = 1;
1001 					return;
1002 				}
1003 				if (chk->rec.data.stream_number !=
1004 				    prev->rec.data.stream_number) {
1005 					/*
1006 					 * Huh, need the correct STR here,
1007 					 * they must be the same.
1008 					 */
1009 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1010 					    chk->rec.data.stream_number,
1011 					    prev->rec.data.stream_number);
1012 					snprintf(msg, sizeof(msg),
1013 					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1014 					    prev->rec.data.stream_number,
1015 					    chk->rec.data.TSN_seq,
1016 					    chk->rec.data.stream_number,
1017 					    chk->rec.data.stream_seq);
1018 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1019 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1020 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1021 					*abort_flag = 1;
1022 					return;
1023 				}
1024 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1025 				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1026 					/*
1027 					 * Huh, need the same ordering here,
1028 					 * they must be the same.
1029 					 */
1030 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1031 					snprintf(msg, sizeof(msg),
1032 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1033 					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1034 					    chk->rec.data.TSN_seq,
1035 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1036 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1037 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1038 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1039 					*abort_flag = 1;
1040 					return;
1041 				}
1042 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1043 				    chk->rec.data.stream_seq !=
1044 				    prev->rec.data.stream_seq) {
1045 					/*
1046 					 * Huh, need the correct STR here,
1047 					 * they must be the same.
1048 					 */
1049 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1050 					    chk->rec.data.stream_seq,
1051 					    prev->rec.data.stream_seq);
1052 					snprintf(msg, sizeof(msg),
1053 					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1054 					    prev->rec.data.stream_seq,
1055 					    chk->rec.data.TSN_seq,
1056 					    chk->rec.data.stream_number,
1057 					    chk->rec.data.stream_seq);
1058 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1059 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1060 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1061 					*abort_flag = 1;
1062 					return;
1063 				}
1064 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1065 			    SCTP_DATA_LAST_FRAG) {
1066 				/* Insert chk MUST be a FIRST */
1067 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1068 				    SCTP_DATA_FIRST_FRAG) {
1069 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1070 					snprintf(msg, sizeof(msg),
1071 					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1072 					    chk->rec.data.TSN_seq,
1073 					    chk->rec.data.stream_number,
1074 					    chk->rec.data.stream_seq);
1075 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1076 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1077 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1078 					*abort_flag = 1;
1079 					return;
1080 				}
1081 			}
1082 		}
1083 	}
1084 	if (next) {
1085 		post_tsn = chk->rec.data.TSN_seq + 1;
1086 		if (post_tsn == next->rec.data.TSN_seq) {
1087 			/*
1088 			 * Ok the one I am inserting ahead of is my NEXT
1089 			 * one. A bit of valdiation here.
1090 			 */
1091 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1092 				/* Insert chk MUST be a last fragment */
1093 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1094 				    != SCTP_DATA_LAST_FRAG) {
1095 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1096 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1097 					snprintf(msg, sizeof(msg),
1098 					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1099 					    chk->rec.data.TSN_seq,
1100 					    chk->rec.data.stream_number,
1101 					    chk->rec.data.stream_seq);
1102 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1103 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1104 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1105 					*abort_flag = 1;
1106 					return;
1107 				}
1108 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1109 				    SCTP_DATA_MIDDLE_FRAG ||
1110 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1111 			    SCTP_DATA_LAST_FRAG) {
1112 				/*
1113 				 * Insert chk CAN be MIDDLE or FIRST NOT
1114 				 * LAST
1115 				 */
1116 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1117 				    SCTP_DATA_LAST_FRAG) {
1118 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1119 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1120 					snprintf(msg, sizeof(msg),
1121 					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122 					    chk->rec.data.TSN_seq,
1123 					    chk->rec.data.stream_number,
1124 					    chk->rec.data.stream_seq);
1125 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1127 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1128 					*abort_flag = 1;
1129 					return;
1130 				}
1131 				if (chk->rec.data.stream_number !=
1132 				    next->rec.data.stream_number) {
1133 					/*
1134 					 * Huh, need the correct STR here,
1135 					 * they must be the same.
1136 					 */
1137 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1138 					    chk->rec.data.stream_number,
1139 					    next->rec.data.stream_number);
1140 					snprintf(msg, sizeof(msg),
1141 					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1142 					    next->rec.data.stream_number,
1143 					    chk->rec.data.TSN_seq,
1144 					    chk->rec.data.stream_number,
1145 					    chk->rec.data.stream_seq);
1146 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1147 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1148 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1149 					*abort_flag = 1;
1150 					return;
1151 				}
1152 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1153 				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1154 					/*
1155 					 * Huh, need the same ordering here,
1156 					 * they must be the same.
1157 					 */
1158 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1159 					snprintf(msg, sizeof(msg),
1160 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1161 					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1162 					    chk->rec.data.TSN_seq,
1163 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1164 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1165 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1166 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1167 					*abort_flag = 1;
1168 					return;
1169 				}
1170 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1171 				    chk->rec.data.stream_seq !=
1172 				    next->rec.data.stream_seq) {
1173 					/*
1174 					 * Huh, need the correct STR here,
1175 					 * they must be the same.
1176 					 */
1177 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1178 					    chk->rec.data.stream_seq,
1179 					    next->rec.data.stream_seq);
1180 					snprintf(msg, sizeof(msg),
1181 					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1182 					    next->rec.data.stream_seq,
1183 					    chk->rec.data.TSN_seq,
1184 					    chk->rec.data.stream_number,
1185 					    chk->rec.data.stream_seq);
1186 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1187 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1188 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1189 					*abort_flag = 1;
1190 					return;
1191 				}
1192 			}
1193 		}
1194 	}
1195 	/* Do we need to do some delivery? check */
1196 	sctp_deliver_reasm_check(stcb, asoc);
1197 }

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers; sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
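
/*
 * Example of what the check above catches (illustrative): suppose the
 * reassembly queue holds TSN 100 flagged as a MIDDLE fragment. A newly
 * arrived complete (unfragmented) chunk with TSN 101 cannot be legal,
 * because TSN 101 must be the MIDDLE or LAST piece of the message that
 * TSN 100 belongs to; the function returns 1 and the caller treats the
 * chunk as evidence of a broken or hostile peer.
 */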

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array size, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone; there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope, not in the valid range; dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_NOWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepending works well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params, and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
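	/*
	 * Wire format of the error cause queued above (RFC 4960, section
	 * 3.3.10.1, Invalid Stream Identifier), 8 bytes in total:
	 *
	 *	+-----------------------+-----------------------+
	 *	|   Cause Code = 1      |   Cause Length = 8    |
	 *	+-----------------------+-----------------------+
	 *	|   Stream Identifier   |      (Reserved)       |
	 *	+-----------------------+-----------------------+
	 */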
1443 	/*
1444 	 * Before we continue lets validate that we are not being fooled by
1445 	 * an evil attacker. We can only have 4k chunks based on our TSN
1446 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1447 	 * way our stream sequence numbers could have wrapped. We of course
1448 	 * only validate the FIRST fragment so the bit must be set.
1449 	 */
1450 	strmseq = ntohs(ch->dp.stream_sequence);
1451 #ifdef SCTP_ASOCLOG_OF_TSNS
1452 	SCTP_TCB_LOCK_ASSERT(stcb);
1453 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1454 		asoc->tsn_in_at = 0;
1455 		asoc->tsn_in_wrapped = 1;
1456 	}
1457 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1458 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1459 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1460 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1461 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1462 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1463 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1464 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1465 	asoc->tsn_in_at++;
1466 #endif
1467 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1468 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1469 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1470 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1471 		/* The incoming sseq is behind where we last delivered? */
1472 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1473 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1474 
1475 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1476 		    asoc->strmin[strmno].last_sequence_delivered,
1477 		    tsn, strmno, strmseq);
1478 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1479 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1480 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1481 		*abort_flag = 1;
1482 		return (0);
1483 	}
1484 	/************************************
1485 	 * From here down we may find ch-> invalid,
1486 	 * so it's a good idea NOT to use it.
1487 	 *************************************/
1488 
1489 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1490 	if (last_chunk == 0) {
1491 		dmbuf = SCTP_M_COPYM(*m,
1492 		    (offset + sizeof(struct sctp_data_chunk)),
1493 		    the_len, M_NOWAIT);
1494 #ifdef SCTP_MBUF_LOGGING
1495 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1496 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1497 		}
1498 #endif
1499 	} else {
1500 		/* We can steal the last chunk */
1501 		int l_len;
1502 
1503 		dmbuf = *m;
1504 		/* lop off the top part */
1505 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1506 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1507 			l_len = SCTP_BUF_LEN(dmbuf);
1508 		} else {
1509 			/*
1510 			 * need to count up the size; hopefully we do not
1511 			 * hit this too often :-0
1512 			 */
1513 			struct mbuf *lat;
1514 
1515 			l_len = 0;
1516 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1517 				l_len += SCTP_BUF_LEN(lat);
1518 			}
1519 		}
1520 		if (l_len > the_len) {
1521 			/* Trim the trailing rounding bytes off too */
1522 			m_adj(dmbuf, -(l_len - the_len));
1523 		}
1524 	}
1525 	if (dmbuf == NULL) {
1526 		SCTP_STAT_INCR(sctps_nomem);
1527 		return (0);
1528 	}
1529 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1530 	    asoc->fragmented_delivery_inprogress == 0 &&
1531 	    TAILQ_EMPTY(&asoc->resetHead) &&
1532 	    ((ordered == 0) ||
1533 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1534 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1535 		/* Candidate for express delivery */
1536 		/*
1537 		 * It's not fragmented, no PD-API is up, nothing is in the
1538 		 * delivery queue, it's unordered OR ordered and the next to
1539 		 * deliver AND nothing else is stuck on the stream queue,
1540 		 * and there is room for it in the socket buffer. Let's just
1541 		 * stuff it into the buffer....
1542 		 */
1543 
1544 		/* It would be nice to avoid this copy if we could :< */
1545 		sctp_alloc_a_readq(stcb, control);
1546 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1547 		    protocol_id,
1548 		    strmno, strmseq,
1549 		    chunk_flags,
1550 		    dmbuf);
1551 		if (control == NULL) {
1552 			goto failed_express_del;
1553 		}
1554 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1555 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1556 			asoc->highest_tsn_inside_nr_map = tsn;
1557 		}
1558 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1559 		    control, &stcb->sctp_socket->so_rcv,
1560 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1561 
1562 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1563 			/* for ordered, bump what we delivered */
1564 			asoc->strmin[strmno].last_sequence_delivered++;
1565 		}
1566 		SCTP_STAT_INCR(sctps_recvexpress);
1567 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1568 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1569 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1570 		}
1571 		control = NULL;
1572 
1573 		goto finish_express_del;
1574 	}
1575 failed_express_del:
1576 	/* If we reach here this is a new chunk */
1577 	chk = NULL;
1578 	control = NULL;
1579 	/* Express for fragmented delivery? */
1580 	if ((asoc->fragmented_delivery_inprogress) &&
1581 	    (stcb->asoc.control_pdapi) &&
1582 	    (asoc->str_of_pdapi == strmno) &&
1583 	    (asoc->ssn_of_pdapi == strmseq)
1584 	    ) {
1585 		control = stcb->asoc.control_pdapi;
1586 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1587 			/* Can't be another first? */
1588 			goto failed_pdapi_express_del;
1589 		}
1590 		if (tsn == (control->sinfo_tsn + 1)) {
1591 			/* Yep, we can add it on */
1592 			int end = 0;
1593 
1594 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1595 				end = 1;
1596 			}
1597 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1598 			    tsn,
1599 			    &stcb->sctp_socket->so_rcv)) {
1600 				SCTP_PRINTF("Append fails end:%d\n", end);
1601 				goto failed_pdapi_express_del;
1602 			}
1603 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605 				asoc->highest_tsn_inside_nr_map = tsn;
1606 			}
1607 			SCTP_STAT_INCR(sctps_recvexpressm);
1608 			asoc->tsn_last_delivered = tsn;
1609 			asoc->fragment_flags = chunk_flags;
1610 			asoc->tsn_of_pdapi_last_delivered = tsn;
1611 			asoc->last_flags_delivered = chunk_flags;
1612 			asoc->last_strm_seq_delivered = strmseq;
1613 			asoc->last_strm_no_delivered = strmno;
1614 			if (end) {
1615 				/* clean up the flags and such */
1616 				asoc->fragmented_delivery_inprogress = 0;
1617 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1618 					asoc->strmin[strmno].last_sequence_delivered++;
1619 				}
1620 				stcb->asoc.control_pdapi = NULL;
1621 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1622 					/*
1623 					 * There could be another message
1624 					 * ready
1625 					 */
1626 					need_reasm_check = 1;
1627 				}
1628 			}
1629 			control = NULL;
1630 			goto finish_express_del;
1631 		}
1632 	}
1633 failed_pdapi_express_del:
1634 	control = NULL;
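	/*
	 * With draining disabled (sctp_do_drain == 0) we will never renege
	 * on this TSN, so record it in the non-renegable (nr) map;
	 * otherwise it goes into the regular map, from which it could
	 * later be reneged under memory pressure.
	 */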
1635 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1636 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1637 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1638 			asoc->highest_tsn_inside_nr_map = tsn;
1639 		}
1640 	} else {
1641 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1642 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1643 			asoc->highest_tsn_inside_map = tsn;
1644 		}
1645 	}
1646 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1647 		sctp_alloc_a_chunk(stcb, chk);
1648 		if (chk == NULL) {
1649 			/* No memory so we drop the chunk */
1650 			SCTP_STAT_INCR(sctps_nomem);
1651 			if (last_chunk == 0) {
1652 				/* we copied it, free the copy */
1653 				sctp_m_freem(dmbuf);
1654 			}
1655 			return (0);
1656 		}
1657 		chk->rec.data.TSN_seq = tsn;
1658 		chk->no_fr_allowed = 0;
1659 		chk->rec.data.stream_seq = strmseq;
1660 		chk->rec.data.stream_number = strmno;
1661 		chk->rec.data.payloadtype = protocol_id;
1662 		chk->rec.data.context = stcb->asoc.context;
1663 		chk->rec.data.doing_fast_retransmit = 0;
1664 		chk->rec.data.rcv_flags = chunk_flags;
1665 		chk->asoc = asoc;
1666 		chk->send_size = the_len;
1667 		chk->whoTo = net;
1668 		atomic_add_int(&net->ref_count, 1);
1669 		chk->data = dmbuf;
1670 	} else {
1671 		sctp_alloc_a_readq(stcb, control);
1672 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1673 		    protocol_id,
1674 		    strmno, strmseq,
1675 		    chunk_flags,
1676 		    dmbuf);
1677 		if (control == NULL) {
1678 			/* No memory so we drop the chunk */
1679 			SCTP_STAT_INCR(sctps_nomem);
1680 			if (last_chunk == 0) {
1681 				/* we copied it, free the copy */
1682 				sctp_m_freem(dmbuf);
1683 			}
1684 			return (0);
1685 		}
1686 		control->length = the_len;
1687 	}
1688 
1689 	/* Mark it as received */
1690 	/* Now queue it where it belongs */
1691 	if (control != NULL) {
1692 		/* First a sanity check */
1693 		if (asoc->fragmented_delivery_inprogress) {
1694 			/*
1695 			 * Ok, we have a fragmented delivery in progress. If
1696 			 * this complete chunk is next to deliver, OR belongs
1697 			 * in our view to the reassembly queue, the peer is
1698 			 * evil or broken.
1699 			 */
1700 			uint32_t estimate_tsn;
1701 
1702 			estimate_tsn = asoc->tsn_last_delivered + 1;
1703 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1704 			    (estimate_tsn == control->sinfo_tsn)) {
1705 				/* Evil/Broken peer */
1706 				sctp_m_freem(control->data);
1707 				control->data = NULL;
1708 				if (control->whoFrom) {
1709 					sctp_free_remote_addr(control->whoFrom);
1710 					control->whoFrom = NULL;
1711 				}
1712 				sctp_free_a_readq(stcb, control);
1713 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1714 				    tsn, strmno, strmseq);
1715 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1716 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1717 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1718 				*abort_flag = 1;
1719 				if (last_chunk) {
1720 					*m = NULL;
1721 				}
1722 				return (0);
1723 			} else {
1724 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1725 					sctp_m_freem(control->data);
1726 					control->data = NULL;
1727 					if (control->whoFrom) {
1728 						sctp_free_remote_addr(control->whoFrom);
1729 						control->whoFrom = NULL;
1730 					}
1731 					sctp_free_a_readq(stcb, control);
1732 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1733 					    tsn, strmno, strmseq);
1734 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1735 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1736 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1737 					*abort_flag = 1;
1738 					if (last_chunk) {
1739 						*m = NULL;
1740 					}
1741 					return (0);
1742 				}
1743 			}
1744 		} else {
1745 			/* No PDAPI running */
1746 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1747 				/*
1748 				 * The reassembly queue is NOT empty; validate
1749 				 * that this TSN does not need to be in the
1750 				 * reassembly queue. If it does, then our peer
1751 				 * is broken or evil.
1752 				 */
1753 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1754 					sctp_m_freem(control->data);
1755 					control->data = NULL;
1756 					if (control->whoFrom) {
1757 						sctp_free_remote_addr(control->whoFrom);
1758 						control->whoFrom = NULL;
1759 					}
1760 					sctp_free_a_readq(stcb, control);
1761 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1762 					    tsn, strmno, strmseq);
1763 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1765 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1766 					*abort_flag = 1;
1767 					if (last_chunk) {
1768 						*m = NULL;
1769 					}
1770 					return (0);
1771 				}
1772 			}
1773 		}
1774 		/* ok, if we reach here we have passed the sanity checks */
1775 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1776 			/* queue directly into socket buffer */
1777 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1778 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1779 			    control,
1780 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1781 		} else {
1782 			/*
1783 			 * Special check for when streams are resetting. We
1784 			 * could be smarter about this and check the
1785 			 * actual stream to see if it is not being reset...
1786 			 * that way we would not create a HOLB amongst
1787 			 * streams being reset and those not being reset.
1788 			 *
1789 			 * We take complete messages that have a stream reset
1790 			 * intervening (aka the TSN is after where our
1791 			 * cum-ack needs to be) off and put them on the
1792 			 * pending_reply_queue. The reassembly ones we do
1793 			 * not have to worry about, since they are all sorted
1794 			 * and processed in TSN order. It is only the
1795 			 * singletons I must worry about.
1796 			 */
1797 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1798 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1799 				/*
1800 				 * yep, it's past where we need to reset... go
1801 				 * ahead and queue it.
1802 				 */
1803 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1804 					/* first one on */
1805 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1806 				} else {
1807 					struct sctp_queued_to_read *ctlOn,
1808 					                   *nctlOn;
1809 					unsigned char inserted = 0;
1810 
1811 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1812 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1813 							continue;
1814 						} else {
1815 							/* found it */
1816 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1817 							inserted = 1;
1818 							break;
1819 						}
1820 					}
1821 					if (inserted == 0) {
1822 						/*
1823 						 * not inserted before any
1824 						 * existing entry, so it
1825 						 * must be put at the end.
1826 						 */
1827 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1828 					}
1829 				}
1830 			} else {
1831 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1832 				if (*abort_flag) {
1833 					if (last_chunk) {
1834 						*m = NULL;
1835 					}
1836 					return (0);
1837 				}
1838 			}
1839 		}
1840 	} else {
1841 		/* Into the re-assembly queue */
1842 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1843 		if (*abort_flag) {
1844 			/*
1845 			 * the assoc is now gone and chk was put onto the
1846 			 * reasm queue, which has all been freed.
1847 			 */
1848 			if (last_chunk) {
1849 				*m = NULL;
1850 			}
1851 			return (0);
1852 		}
1853 	}
1854 finish_express_del:
1855 	if (tsn == (asoc->cumulative_tsn + 1)) {
1856 		/* Update cum-ack */
1857 		asoc->cumulative_tsn = tsn;
1858 	}
1859 	if (last_chunk) {
1860 		*m = NULL;
1861 	}
1862 	if (ordered) {
1863 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1864 	} else {
1865 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1866 	}
1867 	SCTP_STAT_INCR(sctps_recvdata);
1868 	/* Set it present please */
1869 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1870 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1871 	}
1872 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1873 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1874 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1875 	}
1876 	/* check the special flag for stream resets */
1877 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1878 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1879 		/*
1880 		 * We have finished working through the backlogged TSNs, so
1881 		 * now it is time to reset streams: 1) call the reset function,
1882 		 * 2) free the pending_reply space, 3) distribute any chunks
1883 		 * in the pending_reply_queue.
1884 		 */
1885 		struct sctp_queued_to_read *ctl, *nctl;
1886 
1887 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1888 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1889 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
1890 		SCTP_FREE(liste, SCTP_M_STRESET);
1891 		/* sa_ignore FREED_MEMORY */
1892 		liste = TAILQ_FIRST(&asoc->resetHead);
1893 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1894 			/* All can be removed */
1895 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1896 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1897 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1898 				if (*abort_flag) {
1899 					return (0);
1900 				}
1901 			}
1902 		} else {
1903 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1904 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1905 					break;
1906 				}
1907 				/*
1908 				 * If ctl->sinfo_tsn is <= liste->tsn we can
1909 				 * process it, which is the negation of
1910 				 * ctl->sinfo_tsn > liste->tsn.
1911 				 */
1912 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1913 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1914 				if (*abort_flag) {
1915 					return (0);
1916 				}
1917 			}
1918 		}
1919 		/*
1920 		 * Now service reassembly to pick up anything that has been
1921 		 * held on the reassembly queue.
1922 		 */
1923 		sctp_deliver_reasm_check(stcb, asoc);
1924 		need_reasm_check = 0;
1925 	}
1926 	if (need_reasm_check) {
1927 		/* Another one waits? */
1928 		sctp_deliver_reasm_check(stcb, asoc);
1929 	}
1930 	return (1);
1931 }
1932 
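/*
 * Lookup table used when sliding the mapping arrays: indexed by a byte
 * taken from the OR of the two mapping arrays, it yields the number of
 * consecutive 1 bits starting at the least significant bit, i.e. how
 * many in-sequence TSNs that byte contributes before the first gap.
 * For example, 0x17 (binary 00010111) yields 3.
 */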
1933 int8_t sctp_map_lookup_tab[256] = {
1934 	0, 1, 0, 2, 0, 1, 0, 3,
1935 	0, 1, 0, 2, 0, 1, 0, 4,
1936 	0, 1, 0, 2, 0, 1, 0, 3,
1937 	0, 1, 0, 2, 0, 1, 0, 5,
1938 	0, 1, 0, 2, 0, 1, 0, 3,
1939 	0, 1, 0, 2, 0, 1, 0, 4,
1940 	0, 1, 0, 2, 0, 1, 0, 3,
1941 	0, 1, 0, 2, 0, 1, 0, 6,
1942 	0, 1, 0, 2, 0, 1, 0, 3,
1943 	0, 1, 0, 2, 0, 1, 0, 4,
1944 	0, 1, 0, 2, 0, 1, 0, 3,
1945 	0, 1, 0, 2, 0, 1, 0, 5,
1946 	0, 1, 0, 2, 0, 1, 0, 3,
1947 	0, 1, 0, 2, 0, 1, 0, 4,
1948 	0, 1, 0, 2, 0, 1, 0, 3,
1949 	0, 1, 0, 2, 0, 1, 0, 7,
1950 	0, 1, 0, 2, 0, 1, 0, 3,
1951 	0, 1, 0, 2, 0, 1, 0, 4,
1952 	0, 1, 0, 2, 0, 1, 0, 3,
1953 	0, 1, 0, 2, 0, 1, 0, 5,
1954 	0, 1, 0, 2, 0, 1, 0, 3,
1955 	0, 1, 0, 2, 0, 1, 0, 4,
1956 	0, 1, 0, 2, 0, 1, 0, 3,
1957 	0, 1, 0, 2, 0, 1, 0, 6,
1958 	0, 1, 0, 2, 0, 1, 0, 3,
1959 	0, 1, 0, 2, 0, 1, 0, 4,
1960 	0, 1, 0, 2, 0, 1, 0, 3,
1961 	0, 1, 0, 2, 0, 1, 0, 5,
1962 	0, 1, 0, 2, 0, 1, 0, 3,
1963 	0, 1, 0, 2, 0, 1, 0, 4,
1964 	0, 1, 0, 2, 0, 1, 0, 3,
1965 	0, 1, 0, 2, 0, 1, 0, 8
1966 };
1967 
1968 
1969 void
1970 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1971 {
1972 	/*
1973 	 * Now we also need to check the mapping array in a couple of ways.
1974 	 * 1) Did we move the cum-ack point?
1975 	 *
1976 	 * When you first glance at this you might think that all entries that
1977 	 * make up the position of the cum-ack would be in the nr-mapping
1978 	 * array only... i.e. things up to the cum-ack are always
1979 	 * deliverable. That's true with one exception: when it's a fragmented
1980 	 * message, we may not deliver the data until some threshold (or all
1981 	 * of it) is in place. So we must OR the nr_mapping_array and
1982 	 * mapping_array to get a true picture of the cum-ack.
1983 	 */
1984 	struct sctp_association *asoc;
1985 	int at;
1986 	uint8_t val;
1987 	int slide_from, slide_end, lgap, distance;
1988 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1989 
1990 	asoc = &stcb->asoc;
1991 
1992 	old_cumack = asoc->cumulative_tsn;
1993 	old_base = asoc->mapping_array_base_tsn;
1994 	old_highest = asoc->highest_tsn_inside_map;
1995 	/*
1996 	 * We could probably improve this a bit by calculating the
1997 	 * offset of the current cum-ack as the starting point.
1998 	 */
1999 	at = 0;
2000 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2001 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2002 		if (val == 0xff) {
2003 			at += 8;
2004 		} else {
2005 			/* there is a 0 bit */
2006 			at += sctp_map_lookup_tab[val];
2007 			break;
2008 		}
2009 	}
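	/*
	 * "at" now counts the TSNs present in sequence starting at the
	 * array base, so the cum-ack is base + at - 1 (with at == 0 the
	 * cum-ack sits one below the array base).
	 */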
2010 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2011 
2012 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2013 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2014 #ifdef INVARIANTS
2015 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2016 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2017 #else
2018 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2019 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2020 		sctp_print_mapping_array(asoc);
2021 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2022 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2023 		}
2024 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2025 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2026 #endif
2027 	}
2028 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2029 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2030 	} else {
2031 		highest_tsn = asoc->highest_tsn_inside_map;
2032 	}
2033 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2034 		/* The complete array was completed by a single FR */
2035 		/* highest becomes the cum-ack */
2036 		int clr;
2037 
2038 #ifdef INVARIANTS
2039 		unsigned int i;
2040 
2041 #endif
2042 
2043 		/* clear the array */
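		/* round the "at" bit count up to whole bytes */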
2044 		clr = ((at + 7) >> 3);
2045 		if (clr > asoc->mapping_array_size) {
2046 			clr = asoc->mapping_array_size;
2047 		}
2048 		memset(asoc->mapping_array, 0, clr);
2049 		memset(asoc->nr_mapping_array, 0, clr);
2050 #ifdef INVARIANTS
2051 		for (i = 0; i < asoc->mapping_array_size; i++) {
2052 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2053 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2054 				sctp_print_mapping_array(asoc);
2055 			}
2056 		}
2057 #endif
2058 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2059 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2060 	} else if (at >= 8) {
2061 		/* we can slide the mapping array down */
2062 		/* slide_from holds where we hit the first NON 0xff byte */
2063 
2064 		/*
2065 		 * now calculate the ceiling of the move using our highest
2066 		 * TSN value
2067 		 */
2068 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2069 		slide_end = (lgap >> 3);
2070 		if (slide_end < slide_from) {
2071 			sctp_print_mapping_array(asoc);
2072 #ifdef INVARIANTS
2073 			panic("impossible slide");
2074 #else
2075 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2076 			    lgap, slide_end, slide_from, at);
2077 			return;
2078 #endif
2079 		}
2080 		if (slide_end > asoc->mapping_array_size) {
2081 #ifdef INVARIANTS
2082 			panic("would overrun buffer");
2083 #else
2084 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2085 			    asoc->mapping_array_size, slide_end);
2086 			slide_end = asoc->mapping_array_size;
2087 #endif
2088 		}
2089 		distance = (slide_end - slide_from) + 1;
2090 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2091 			sctp_log_map(old_base, old_cumack, old_highest,
2092 			    SCTP_MAP_PREPARE_SLIDE);
2093 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2094 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2095 		}
2096 		if (distance + slide_from > asoc->mapping_array_size ||
2097 		    distance < 0) {
2098 			/*
2099 			 * Here we do NOT slide the array forward so that,
2100 			 * hopefully, when more data comes in to fill it up,
2101 			 * we will be able to slide it forward. Really, I
2102 			 * don't think this should happen :-0
2103 			 */
2104 
2105 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2106 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2107 				    (uint32_t) asoc->mapping_array_size,
2108 				    SCTP_MAP_SLIDE_NONE);
2109 			}
2110 		} else {
2111 			int ii;
2112 
2113 			for (ii = 0; ii < distance; ii++) {
2114 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2115 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2116 
2117 			}
2118 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2119 				asoc->mapping_array[ii] = 0;
2120 				asoc->nr_mapping_array[ii] = 0;
2121 			}
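			/*
			 * If a map is empty (its highest TSN sits one below
			 * the base), keep it that way by advancing its
			 * highest marker in step with the base adjustment
			 * below.
			 */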
2122 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2123 				asoc->highest_tsn_inside_map += (slide_from << 3);
2124 			}
2125 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2126 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2127 			}
2128 			asoc->mapping_array_base_tsn += (slide_from << 3);
2129 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2130 				sctp_log_map(asoc->mapping_array_base_tsn,
2131 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2132 				    SCTP_MAP_SLIDE_RESULT);
2133 			}
2134 		}
2135 	}
2136 }
2137 
2138 void
2139 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2140 {
2141 	struct sctp_association *asoc;
2142 	uint32_t highest_tsn;
2143 
2144 	asoc = &stcb->asoc;
2145 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2146 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2147 	} else {
2148 		highest_tsn = asoc->highest_tsn_inside_map;
2149 	}
2150 
2151 	/*
2152 	 * Now we need to see if we need to queue a sack or just start the
2153 	 * timer (if allowed).
2154 	 */
2155 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2156 		/*
2157 		 * Ok, special case: in the SHUTDOWN-SENT state, we make
2158 		 * sure the SACK timer is off, and instead send a SHUTDOWN
2159 		 * and a SACK
2160 		 */
2161 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2162 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2163 			    stcb->sctp_ep, stcb, NULL,
2164 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2165 		}
2166 		sctp_send_shutdown(stcb,
2167 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2168 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2169 	} else {
2170 		int is_a_gap;
2171 
2172 		/* is there a gap now ? */
2173 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2174 
2175 		/*
2176 		 * CMT DAC algorithm: increase number of packets received
2177 		 * since last ack
2178 		 */
2179 		stcb->asoc.cmt_dac_pkts_rcvd++;
2180 
2181 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2182 							 * SACK */
2183 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2184 							 * longer is one */
2185 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2186 		    (is_a_gap) ||	/* is still a gap */
2187 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2188 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2189 		    ) {
2190 
2191 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2192 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2193 			    (stcb->asoc.send_sack == 0) &&
2194 			    (stcb->asoc.numduptsns == 0) &&
2195 			    (stcb->asoc.delayed_ack) &&
2196 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2197 
2198 				/*
2199 				 * CMT DAC algorithm: With CMT, delay acks
2200 				 * even in the face of reordering.
2201 				 *
2202 				 * Therefore, acks that do not have to be
2203 				 * sent because of the above reasons will be
2204 				 * delayed. That is, acks that would have
2205 				 * been sent due to gap reports will be
2206 				 * delayed with DAC. Start the delayed ack
2207 				 * timer.
2208 				 */
2209 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2210 				    stcb->sctp_ep, stcb, NULL);
2211 			} else {
2212 				/*
2213 				 * Ok, we must build a SACK since the timer
2214 				 * is pending, we got our first packet, OR
2215 				 * there are gaps or duplicates.
2216 				 */
2217 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2218 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2219 			}
2220 		} else {
2221 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2222 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2223 				    stcb->sctp_ep, stcb, NULL);
2224 			}
2225 		}
2226 	}
2227 }
2228 
2229 void
2230 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2231 {
2232 	struct sctp_tmit_chunk *chk;
2233 	uint32_t tsize, pd_point;
2234 	uint16_t nxt_todel;
2235 
2236 	if (asoc->fragmented_delivery_inprogress) {
2237 		sctp_service_reassembly(stcb, asoc);
2238 	}
2239 	/* Can we proceed further, i.e. is the PD-API complete? */
2240 	if (asoc->fragmented_delivery_inprogress) {
2241 		/* no */
2242 		return;
2243 	}
2244 	/*
2245 	 * Now is there some other chunk I can deliver from the reassembly
2246 	 * queue?
2247 	 */
2248 doit_again:
2249 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2250 	if (chk == NULL) {
2251 		asoc->size_on_reasm_queue = 0;
2252 		asoc->cnt_on_reasm_queue = 0;
2253 		return;
2254 	}
2255 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2256 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2257 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2258 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2259 		/*
2260 		 * Yep, the first one is here. We set up to start reception
2261 		 * by backing down the TSN, just in case we can't deliver.
2262 		 */
2263 
2264 		/*
2265 		 * Before we start, though, either all of the message should
2266 		 * be here, or at least enough of it to reach the partial
2267 		 * delivery point (capped by the socket buffer size).
2268 		 */
2269 		if (stcb->sctp_socket) {
2270 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2271 			    stcb->sctp_ep->partial_delivery_point);
2272 		} else {
2273 			pd_point = stcb->sctp_ep->partial_delivery_point;
2274 		}
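		/*
		 * Deliver via the PD-API once either the whole message is
		 * queued or at least pd_point bytes of it are.
		 */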
2275 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2276 			asoc->fragmented_delivery_inprogress = 1;
2277 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2278 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2279 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2280 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2281 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2282 			sctp_service_reassembly(stcb, asoc);
2283 			if (asoc->fragmented_delivery_inprogress == 0) {
2284 				goto doit_again;
2285 			}
2286 		}
2287 	}
2288 }
2289 
2290 int
2291 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2292     struct sockaddr *src, struct sockaddr *dst,
2293     struct sctphdr *sh, struct sctp_inpcb *inp,
2294     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2295     uint8_t mflowtype, uint32_t mflowid,
2296     uint32_t vrf_id, uint16_t port)
2297 {
2298 	struct sctp_data_chunk *ch, chunk_buf;
2299 	struct sctp_association *asoc;
2300 	int num_chunks = 0;	/* number of data chunks processed */
2301 	int stop_proc = 0;
2302 	int chk_length, break_flag, last_chunk;
2303 	int abort_flag = 0, was_a_gap;
2304 	struct mbuf *m;
2305 	uint32_t highest_tsn;
2306 
2307 	/* set the rwnd */
2308 	sctp_set_rwnd(stcb, &stcb->asoc);
2309 
2310 	m = *mm;
2311 	SCTP_TCB_LOCK_ASSERT(stcb);
2312 	asoc = &stcb->asoc;
2313 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2314 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2315 	} else {
2316 		highest_tsn = asoc->highest_tsn_inside_map;
2317 	}
2318 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2319 	/*
2320 	 * set up where we got the last DATA packet from for any SACK that
2321 	 * may need to go out. Don't bump the net. This is done ONLY when a
2322 	 * chunk is assigned.
2323 	 */
2324 	asoc->last_data_chunk_from = net;
2325 
2326 	/*-
2327 	 * Now before we proceed we must figure out if this is a wasted
2328 	 * cluster... i.e. it is a small packet sent in and yet the driver
2329 	 * underneath allocated a full cluster for it. If so we must copy it
2330 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2331 	 * with cluster starvation. Note for __Panda__ we don't do this
2332 	 * since it has clusters all the way down to 64 bytes.
2333 	 */
2334 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2335 		/* we only handle mbufs that are singletons.. not chains */
2336 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2337 		if (m) {
2338 			/* ok, let's see if we can copy the data up */
2339 			caddr_t *from, *to;
2340 
2341 			/* get the pointers and copy */
2342 			to = mtod(m, caddr_t *);
2343 			from = mtod((*mm), caddr_t *);
2344 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2345 			/* copy the length and free up the old */
2346 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2347 			sctp_m_freem(*mm);
2348 			/* success, copy back */
2349 			*mm = m;
2350 		} else {
2351 			/* We are in trouble in the mbuf world .. yikes */
2352 			m = *mm;
2353 		}
2354 	}
2355 	/* get pointer to the first chunk header */
2356 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2357 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2358 	if (ch == NULL) {
2359 		return (1);
2360 	}
2361 	/*
2362 	 * process all DATA chunks...
2363 	 */
2364 	*high_tsn = asoc->cumulative_tsn;
2365 	break_flag = 0;
2366 	asoc->data_pkts_seen++;
2367 	while (stop_proc == 0) {
2368 		/* validate chunk length */
2369 		chk_length = ntohs(ch->ch.chunk_length);
2370 		if (length - *offset < chk_length) {
2371 			/* all done, mutilated chunk */
2372 			stop_proc = 1;
2373 			continue;
2374 		}
2375 		if (ch->ch.chunk_type == SCTP_DATA) {
2376 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2377 				/*
2378 				 * Need to send an abort, since we had an
2379 				 * invalid data chunk.
2380 				 */
2381 				struct mbuf *op_err;
2382 				char msg[SCTP_DIAG_INFO_LEN];
2383 
2384 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2385 				    chk_length);
2386 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2387 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2388 				sctp_abort_association(inp, stcb, m, iphlen,
2389 				    src, dst, sh, op_err,
2390 				    mflowtype, mflowid,
2391 				    vrf_id, port);
2392 				return (2);
2393 			}
2394 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2395 				/*
2396 				 * Need to send an abort since we had an
2397 				 * empty data chunk.
2398 				 */
2399 				struct mbuf *op_err;
2400 
2401 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2402 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2403 				sctp_abort_association(inp, stcb, m, iphlen,
2404 				    src, dst, sh, op_err,
2405 				    mflowtype, mflowid,
2406 				    vrf_id, port);
2407 				return (2);
2408 			}
2409 #ifdef SCTP_AUDITING_ENABLED
2410 			sctp_audit_log(0xB1, 0);
2411 #endif
2412 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2413 				last_chunk = 1;
2414 			} else {
2415 				last_chunk = 0;
2416 			}
2417 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2418 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2419 			    last_chunk)) {
2420 				num_chunks++;
2421 			}
2422 			if (abort_flag)
2423 				return (2);
2424 
2425 			if (break_flag) {
2426 				/*
2427 				 * Set because we are out of rwnd space and
2428 				 * have no drop-report space left.
2429 				 */
2430 				stop_proc = 1;
2431 				continue;
2432 			}
2433 		} else {
2434 			/* not a data chunk in the data region */
2435 			switch (ch->ch.chunk_type) {
2436 			case SCTP_INITIATION:
2437 			case SCTP_INITIATION_ACK:
2438 			case SCTP_SELECTIVE_ACK:
2439 			case SCTP_NR_SELECTIVE_ACK:
2440 			case SCTP_HEARTBEAT_REQUEST:
2441 			case SCTP_HEARTBEAT_ACK:
2442 			case SCTP_ABORT_ASSOCIATION:
2443 			case SCTP_SHUTDOWN:
2444 			case SCTP_SHUTDOWN_ACK:
2445 			case SCTP_OPERATION_ERROR:
2446 			case SCTP_COOKIE_ECHO:
2447 			case SCTP_COOKIE_ACK:
2448 			case SCTP_ECN_ECHO:
2449 			case SCTP_ECN_CWR:
2450 			case SCTP_SHUTDOWN_COMPLETE:
2451 			case SCTP_AUTHENTICATION:
2452 			case SCTP_ASCONF_ACK:
2453 			case SCTP_PACKET_DROPPED:
2454 			case SCTP_STREAM_RESET:
2455 			case SCTP_FORWARD_CUM_TSN:
2456 			case SCTP_ASCONF:
2457 				/*
2458 				 * Now, what do we do with KNOWN chunks that
2459 				 * are NOT in the right place?
2460 				 *
2461 				 * For now, I do nothing but ignore them. We
2462 				 * may later want to add sysctl stuff to
2463 				 * switch out and do either an ABORT() or
2464 				 * possibly process them.
2465 				 */
2466 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2467 					struct mbuf *op_err;
2468 
2469 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2470 					sctp_abort_association(inp, stcb,
2471 					    m, iphlen,
2472 					    src, dst,
2473 					    sh, op_err,
2474 					    mflowtype, mflowid,
2475 					    vrf_id, port);
2476 					return (2);
2477 				}
2478 				break;
2479 			default:
2480 				/* unknown chunk type, use bit rules */
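				/*
				 * Per RFC 4960, the two high-order bits of an
				 * unrecognized chunk type say what to do: if
				 * 0x40 is set, report it in an "Unrecognized
				 * Chunk Type" error cause; if 0x80 is set,
				 * skip it and keep processing, otherwise
				 * discard the rest of the packet.
				 */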
2481 				if (ch->ch.chunk_type & 0x40) {
2482 					/* Add a error report to the queue */
2483 					struct mbuf *merr;
2484 					struct sctp_paramhdr *phd;
2485 
2486 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2487 					if (merr) {
2488 						phd = mtod(merr, struct sctp_paramhdr *);
2489 						/*
2490 						 * We cheat and use param
2491 						 * type since we did not
2492 						 * bother to define an error
2493 						 * cause struct. They are
2494 						 * the same basic format
2495 						 * with different names.
2496 						 */
2497 						phd->param_type =
2498 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2499 						phd->param_length =
2500 						    htons(chk_length + sizeof(*phd));
2501 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2502 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2503 						if (SCTP_BUF_NEXT(merr)) {
2504 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
2505 								sctp_m_freem(merr);
2506 							} else {
2507 								sctp_queue_op_err(stcb, merr);
2508 							}
2509 						} else {
2510 							sctp_m_freem(merr);
2511 						}
2512 					}
2513 				}
2514 				if ((ch->ch.chunk_type & 0x80) == 0) {
2515 					/* discard the rest of this packet */
2516 					stop_proc = 1;
2517 				}	/* else skip this bad chunk and
2518 					 * continue... */
2519 				break;
2520 			}	/* switch of chunk type */
2521 		}
2522 		*offset += SCTP_SIZE32(chk_length);
2523 		if ((*offset >= length) || stop_proc) {
2524 			/* no more data left in the mbuf chain */
2525 			stop_proc = 1;
2526 			continue;
2527 		}
2528 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2529 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2530 		if (ch == NULL) {
2531 			*offset = length;
2532 			stop_proc = 1;
2533 			continue;
2534 		}
2535 	}
2536 	if (break_flag) {
2537 		/*
2538 		 * we need to report rwnd overrun drops.
2539 		 */
2540 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2541 	}
2542 	if (num_chunks) {
2543 		 * Did we get data? If so, update the time for auto-close
2544 		 * and give the peer credit for being alive.
2545 		 * give peer credit for being alive.
2546 		 */
2547 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2548 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2549 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2550 			    stcb->asoc.overall_error_count,
2551 			    0,
2552 			    SCTP_FROM_SCTP_INDATA,
2553 			    __LINE__);
2554 		}
2555 		stcb->asoc.overall_error_count = 0;
2556 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2557 	}
2558 	/* now service the reassembly queue if needed */
2559 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2560 		sctp_service_queues(stcb, asoc);
2561 
2562 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2563 		/* Assure that we ack right away */
2564 		stcb->asoc.send_sack = 1;
2565 	}
2566 	/* Start a sack timer or QUEUE a SACK for sending */
2567 	sctp_sack_check(stcb, was_a_gap);
2568 	return (0);
2569 }
2570 
2571 static int
2572 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2573     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2574     int *num_frs,
2575     uint32_t * biggest_newly_acked_tsn,
2576     uint32_t * this_sack_lowest_newack,
2577     int *rto_ok)
2578 {
2579 	struct sctp_tmit_chunk *tp1;
2580 	unsigned int theTSN;
2581 	int j, wake_him = 0, circled = 0;
2582 
2583 	/* Recover the tp1 we last saw */
2584 	tp1 = *p_tp1;
2585 	if (tp1 == NULL) {
2586 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2587 	}
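	/*
	 * Gap Ack Block start/end values are offsets from the Cumulative
	 * TSN Ack (last_tsn), so each j below corresponds to TSN
	 * last_tsn + j.
	 */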
2588 	for (j = frag_strt; j <= frag_end; j++) {
2589 		theTSN = j + last_tsn;
2590 		while (tp1) {
2591 			if (tp1->rec.data.doing_fast_retransmit)
2592 				(*num_frs) += 1;
2593 
2594 			/*-
2595 			 * CMT: CUCv2 algorithm. For each TSN being
2596 			 * processed from the sent queue, track the
2597 			 * next expected pseudo-cumack, or
2598 			 * rtx_pseudo_cumack, if required. Separate
2599 			 * cumack trackers for first transmissions,
2600 			 * and retransmissions.
2601 			 */
2602 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2603 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2604 			    (tp1->snd_count == 1)) {
2605 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2606 				tp1->whoTo->find_pseudo_cumack = 0;
2607 			}
2608 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2609 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2610 			    (tp1->snd_count > 1)) {
2611 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2612 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2613 			}
2614 			if (tp1->rec.data.TSN_seq == theTSN) {
2615 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2616 					/*-
2617 					 * must be held until
2618 					 * cum-ack passes
2619 					 */
2620 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2621 						/*-
2622 						 * If it is less than RESEND, it is
2623 						 * now no longer in flight.
2624 						 * Higher values may already be set
2625 						 * via previous Gap Ack Blocks...
2626 						 * i.e. ACKED or RESEND.
2627 						 */
2628 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2629 						    *biggest_newly_acked_tsn)) {
2630 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2631 						}
2632 						/*-
2633 						 * CMT: SFR algo (and HTNA) - set
2634 						 * saw_newack to 1 for dest being
2635 						 * newly acked. update
2636 						 * this_sack_highest_newack if
2637 						 * appropriate.
2638 						 */
2639 						if (tp1->rec.data.chunk_was_revoked == 0)
2640 							tp1->whoTo->saw_newack = 1;
2641 
2642 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2643 						    tp1->whoTo->this_sack_highest_newack)) {
2644 							tp1->whoTo->this_sack_highest_newack =
2645 							    tp1->rec.data.TSN_seq;
2646 						}
2647 						/*-
2648 						 * CMT DAC algo: also update
2649 						 * this_sack_lowest_newack
2650 						 */
2651 						if (*this_sack_lowest_newack == 0) {
2652 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2653 								sctp_log_sack(*this_sack_lowest_newack,
2654 								    last_tsn,
2655 								    tp1->rec.data.TSN_seq,
2656 								    0,
2657 								    0,
2658 								    SCTP_LOG_TSN_ACKED);
2659 							}
2660 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2661 						}
2662 						/*-
2663 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2664 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2665 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2666 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2667 						 * Separate pseudo_cumack trackers for first transmissions and
2668 						 * retransmissions.
2669 						 */
2670 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2671 							if (tp1->rec.data.chunk_was_revoked == 0) {
2672 								tp1->whoTo->new_pseudo_cumack = 1;
2673 							}
2674 							tp1->whoTo->find_pseudo_cumack = 1;
2675 						}
2676 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2677 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2678 						}
2679 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2680 							if (tp1->rec.data.chunk_was_revoked == 0) {
2681 								tp1->whoTo->new_pseudo_cumack = 1;
2682 							}
2683 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2684 						}
2685 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2686 							sctp_log_sack(*biggest_newly_acked_tsn,
2687 							    last_tsn,
2688 							    tp1->rec.data.TSN_seq,
2689 							    frag_strt,
2690 							    frag_end,
2691 							    SCTP_LOG_TSN_ACKED);
2692 						}
2693 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2694 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2695 							    tp1->whoTo->flight_size,
2696 							    tp1->book_size,
2697 							    (uintptr_t) tp1->whoTo,
2698 							    tp1->rec.data.TSN_seq);
2699 						}
2700 						sctp_flight_size_decrease(tp1);
2701 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2702 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2703 							    tp1);
2704 						}
2705 						sctp_total_flight_decrease(stcb, tp1);
2706 
2707 						tp1->whoTo->net_ack += tp1->send_size;
2708 						if (tp1->snd_count < 2) {
2709 							/*-
2710 							 * True non-retransmitted chunk
2711 							 */
2712 							tp1->whoTo->net_ack2 += tp1->send_size;
2713 
2714 							/*-
2715 							 * update RTO too?
2716 							 */
2717 							if (tp1->do_rtt) {
2718 								if (*rto_ok) {
2719 									tp1->whoTo->RTO =
2720 									    sctp_calculate_rto(stcb,
2721 									    &stcb->asoc,
2722 									    tp1->whoTo,
2723 									    &tp1->sent_rcv_time,
2724 									    sctp_align_safe_nocopy,
2725 									    SCTP_RTT_FROM_DATA);
2726 									*rto_ok = 0;
2727 								}
2728 								if (tp1->whoTo->rto_needed == 0) {
2729 									tp1->whoTo->rto_needed = 1;
2730 								}
2731 								tp1->do_rtt = 0;
2732 							}
2733 						}
2734 					}
2735 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2736 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2737 						    stcb->asoc.this_sack_highest_gap)) {
2738 							stcb->asoc.this_sack_highest_gap =
2739 							    tp1->rec.data.TSN_seq;
2740 						}
2741 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2742 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2743 #ifdef SCTP_AUDITING_ENABLED
2744 							sctp_audit_log(0xB2,
2745 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2746 #endif
2747 						}
2748 					}
2749 					/*-
2750 					 * All chunks NOT UNSENT fall through here and are marked
2751 					 * (leave PR-SCTP ones that are to skip alone though)
2752 					 */
2753 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2754 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2755 						tp1->sent = SCTP_DATAGRAM_MARKED;
2756 					}
2757 					if (tp1->rec.data.chunk_was_revoked) {
2758 						/* deflate the cwnd */
2759 						tp1->whoTo->cwnd -= tp1->book_size;
2760 						tp1->rec.data.chunk_was_revoked = 0;
2761 					}
2762 					/* NR Sack code here */
2763 					if (nr_sacking &&
2764 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2765 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2766 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2767 #ifdef INVARIANTS
2768 						} else {
2769 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2770 #endif
2771 						}
2772 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2773 						if (tp1->data) {
2774 							/*
2775 							 * sa_ignore
2776 							 * NO_NULL_CHK
2777 							 */
2778 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2779 							sctp_m_freem(tp1->data);
2780 							tp1->data = NULL;
2781 						}
2782 						wake_him++;
2783 					}
2784 				}
2785 				break;
2786 			}	/* if (tp1->TSN_seq == theTSN) */
2787 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2788 				break;
2789 			}
2790 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2791 			if ((tp1 == NULL) && (circled == 0)) {
2792 				circled++;
2793 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2794 			}
2795 		}		/* end while (tp1) */
2796 		if (tp1 == NULL) {
2797 			circled = 0;
2798 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2799 		}
2800 		/* In case the fragments were not in order we must reset */
2801 	}			/* end for (j = fragStart */
2802 	*p_tp1 = tp1;
2803 	return (wake_him);	/* Return value only used for nr-sack */
2804 }
2805 
2806 
2807 static int
2808 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2809     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2810     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2811     int num_seg, int num_nr_seg, int *rto_ok)
2812 {
2813 	struct sctp_gap_ack_block *frag, block;
2814 	struct sctp_tmit_chunk *tp1;
2815 	int i;
2816 	int num_frs = 0;
2817 	int chunk_freed;
2818 	int non_revocable;
2819 	uint16_t frag_strt, frag_end, prev_frag_end;
2820 
2821 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2822 	prev_frag_end = 0;
2823 	chunk_freed = 0;
2824 
2825 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2826 		if (i == num_seg) {
2827 			prev_frag_end = 0;
2828 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2829 		}
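		/*
		 * The first num_seg blocks are ordinary Gap Ack Blocks; the
		 * remaining num_nr_seg are NR (non-renegable) blocks, which
		 * are ordered independently, hence the restart above.
		 */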
2830 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2831 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2832 		*offset += sizeof(block);
2833 		if (frag == NULL) {
2834 			return (chunk_freed);
2835 		}
2836 		frag_strt = ntohs(frag->start);
2837 		frag_end = ntohs(frag->end);
2838 
2839 		if (frag_strt > frag_end) {
2840 			/* This gap report is malformed, skip it. */
2841 			continue;
2842 		}
2843 		if (frag_strt <= prev_frag_end) {
2844 			/* This gap report is not in order, so restart. */
2845 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2846 		}
2847 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2848 			*biggest_tsn_acked = last_tsn + frag_end;
2849 		}
2850 		if (i < num_seg) {
2851 			non_revocable = 0;
2852 		} else {
2853 			non_revocable = 1;
2854 		}
2855 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2856 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2857 		    this_sack_lowest_newack, rto_ok)) {
2858 			chunk_freed = 1;
2859 		}
2860 		prev_frag_end = frag_end;
2861 	}
2862 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2863 		if (num_frs)
2864 			sctp_log_fr(*biggest_tsn_acked,
2865 			    *biggest_newly_acked_tsn,
2866 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2867 	}
2868 	return (chunk_freed);
2869 }
2870 
2871 static void
2872 sctp_check_for_revoked(struct sctp_tcb *stcb,
2873     struct sctp_association *asoc, uint32_t cumack,
2874     uint32_t biggest_tsn_acked)
2875 {
2876 	struct sctp_tmit_chunk *tp1;
2877 
2878 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2879 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2880 			/*
2881 			 * ok, this guy is either ACKED or MARKED. If it is
2882 			 * ACKED, it has been previously acked but not this
2883 			 * time, i.e. revoked. If it is MARKED, it was ACKed
2884 			 * again.
2885 			 */
2886 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2887 				break;
2888 			}
2889 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2890 				/* it has been revoked */
2891 				tp1->sent = SCTP_DATAGRAM_SENT;
2892 				tp1->rec.data.chunk_was_revoked = 1;
2893 				/*
2894 				 * We must add this stuff back in to assure
2895 				 * timers and such get started.
2896 				 */
2897 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2898 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2899 					    tp1->whoTo->flight_size,
2900 					    tp1->book_size,
2901 					    (uintptr_t) tp1->whoTo,
2902 					    tp1->rec.data.TSN_seq);
2903 				}
2904 				sctp_flight_size_increase(tp1);
2905 				sctp_total_flight_increase(stcb, tp1);
2906 				/*
2907 				 * We inflate the cwnd to compensate for our
2908 				 * artificial inflation of the flight_size.
2909 				 */
2910 				tp1->whoTo->cwnd += tp1->book_size;
2911 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2912 					sctp_log_sack(asoc->last_acked_seq,
2913 					    cumack,
2914 					    tp1->rec.data.TSN_seq,
2915 					    0,
2916 					    0,
2917 					    SCTP_LOG_TSN_REVOKED);
2918 				}
2919 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2920 				/* it has been re-acked in this SACK */
2921 				tp1->sent = SCTP_DATAGRAM_ACKED;
2922 			}
2923 		}
2924 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2925 			break;
2926 	}
2927 }
2928 
2929 
2930 static void
2931 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2932     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2933 {
2934 	struct sctp_tmit_chunk *tp1;
2935 	int strike_flag = 0;
2936 	struct timeval now;
2937 	int tot_retrans = 0;
2938 	uint32_t sending_seq;
2939 	struct sctp_nets *net;
2940 	int num_dests_sacked = 0;
2941 
2942 	/*
2943 	 * Select the sending_seq; this is either the next thing ready to be
2944 	 * sent but not transmitted, OR the next seq we will assign.
2945 	 */
2946 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2947 	if (tp1 == NULL) {
2948 		sending_seq = asoc->sending_seq;
2949 	} else {
2950 		sending_seq = tp1->rec.data.TSN_seq;
2951 	}
2952 
2953 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2954 	if ((asoc->sctp_cmt_on_off > 0) &&
2955 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2956 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2957 			if (net->saw_newack)
2958 				num_dests_sacked++;
2959 		}
2960 	}
2961 	if (stcb->asoc.prsctp_supported) {
2962 		(void)SCTP_GETTIME_TIMEVAL(&now);
2963 	}
2964 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2965 		strike_flag = 0;
2966 		if (tp1->no_fr_allowed) {
2967 			/* this one had a timeout or something */
2968 			continue;
2969 		}
2970 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2971 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2972 				sctp_log_fr(biggest_tsn_newly_acked,
2973 				    tp1->rec.data.TSN_seq,
2974 				    tp1->sent,
2975 				    SCTP_FR_LOG_CHECK_STRIKE);
2976 		}
2977 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2978 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2979 			/* done */
2980 			break;
2981 		}
2982 		if (stcb->asoc.prsctp_supported) {
2983 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2984 				/* Is it expired? */
2985 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2986 					/* Yes so drop it */
2987 					if (tp1->data != NULL) {
2988 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2989 						    SCTP_SO_NOT_LOCKED);
2990 					}
2991 					continue;
2992 				}
2993 			}
2994 		}
2995 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2996 			/* we are beyond the tsn in the sack  */
2997 			break;
2998 		}
2999 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3000 			/* either a RESEND, ACKED, or MARKED */
3001 			/* skip */
3002 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3003 				/* Continue striking FWD-TSN chunks */
3004 				tp1->rec.data.fwd_tsn_cnt++;
3005 			}
3006 			continue;
3007 		}
3008 		/*
3009 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3010 		 */
3011 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3012 			/*
3013 			 * No new acks were received for data sent to this
3014 			 * dest. Therefore, according to the SFR algo for
3015 			 * CMT, no data sent to this dest can be marked for
3016 			 * FR using this SACK.
3017 			 */
3018 			continue;
3019 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3020 		    tp1->whoTo->this_sack_highest_newack)) {
3021 			/*
3022 			 * CMT: New acks were received for data sent to
3023 			 * this dest. But no new acks were seen for data
3024 			 * sent after tp1. Therefore, according to the SFR
3025 			 * algo for CMT, tp1 cannot be marked for FR using
3026 			 * this SACK. This step covers part of the DAC algo
3027 			 * and the HTNA algo as well.
3028 			 */
3029 			continue;
3030 		}
3031 		/*
3032 		 * Here we check to see if we have already done a FR
3033 		 * and if so we see if the biggest TSN we saw in the sack is
3034 		 * smaller than the recovery point. If so we don't strike
3035 		 * the tsn... otherwise we CAN strike the TSN.
3036 		 */
3037 		/*
3038 		 * @@@ JRI: Check for CMT if (accum_moved &&
3039 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3040 		 * 0)) {
3041 		 */
3042 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3043 			/*
3044 			 * Strike the TSN if in fast-recovery and cum-ack
3045 			 * moved.
3046 			 */
3047 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3048 				sctp_log_fr(biggest_tsn_newly_acked,
3049 				    tp1->rec.data.TSN_seq,
3050 				    tp1->sent,
3051 				    SCTP_FR_LOG_STRIKE_CHUNK);
3052 			}
3053 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3054 				tp1->sent++;
3055 			}
3056 			if ((asoc->sctp_cmt_on_off > 0) &&
3057 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3058 				/*
3059 				 * CMT DAC algorithm: If SACK flag is set to
3060 				 * 0, then lowest_newack test will not pass
3061 				 * because it would have been set to the
3062 				 * cumack earlier. If not already marked for
3063 				 * rtx, if not a mixed sack, and if tp1 is
3064 				 * not between two sacked TSNs, then mark it
3065 				 * once more. NOTE that we mark one
3066 				 * additional time since the SACK DAC flag
3067 				 * indicates that two packets have been
3068 				 * received after this missing TSN.
3069 				 */
3070 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3071 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3072 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3073 						sctp_log_fr(16 + num_dests_sacked,
3074 						    tp1->rec.data.TSN_seq,
3075 						    tp1->sent,
3076 						    SCTP_FR_LOG_STRIKE_CHUNK);
3077 					}
3078 					tp1->sent++;
3079 				}
3080 			}
3081 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3082 		    (asoc->sctp_cmt_on_off == 0)) {
3083 			/*
3084 			 * For those that have done a FR we must take
3085 			 * special consideration if we strike. I.e the
3086 			 * biggest_newly_acked must be higher than the
3087 			 * sending_seq at the time we did the FR.
3088 			 */
3089 			if (
3090 #ifdef SCTP_FR_TO_ALTERNATE
3091 			/*
3092 			 * If FR's go to new networks, then we must only do
3093 			 * this for singly homed asoc's. However if the FR's
3094 			 * go to the same network (Armando's work) then it's
3095 			 * ok to FR multiple times.
3096 			 */
3097 			    (asoc->numnets < 2)
3098 #else
3099 			    (1)
3100 #endif
3101 			    ) {
3102 
3103 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3104 				    tp1->rec.data.fast_retran_tsn)) {
3105 					/*
3106 					 * Strike the TSN, since this ack is
3107 					 * beyond where things were when we
3108 					 * did a FR.
3109 					 */
3110 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3111 						sctp_log_fr(biggest_tsn_newly_acked,
3112 						    tp1->rec.data.TSN_seq,
3113 						    tp1->sent,
3114 						    SCTP_FR_LOG_STRIKE_CHUNK);
3115 					}
3116 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3117 						tp1->sent++;
3118 					}
3119 					strike_flag = 1;
3120 					if ((asoc->sctp_cmt_on_off > 0) &&
3121 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3122 						/*
3123 						 * CMT DAC algorithm: If
3124 						 * SACK flag is set to 0,
3125 						 * then lowest_newack test
3126 						 * will not pass because it
3127 						 * would have been set to
3128 						 * the cumack earlier. If
3129 						 * not already marked for
3130 						 * rtx, if not a mixed sack,
3131 						 * and if tp1 is not between
3132 						 * two sacked TSNs, then
3133 						 * mark it once more. NOTE
3134 						 * that we mark one
3135 						 * additional time since the
3136 						 * SACK DAC flag indicates
3137 						 * that two packets have
3138 						 * been received after this
3139 						 * missing TSN.
3140 						 */
3141 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3142 						    (num_dests_sacked == 1) &&
3143 						    SCTP_TSN_GT(this_sack_lowest_newack,
3144 						    tp1->rec.data.TSN_seq)) {
3145 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3146 								sctp_log_fr(32 + num_dests_sacked,
3147 								    tp1->rec.data.TSN_seq,
3148 								    tp1->sent,
3149 								    SCTP_FR_LOG_STRIKE_CHUNK);
3150 							}
3151 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3152 								tp1->sent++;
3153 							}
3154 						}
3155 					}
3156 				}
3157 			}
3158 			/*
3159 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3160 			 * algo covers HTNA.
3161 			 */
3162 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3163 		    biggest_tsn_newly_acked)) {
3164 			/*
3165 			 * We don't strike these: this is the HTNA
3166 			 * algorithm, i.e. we don't strike if our TSN is
3167 			 * larger than the Highest TSN Newly Acked.
3168 			 */
3169 			;
3170 		} else {
3171 			/* Strike the TSN */
3172 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3173 				sctp_log_fr(biggest_tsn_newly_acked,
3174 				    tp1->rec.data.TSN_seq,
3175 				    tp1->sent,
3176 				    SCTP_FR_LOG_STRIKE_CHUNK);
3177 			}
3178 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3179 				tp1->sent++;
3180 			}
3181 			if ((asoc->sctp_cmt_on_off > 0) &&
3182 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3183 				/*
3184 				 * CMT DAC algorithm: If SACK flag is set to
3185 				 * 0, then lowest_newack test will not pass
3186 				 * because it would have been set to the
3187 				 * cumack earlier. If not already marked for
3188 				 * rtx, if not a mixed sack, and if tp1 is
3189 				 * not between two sacked TSNs, then mark it
3190 				 * once more. NOTE that we mark one
3191 				 * additional time since the SACK DAC flag
3192 				 * indicates that two packets have been
3193 				 * received after this missing TSN.
3194 				 */
3195 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3196 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3197 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3198 						sctp_log_fr(48 + num_dests_sacked,
3199 						    tp1->rec.data.TSN_seq,
3200 						    tp1->sent,
3201 						    SCTP_FR_LOG_STRIKE_CHUNK);
3202 					}
3203 					tp1->sent++;
3204 				}
3205 			}
3206 		}
3207 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3208 			struct sctp_nets *alt;
3209 
3210 			/* fix counts and things */
3211 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3212 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3213 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3214 				    tp1->book_size,
3215 				    (uintptr_t) tp1->whoTo,
3216 				    tp1->rec.data.TSN_seq);
3217 			}
3218 			if (tp1->whoTo) {
3219 				tp1->whoTo->net_ack++;
3220 				sctp_flight_size_decrease(tp1);
3221 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3222 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3223 					    tp1);
3224 				}
3225 			}
3226 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3227 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3228 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3229 			}
3230 			/* add back to the rwnd */
3231 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3232 
3233 			/* remove from the total flight */
3234 			sctp_total_flight_decrease(stcb, tp1);
3235 
3236 			if ((stcb->asoc.prsctp_supported) &&
3237 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3238 				/*
3239 				 * Has it been retransmitted tv_sec times? -
3240 				 * we store the retran count there.
3241 				 */
3242 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3243 					/* Yes, so drop it */
3244 					if (tp1->data != NULL) {
3245 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3246 						    SCTP_SO_NOT_LOCKED);
3247 					}
3248 					/* Make sure to flag we had a FR */
3249 					tp1->whoTo->net_ack++;
3250 					continue;
3251 				}
3252 			}
3253 			/*
3254 			 * SCTP_PRINTF("OK, we are now ready to FR this
3255 			 * guy\n");
3256 			 */
3257 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3258 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3259 				    0, SCTP_FR_MARKED);
3260 			}
3261 			if (strike_flag) {
3262 				/* This is a subsequent FR */
3263 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3264 			}
3265 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3266 			if (asoc->sctp_cmt_on_off > 0) {
3267 				/*
3268 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3269 				 * If CMT is being used, then pick dest with
3270 				 * largest ssthresh for any retransmission.
3271 				 */
3272 				tp1->no_fr_allowed = 1;
3273 				alt = tp1->whoTo;
3274 				/* sa_ignore NO_NULL_CHK */
3275 				if (asoc->sctp_cmt_pf > 0) {
3276 					/*
3277 					 * JRS 5/18/07 - If CMT PF is on,
3278 					 * use the PF version of
3279 					 * find_alt_net()
3280 					 */
3281 					alt = sctp_find_alternate_net(stcb, alt, 2);
3282 				} else {
3283 					/*
3284 					 * JRS 5/18/07 - If only CMT is on,
3285 					 * use the CMT version of
3286 					 * find_alt_net()
3287 					 */
3288 					/* sa_ignore NO_NULL_CHK */
3289 					alt = sctp_find_alternate_net(stcb, alt, 1);
3290 				}
3291 				if (alt == NULL) {
3292 					alt = tp1->whoTo;
3293 				}
3294 				/*
3295 				 * CUCv2: If a different dest is picked for
3296 				 * the retransmission, then new
3297 				 * (rtx-)pseudo_cumack needs to be tracked
3298 				 * for orig dest. Let CUCv2 track new (rtx-)
3299 				 * pseudo-cumack always.
3300 				 */
3301 				if (tp1->whoTo) {
3302 					tp1->whoTo->find_pseudo_cumack = 1;
3303 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3304 				}
3305 			} else {/* CMT is OFF */
3306 
3307 #ifdef SCTP_FR_TO_ALTERNATE
3308 				/* Can we find an alternate? */
3309 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3310 #else
3311 				/*
3312 				 * default behavior is to NOT retransmit
3313 				 * FR's to an alternate. Armando Caro's
3314 				 * paper details why.
3315 				 */
3316 				alt = tp1->whoTo;
3317 #endif
3318 			}
3319 
3320 			tp1->rec.data.doing_fast_retransmit = 1;
3321 			tot_retrans++;
3322 			/* mark the sending seq for possible subsequent FR's */
3323 			/*
3324 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3325 			 * (uint32_t)tp1->rec.data.TSN_seq);
3326 			 */
3327 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3328 				/*
3329 				 * If the send queue is empty then it's
3330 				 * the next sequence number that will be
3331 				 * assigned so we subtract one from this to
3332 				 * get the one we last sent.
3333 				 */
3334 				tp1->rec.data.fast_retran_tsn = sending_seq;
3335 			} else {
3336 				/*
3337 				 * If there are chunks on the send queue
3338 				 * (unsent data that has made it from the
3339 				 * stream queues but not out the door), we
3340 				 * take the first one (which will have the
3341 				 * lowest TSN) and subtract one to get the
3342 				 * one we last sent.
3343 				 */
3344 				struct sctp_tmit_chunk *ttt;
3345 
3346 				ttt = TAILQ_FIRST(&asoc->send_queue);
3347 				tp1->rec.data.fast_retran_tsn =
3348 				    ttt->rec.data.TSN_seq;
3349 			}
3350 
3351 			if (tp1->do_rtt) {
3352 				/*
3353 				 * this guy had an RTO calculation pending on
3354 				 * it, cancel it
3355 				 */
3356 				if ((tp1->whoTo != NULL) &&
3357 				    (tp1->whoTo->rto_needed == 0)) {
3358 					tp1->whoTo->rto_needed = 1;
3359 				}
3360 				tp1->do_rtt = 0;
3361 			}
3362 			if (alt != tp1->whoTo) {
3363 				/* yes, there is an alternate. */
3364 				sctp_free_remote_addr(tp1->whoTo);
3365 				/* sa_ignore FREED_MEMORY */
3366 				tp1->whoTo = alt;
3367 				atomic_add_int(&alt->ref_count, 1);
3368 			}
3369 		}
3370 	}
3371 }
3372 
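/*
 * Editorial note: the strike loop above relies on SCTP_TSN_GT()/
 * SCTP_TSN_GE() for every TSN ordering decision. TSNs are 32-bit
 * serial numbers (RFC 1982 style), so "greater" must stay correct
 * across wrap-around. A minimal stand-alone sketch of such a
 * comparison is shown below; the in-tree macros may differ in detail.
 */
static inline int
example_tsn_gt(uint32_t a, uint32_t b)
{
	/* True when a is ahead of b on the 32-bit serial-number circle. */
	return (((a < b) && ((b - a) > (1U << 31))) ||
	    ((a > b) && ((a - b) < (1U << 31))));
}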
3373 struct sctp_tmit_chunk *
3374 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3375     struct sctp_association *asoc)
3376 {
3377 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3378 	struct timeval now;
3379 	int now_filled = 0;
3380 
3381 	if (asoc->prsctp_supported == 0) {
3382 		return (NULL);
3383 	}
3384 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3385 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3386 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3387 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3388 			/* no chance to advance, out of here */
3389 			break;
3390 		}
3391 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3392 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3393 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3394 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3395 				    asoc->advanced_peer_ack_point,
3396 				    tp1->rec.data.TSN_seq, 0, 0);
3397 			}
3398 		}
3399 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3400 			/*
3401 			 * We can't fwd-tsn past any that are reliable aka
3402 			 * retransmitted until the asoc fails.
3403 			 */
3404 			break;
3405 		}
3406 		if (!now_filled) {
3407 			(void)SCTP_GETTIME_TIMEVAL(&now);
3408 			now_filled = 1;
3409 		}
3410 		/*
3411 		 * Now we have a chunk which is marked for another
3412 		 * retransmission to a PR-stream but has either run out of its
3413 		 * chances already or has been marked to skip now. Can we skip
3414 		 * it if it's a resend?
3415 		 */
3416 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3417 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3418 			/*
3419 			 * Now is this one marked for resend and its time is
3420 			 * now up?
3421 			 */
3422 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3423 				/* Yes so drop it */
3424 				if (tp1->data) {
3425 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3426 					    1, SCTP_SO_NOT_LOCKED);
3427 				}
3428 			} else {
3429 				/*
3430 				 * No, we are done when we hit one marked for
3431 				 * resend whose time has not expired.
3432 				 */
3433 				break;
3434 			}
3435 		}
3436 		/*
3437 		 * OK, now if this chunk is marked to be dropped, we can clean
3438 		 * up the chunk, advance our peer ack point, and check the
3439 		 * next chunk.
3440 		 */
3441 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3442 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3443 			/* the advanced PeerAckPoint goes forward */
3444 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3445 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3446 				a_adv = tp1;
3447 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3448 				/* No update but we do save the chk */
3449 				a_adv = tp1;
3450 			}
3451 		} else {
3452 			/*
3453 			 * If it is still in RESEND we can advance no
3454 			 * further
3455 			 */
3456 			break;
3457 		}
3458 	}
3459 	return (a_adv);
3460 }
3461 
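/*
 * Editorial note: stripped of the queue handling and PR-SCTP policy
 * details, the rule sctp_try_advance_peer_ack_point() implements is
 * "slide the advanced peer-ack point across consecutive abandoned
 * TSNs and stop at the first chunk still owed reliably". A minimal
 * sketch with hypothetical names:
 */
static uint32_t
example_advance_ack_point(uint32_t adv_point, const uint32_t *tsn,
    const int *abandoned, int cnt)
{
	int i;

	/* tsn[] is assumed to ascend, starting at adv_point + 1. */
	for (i = 0; i < cnt; i++) {
		if (!abandoned[i]) {
			/* A reliable chunk blocks further advance. */
			break;
		}
		adv_point = tsn[i];
	}
	return (adv_point);
}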
3462 static int
3463 sctp_fs_audit(struct sctp_association *asoc)
3464 {
3465 	struct sctp_tmit_chunk *chk;
3466 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3467 	int ret;
3468 
3469 #ifndef INVARIANTS
3470 	int entry_flight, entry_cnt;
3471 
3472 #endif
3473 
3474 	ret = 0;
3475 #ifndef INVARIANTS
3476 	entry_flight = asoc->total_flight;
3477 	entry_cnt = asoc->total_flight_count;
3478 #endif
3479 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3480 		return (0);
3481 
3482 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3483 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3484 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3485 			    chk->rec.data.TSN_seq,
3486 			    chk->send_size,
3487 			    chk->snd_count);
3488 			inflight++;
3489 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3490 			resend++;
3491 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3492 			inbetween++;
3493 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3494 			above++;
3495 		} else {
3496 			acked++;
3497 		}
3498 	}
3499 
3500 	if ((inflight > 0) || (inbetween > 0)) {
3501 #ifdef INVARIANTS
3502 		panic("Flight size-express incorrect? \n");
3503 #else
3504 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3505 		    entry_flight, entry_cnt);
3506 
3507 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3508 		    inflight, inbetween, resend, above, acked);
3509 		ret = 1;
3510 #endif
3511 	}
3512 	return (ret);
3513 }
3514 
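/*
 * Editorial note: sctp_fs_audit() above partitions the sent queue by
 * comparing each chunk's ->sent marking against SCTP_DATAGRAM_RESEND
 * and SCTP_DATAGRAM_ACKED; the markings are ordered so that the simple
 * < / == / > tests cover every state. A sketch of the same partition:
 */
static const char *
example_classify_sent(int sent, int resend, int acked)
{
	if (sent < resend)
		return ("inflight");	/* counts against flight size */
	else if (sent == resend)
		return ("resend");
	else if (sent < acked)
		return ("inbetween");
	else if (sent > acked)
		return ("above");
	else
		return ("acked");
}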
3515 
3516 static void
3517 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3518     struct sctp_association *asoc,
3519     struct sctp_tmit_chunk *tp1)
3520 {
3521 	tp1->window_probe = 0;
3522 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3523 		/* TSNs skipped; we do NOT move back. */
3524 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3525 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3526 		    tp1->book_size,
3527 		    (uintptr_t) tp1->whoTo,
3528 		    tp1->rec.data.TSN_seq);
3529 		return;
3530 	}
3531 	/* First setup this by shrinking flight */
3532 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3533 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3534 		    tp1);
3535 	}
3536 	sctp_flight_size_decrease(tp1);
3537 	sctp_total_flight_decrease(stcb, tp1);
3538 	/* Now mark for resend */
3539 	tp1->sent = SCTP_DATAGRAM_RESEND;
3540 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3541 
3542 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3543 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3544 		    tp1->whoTo->flight_size,
3545 		    tp1->book_size,
3546 		    (uintptr_t) tp1->whoTo,
3547 		    tp1->rec.data.TSN_seq);
3548 	}
3549 }
3550 
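/*
 * Editorial note: both SACK handlers below recompute the peer's
 * receive window as the advertised rwnd minus the bytes (plus
 * per-chunk overhead) still in flight, then apply sender-side
 * silly-window avoidance by zeroing any window smaller than
 * sctp_sws_sender. Assuming sctp_sbspace_sub() is a saturating
 * subtraction, the computation amounts to:
 */
static uint32_t
example_peer_rwnd(uint32_t rwnd, uint32_t total_flight,
    uint32_t flight_cnt, uint32_t chunk_oh, uint32_t sws_sender)
{
	uint32_t in_flight, calc;

	in_flight = total_flight + flight_cnt * chunk_oh;
	calc = (rwnd > in_flight) ? (rwnd - in_flight) : 0;
	/* SWS sender side engages: treat a tiny peer window as closed. */
	return ((calc < sws_sender) ? 0 : calc);
}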
3551 void
3552 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3553     uint32_t rwnd, int *abort_now, int ecne_seen)
3554 {
3555 	struct sctp_nets *net;
3556 	struct sctp_association *asoc;
3557 	struct sctp_tmit_chunk *tp1, *tp2;
3558 	uint32_t old_rwnd;
3559 	int win_probe_recovery = 0;
3560 	int win_probe_recovered = 0;
3561 	int j, done_once = 0;
3562 	int rto_ok = 1;
3563 
3564 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3565 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3566 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3567 	}
3568 	SCTP_TCB_LOCK_ASSERT(stcb);
3569 #ifdef SCTP_ASOCLOG_OF_TSNS
3570 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3571 	stcb->asoc.cumack_log_at++;
3572 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3573 		stcb->asoc.cumack_log_at = 0;
3574 	}
3575 #endif
3576 	asoc = &stcb->asoc;
3577 	old_rwnd = asoc->peers_rwnd;
3578 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3579 		/* old ack */
3580 		return;
3581 	} else if (asoc->last_acked_seq == cumack) {
3582 		/* Window update sack */
3583 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3584 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3585 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3586 			/* SWS sender side engages */
3587 			asoc->peers_rwnd = 0;
3588 		}
3589 		if (asoc->peers_rwnd > old_rwnd) {
3590 			goto again;
3591 		}
3592 		return;
3593 	}
3594 	/* First setup for CC stuff */
3595 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3596 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3597 			/* Drag along the window_tsn for cwr's */
3598 			net->cwr_window_tsn = cumack;
3599 		}
3600 		net->prev_cwnd = net->cwnd;
3601 		net->net_ack = 0;
3602 		net->net_ack2 = 0;
3603 
3604 		/*
3605 		 * CMT: Reset CUC and Fast recovery algo variables before
3606 		 * SACK processing
3607 		 */
3608 		net->new_pseudo_cumack = 0;
3609 		net->will_exit_fast_recovery = 0;
3610 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3611 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3612 		}
3613 	}
3614 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3615 		uint32_t send_s;
3616 
3617 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3618 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3619 			    sctpchunk_listhead);
3620 			send_s = tp1->rec.data.TSN_seq + 1;
3621 		} else {
3622 			send_s = asoc->sending_seq;
3623 		}
3624 		if (SCTP_TSN_GE(cumack, send_s)) {
3625 			struct mbuf *op_err;
3626 			char msg[SCTP_DIAG_INFO_LEN];
3627 
3628 			*abort_now = 1;
3629 			/* XXX */
3630 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3631 			    cumack, send_s);
3632 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3633 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3634 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3635 			return;
3636 		}
3637 	}
3638 	asoc->this_sack_highest_gap = cumack;
3639 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3640 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3641 		    stcb->asoc.overall_error_count,
3642 		    0,
3643 		    SCTP_FROM_SCTP_INDATA,
3644 		    __LINE__);
3645 	}
3646 	stcb->asoc.overall_error_count = 0;
3647 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3648 		/* process the new consecutive TSN first */
3649 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3650 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3651 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3652 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3653 				}
3654 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3655 					/*
3656 					 * If it is less than ACKED, it is
3657 					 * now no-longer in flight. Higher
3658 					 * values may occur during marking
3659 					 */
3660 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3661 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3662 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3663 							    tp1->whoTo->flight_size,
3664 							    tp1->book_size,
3665 							    (uintptr_t) tp1->whoTo,
3666 							    tp1->rec.data.TSN_seq);
3667 						}
3668 						sctp_flight_size_decrease(tp1);
3669 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3670 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3671 							    tp1);
3672 						}
3673 						/* sa_ignore NO_NULL_CHK */
3674 						sctp_total_flight_decrease(stcb, tp1);
3675 					}
3676 					tp1->whoTo->net_ack += tp1->send_size;
3677 					if (tp1->snd_count < 2) {
3678 						/*
3679 						 * True non-retransmitted
3680 						 * chunk
3681 						 */
3682 						tp1->whoTo->net_ack2 +=
3683 						    tp1->send_size;
3684 
3685 						/* update RTO too? */
3686 						if (tp1->do_rtt) {
3687 							if (rto_ok) {
3688 								/* sa_ignore NO_NULL_CHK */
3689 								tp1->whoTo->RTO =
3694 								    sctp_calculate_rto(stcb,
3695 								    asoc, tp1->whoTo,
3696 								    &tp1->sent_rcv_time,
3697 								    sctp_align_safe_nocopy,
3698 								    SCTP_RTT_FROM_DATA);
3699 								rto_ok = 0;
3700 							}
3701 							if (tp1->whoTo->rto_needed == 0) {
3702 								tp1->whoTo->rto_needed = 1;
3703 							}
3704 							tp1->do_rtt = 0;
3705 						}
3706 					}
3707 					/*
3708 					 * CMT: CUCv2 algorithm. From the
3709 					 * cumack'd TSNs, for each TSN being
3710 					 * acked for the first time, set the
3711 					 * following variables for the
3712 					 * corresp destination.
3713 					 * new_pseudo_cumack will trigger a
3714 					 * cwnd update.
3715 					 * find_(rtx_)pseudo_cumack will
3716 					 * trigger search for the next
3717 					 * expected (rtx-)pseudo-cumack.
3718 					 */
3719 					tp1->whoTo->new_pseudo_cumack = 1;
3720 					tp1->whoTo->find_pseudo_cumack = 1;
3721 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3722 
3723 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3724 						/* sa_ignore NO_NULL_CHK */
3725 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3726 					}
3727 				}
3728 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3729 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3730 				}
3731 				if (tp1->rec.data.chunk_was_revoked) {
3732 					/* deflate the cwnd */
3733 					tp1->whoTo->cwnd -= tp1->book_size;
3734 					tp1->rec.data.chunk_was_revoked = 0;
3735 				}
3736 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3737 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3738 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3739 #ifdef INVARIANTS
3740 					} else {
3741 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3742 #endif
3743 					}
3744 				}
3745 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3746 				if (tp1->data) {
3747 					/* sa_ignore NO_NULL_CHK */
3748 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3749 					sctp_m_freem(tp1->data);
3750 					tp1->data = NULL;
3751 				}
3752 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3753 					sctp_log_sack(asoc->last_acked_seq,
3754 					    cumack,
3755 					    tp1->rec.data.TSN_seq,
3756 					    0,
3757 					    0,
3758 					    SCTP_LOG_FREE_SENT);
3759 				}
3760 				asoc->sent_queue_cnt--;
3761 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3762 			} else {
3763 				break;
3764 			}
3765 		}
3766 
3767 	}
3768 	/* sa_ignore NO_NULL_CHK */
3769 	if (stcb->sctp_socket) {
3770 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3771 		struct socket *so;
3772 
3773 #endif
3774 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3775 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3776 			/* sa_ignore NO_NULL_CHK */
3777 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3778 		}
3779 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3780 		so = SCTP_INP_SO(stcb->sctp_ep);
3781 		atomic_add_int(&stcb->asoc.refcnt, 1);
3782 		SCTP_TCB_UNLOCK(stcb);
3783 		SCTP_SOCKET_LOCK(so, 1);
3784 		SCTP_TCB_LOCK(stcb);
3785 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3786 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3787 			/* assoc was freed while we were unlocked */
3788 			SCTP_SOCKET_UNLOCK(so, 1);
3789 			return;
3790 		}
3791 #endif
3792 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3793 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3794 		SCTP_SOCKET_UNLOCK(so, 1);
3795 #endif
3796 	} else {
3797 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3798 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3799 		}
3800 	}
3801 
3802 	/* JRS - Use the congestion control given in the CC module */
3803 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3804 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3805 			if (net->net_ack2 > 0) {
3806 				/*
3807 				 * Karn's rule applies to clearing error
3808 				 * count; this is optional.
3809 				 */
3810 				net->error_count = 0;
3811 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3812 					/* addr came good */
3813 					net->dest_state |= SCTP_ADDR_REACHABLE;
3814 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3815 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3816 				}
3817 				if (net == stcb->asoc.primary_destination) {
3818 					if (stcb->asoc.alternate) {
3819 						/*
3820 						 * release the alternate,
3821 						 * primary is good
3822 						 */
3823 						sctp_free_remote_addr(stcb->asoc.alternate);
3824 						stcb->asoc.alternate = NULL;
3825 					}
3826 				}
3827 				if (net->dest_state & SCTP_ADDR_PF) {
3828 					net->dest_state &= ~SCTP_ADDR_PF;
3829 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3830 					    stcb->sctp_ep, stcb, net,
3831 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3832 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3833 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3834 					/* Done with this net */
3835 					net->net_ack = 0;
3836 				}
3837 				/* restore any doubled timers */
3838 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3839 				if (net->RTO < stcb->asoc.minrto) {
3840 					net->RTO = stcb->asoc.minrto;
3841 				}
3842 				if (net->RTO > stcb->asoc.maxrto) {
3843 					net->RTO = stcb->asoc.maxrto;
3844 				}
3845 			}
3846 		}
3847 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3848 	}
3849 	asoc->last_acked_seq = cumack;
3850 
3851 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3852 		/* nothing left in-flight */
3853 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3854 			net->flight_size = 0;
3855 			net->partial_bytes_acked = 0;
3856 		}
3857 		asoc->total_flight = 0;
3858 		asoc->total_flight_count = 0;
3859 	}
3860 	/* RWND update */
3861 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3862 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3863 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3864 		/* SWS sender side engages */
3865 		asoc->peers_rwnd = 0;
3866 	}
3867 	if (asoc->peers_rwnd > old_rwnd) {
3868 		win_probe_recovery = 1;
3869 	}
3870 	/* Now assure a timer where data is queued at */
3871 again:
3872 	j = 0;
3873 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3874 		int to_ticks;
3875 
3876 		if (win_probe_recovery && (net->window_probe)) {
3877 			win_probe_recovered = 1;
3878 			/*
3879 			 * Find first chunk that was used with window probe
3880 			 * and clear the sent flag
3881 			 */
3882 			/* sa_ignore FREED_MEMORY */
3883 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3884 				if (tp1->window_probe) {
3885 					/* move back to data send queue */
3886 					sctp_window_probe_recovery(stcb, asoc, tp1);
3887 					break;
3888 				}
3889 			}
3890 		}
3891 		if (net->RTO == 0) {
3892 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3893 		} else {
3894 			to_ticks = MSEC_TO_TICKS(net->RTO);
3895 		}
3896 		if (net->flight_size) {
3897 			j++;
3898 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3899 			    sctp_timeout_handler, &net->rxt_timer);
3900 			if (net->window_probe) {
3901 				net->window_probe = 0;
3902 			}
3903 		} else {
3904 			if (net->window_probe) {
3905 				/*
3906 				 * In window probes we must assure a timer
3907 				 * is still running there
3908 				 */
3909 				net->window_probe = 0;
3910 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3911 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3912 					    sctp_timeout_handler, &net->rxt_timer);
3913 				}
3914 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3915 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3916 				    stcb, net,
3917 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3918 			}
3919 		}
3920 	}
3921 	if ((j == 0) &&
3922 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3923 	    (asoc->sent_queue_retran_cnt == 0) &&
3924 	    (win_probe_recovered == 0) &&
3925 	    (done_once == 0)) {
3926 		/*
3927 		 * huh, this should not happen unless all packets are
3928 		 * PR-SCTP and marked to skip of course.
3929 		 */
3930 		if (sctp_fs_audit(asoc)) {
3931 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3932 				net->flight_size = 0;
3933 			}
3934 			asoc->total_flight = 0;
3935 			asoc->total_flight_count = 0;
3936 			asoc->sent_queue_retran_cnt = 0;
3937 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3938 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3939 					sctp_flight_size_increase(tp1);
3940 					sctp_total_flight_increase(stcb, tp1);
3941 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3942 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3943 				}
3944 			}
3945 		}
3946 		done_once = 1;
3947 		goto again;
3948 	}
3949 	/**********************************/
3950 	/* Now what about shutdown issues */
3951 	/**********************************/
3952 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3953 		/* nothing left on sendqueue.. consider done */
3954 		/* clean up */
3955 		if ((asoc->stream_queue_cnt == 1) &&
3956 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3957 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3958 		    (asoc->locked_on_sending)
3959 		    ) {
3960 			struct sctp_stream_queue_pending *sp;
3961 
3962 			/*
3963 			 * I may be in a state where we got all across.. but
3964 			 * cannot write more due to a shutdown... we abort
3965 			 * since the user did not indicate EOR in this case.
3966 			 * The sp will be cleaned during free of the asoc.
3967 			 */
3968 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3969 			    sctp_streamhead);
3970 			if ((sp) && (sp->length == 0)) {
3971 				/* Let cleanup code purge it */
3972 				if (sp->msg_is_complete) {
3973 					asoc->stream_queue_cnt--;
3974 				} else {
3975 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3976 					asoc->locked_on_sending = NULL;
3977 					asoc->stream_queue_cnt--;
3978 				}
3979 			}
3980 		}
3981 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3982 		    (asoc->stream_queue_cnt == 0)) {
3983 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3984 				/* Need to abort here */
3985 				struct mbuf *op_err;
3986 
3987 		abort_out_now:
3988 				*abort_now = 1;
3989 				/* XXX */
3990 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3991 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
3992 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3993 			} else {
3994 				struct sctp_nets *netp;
3995 
3996 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3997 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3998 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3999 				}
4000 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4001 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4002 				sctp_stop_timers_for_shutdown(stcb);
4003 				if (asoc->alternate) {
4004 					netp = asoc->alternate;
4005 				} else {
4006 					netp = asoc->primary_destination;
4007 				}
4008 				sctp_send_shutdown(stcb, netp);
4009 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4010 				    stcb->sctp_ep, stcb, netp);
4011 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4012 				    stcb->sctp_ep, stcb, netp);
4013 			}
4014 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4015 		    (asoc->stream_queue_cnt == 0)) {
4016 			struct sctp_nets *netp;
4017 
4018 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4019 				goto abort_out_now;
4020 			}
4021 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4022 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4023 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4024 			sctp_stop_timers_for_shutdown(stcb);
4025 			if (asoc->alternate) {
4026 				netp = asoc->alternate;
4027 			} else {
4028 				netp = asoc->primary_destination;
4029 			}
4030 			sctp_send_shutdown_ack(stcb, netp);
4031 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4032 			    stcb->sctp_ep, stcb, netp);
4033 		}
4034 	}
4035 	/*********************************************/
4036 	/* Here we perform PR-SCTP procedures        */
4037 	/* (section 4.2)                             */
4038 	/*********************************************/
4039 	/* C1. update advancedPeerAckPoint */
4040 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4041 		asoc->advanced_peer_ack_point = cumack;
4042 	}
4043 	/* PR-Sctp issues need to be addressed too */
4044 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4045 		struct sctp_tmit_chunk *lchk;
4046 		uint32_t old_adv_peer_ack_point;
4047 
4048 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4049 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4050 		/* C3. See if we need to send a Fwd-TSN */
4051 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4052 			/*
4053 			 * ISSUE with ECN, see FWD-TSN processing.
4054 			 */
4055 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4056 				send_forward_tsn(stcb, asoc);
4057 			} else if (lchk) {
4058 				/* try to FR fwd-tsn's that get lost too */
4059 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4060 					send_forward_tsn(stcb, asoc);
4061 				}
4062 			}
4063 		}
4064 		if (lchk) {
4065 			/* Assure a timer is up */
4066 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4067 			    stcb->sctp_ep, stcb, lchk->whoTo);
4068 		}
4069 	}
4070 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4071 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4072 		    rwnd,
4073 		    stcb->asoc.peers_rwnd,
4074 		    stcb->asoc.total_flight,
4075 		    stcb->asoc.total_output_queue_size);
4076 	}
4077 }
4078 
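/*
 * Editorial note: in both handlers, once a destination yields
 * unambiguous new acks (net_ack2 > 0), any exponentially backed-off
 * RTO is restored from the smoothed RTT state and clamped to the
 * association's bounds. As a stand-alone computation:
 */
static uint32_t
example_restore_rto(uint32_t lastsa, uint32_t lastsv, int rtt_shift,
    uint32_t minrto, uint32_t maxrto)
{
	uint32_t rto;

	/* Mirrors net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv. */
	rto = (lastsa >> rtt_shift) + lastsv;
	if (rto < minrto)
		rto = minrto;
	if (rto > maxrto)
		rto = maxrto;
	return (rto);
}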
4079 void
4080 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4081     struct sctp_tcb *stcb,
4082     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4083     int *abort_now, uint8_t flags,
4084     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4085 {
4086 	struct sctp_association *asoc;
4087 	struct sctp_tmit_chunk *tp1, *tp2;
4088 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4089 	uint16_t wake_him = 0;
4090 	uint32_t send_s = 0;
4091 	long j;
4092 	int accum_moved = 0;
4093 	int will_exit_fast_recovery = 0;
4094 	uint32_t a_rwnd, old_rwnd;
4095 	int win_probe_recovery = 0;
4096 	int win_probe_recovered = 0;
4097 	struct sctp_nets *net = NULL;
4098 	int done_once;
4099 	int rto_ok = 1;
4100 	uint8_t reneged_all = 0;
4101 	uint8_t cmt_dac_flag;
4102 
4103 	/*
4104 	 * we take any chance we can to service our queues since we cannot
4105 	 * get awoken when the socket is read from :<
4106 	 */
4107 	/*
4108 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4109 	 * old sack, if so discard. 2) If there is nothing left in the send
4110 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4111 	 * too, update any rwnd change and verify no timers are running.
4112 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4113 	 * moved process these first and note that it moved. 4) Process any
4114 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4115 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4116 	 * sync up flightsizes and things, stop all timers and also check
4117 	 * for shutdown_pending state. If so then go ahead and send off the
4118 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4119 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4120 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4121 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4122 	 * if in shutdown_recv state.
4123 	 */
4124 	SCTP_TCB_LOCK_ASSERT(stcb);
4125 	/* CMT DAC algo */
4126 	this_sack_lowest_newack = 0;
4127 	SCTP_STAT_INCR(sctps_slowpath_sack);
4128 	last_tsn = cum_ack;
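	/*
	 * Editorial note: the peer sets the SCTP_SACK_CMT_DAC flag when
	 * this SACK covers two received packets (the delayed-ack variant
	 * used with CMT); the strike logic later uses it to mark a
	 * missing TSN one additional time.
	 */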
4129 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4130 #ifdef SCTP_ASOCLOG_OF_TSNS
4131 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4132 	stcb->asoc.cumack_log_at++;
4133 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4134 		stcb->asoc.cumack_log_at = 0;
4135 	}
4136 #endif
4137 	a_rwnd = rwnd;
4138 
4139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4140 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4141 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4142 	}
4143 	old_rwnd = stcb->asoc.peers_rwnd;
4144 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4145 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4146 		    stcb->asoc.overall_error_count,
4147 		    0,
4148 		    SCTP_FROM_SCTP_INDATA,
4149 		    __LINE__);
4150 	}
4151 	stcb->asoc.overall_error_count = 0;
4152 	asoc = &stcb->asoc;
4153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4154 		sctp_log_sack(asoc->last_acked_seq,
4155 		    cum_ack,
4156 		    0,
4157 		    num_seg,
4158 		    num_dup,
4159 		    SCTP_LOG_NEW_SACK);
4160 	}
4161 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4162 		uint16_t i;
4163 		uint32_t *dupdata, dblock;
4164 
4165 		for (i = 0; i < num_dup; i++) {
4166 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4167 			    sizeof(uint32_t), (uint8_t *) & dblock);
4168 			if (dupdata == NULL) {
4169 				break;
4170 			}
4171 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4172 		}
4173 	}
4174 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4175 		/* reality check */
4176 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4177 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4178 			    sctpchunk_listhead);
4179 			send_s = tp1->rec.data.TSN_seq + 1;
4180 		} else {
4181 			tp1 = NULL;
4182 			send_s = asoc->sending_seq;
4183 		}
4184 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4185 			struct mbuf *op_err;
4186 			char msg[SCTP_DIAG_INFO_LEN];
4187 
4188 			/*
4189 			 * no way, we have not even sent this TSN out yet.
4190 			 * Peer is hopelessly messed up with us.
4191 			 */
4192 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4193 			    cum_ack, send_s);
4194 			if (tp1) {
4195 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4196 				    tp1->rec.data.TSN_seq, (void *)tp1);
4197 			}
4198 	hopeless_peer:
4199 			*abort_now = 1;
4200 			/* XXX */
4201 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4202 			    cum_ack, send_s);
4203 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4204 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4205 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4206 			return;
4207 		}
4208 	}
4209 	/**********************/
4210 	/* 1) check the range */
4211 	/**********************/
4212 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4213 		/* acking something behind */
4214 		return;
4215 	}
4216 	/* update the Rwnd of the peer */
4217 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4218 	    TAILQ_EMPTY(&asoc->send_queue) &&
4219 	    (asoc->stream_queue_cnt == 0)) {
4220 		/* nothing left on send/sent and strmq */
4221 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4222 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4223 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4224 		}
4225 		asoc->peers_rwnd = a_rwnd;
4226 		if (asoc->sent_queue_retran_cnt) {
4227 			asoc->sent_queue_retran_cnt = 0;
4228 		}
4229 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4230 			/* SWS sender side engages */
4231 			asoc->peers_rwnd = 0;
4232 		}
4233 		/* stop any timers */
4234 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4235 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4236 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4237 			net->partial_bytes_acked = 0;
4238 			net->flight_size = 0;
4239 		}
4240 		asoc->total_flight = 0;
4241 		asoc->total_flight_count = 0;
4242 		return;
4243 	}
4244 	/*
4245 	 * We init net_ack and net_ack2 to 0. These are used to track two
4246 	 * things. The total byte count acked is tracked in net_ack AND
4247 	 * net_ack2 is used to track the total bytes acked that are
4248 	 * unambiguous and were never retransmitted. We track these on a per
4249 	 * destination address basis.
4250 	 */
4251 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4252 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4253 			/* Drag along the window_tsn for cwr's */
4254 			net->cwr_window_tsn = cum_ack;
4255 		}
4256 		net->prev_cwnd = net->cwnd;
4257 		net->net_ack = 0;
4258 		net->net_ack2 = 0;
4259 
4260 		/*
4261 		 * CMT: Reset CUC and Fast recovery algo variables before
4262 		 * SACK processing
4263 		 */
4264 		net->new_pseudo_cumack = 0;
4265 		net->will_exit_fast_recovery = 0;
4266 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4267 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4268 		}
4269 	}
4270 	/* process the new consecutive TSN first */
4271 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4272 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4273 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4274 				accum_moved = 1;
4275 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4276 					/*
4277 					 * If it is less than ACKED, it is
4278 					 * now no-longer in flight. Higher
4279 					 * values may occur during marking
4280 					 */
4281 					if ((tp1->whoTo->dest_state &
4282 					    SCTP_ADDR_UNCONFIRMED) &&
4283 					    (tp1->snd_count < 2)) {
4284 						/*
4285 						 * If there was no retran
4286 						 * and the address is
4287 						 * un-confirmed and we sent
4288 						 * there and are now
4289 						 * sacked... it's confirmed,
4290 						 * mark it so.
4291 						 */
4292 						tp1->whoTo->dest_state &=
4293 						    ~SCTP_ADDR_UNCONFIRMED;
4294 					}
4295 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4296 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4297 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4298 							    tp1->whoTo->flight_size,
4299 							    tp1->book_size,
4300 							    (uintptr_t) tp1->whoTo,
4301 							    tp1->rec.data.TSN_seq);
4302 						}
4303 						sctp_flight_size_decrease(tp1);
4304 						sctp_total_flight_decrease(stcb, tp1);
4305 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4306 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4307 							    tp1);
4308 						}
4309 					}
4310 					tp1->whoTo->net_ack += tp1->send_size;
4311 
4312 					/* CMT SFR and DAC algos */
4313 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4314 					tp1->whoTo->saw_newack = 1;
4315 
4316 					if (tp1->snd_count < 2) {
4317 						/*
4318 						 * True non-retransmitted
4319 						 * chunk
4320 						 */
4321 						tp1->whoTo->net_ack2 +=
4322 						    tp1->send_size;
4323 
4324 						/* update RTO too? */
4325 						if (tp1->do_rtt) {
4326 							if (rto_ok) {
4327 								tp1->whoTo->RTO =
4328 								    sctp_calculate_rto(stcb,
4329 								    asoc, tp1->whoTo,
4330 								    &tp1->sent_rcv_time,
4331 								    sctp_align_safe_nocopy,
4332 								    SCTP_RTT_FROM_DATA);
4333 								rto_ok = 0;
4334 							}
4335 							if (tp1->whoTo->rto_needed == 0) {
4336 								tp1->whoTo->rto_needed = 1;
4337 							}
4338 							tp1->do_rtt = 0;
4339 						}
4340 					}
4341 					/*
4342 					 * CMT: CUCv2 algorithm. From the
4343 					 * cumack'd TSNs, for each TSN being
4344 					 * acked for the first time, set the
4345 					 * following variables for the
4346 					 * corresp destination.
4347 					 * new_pseudo_cumack will trigger a
4348 					 * cwnd update.
4349 					 * find_(rtx_)pseudo_cumack will
4350 					 * trigger search for the next
4351 					 * expected (rtx-)pseudo-cumack.
4352 					 */
4353 					tp1->whoTo->new_pseudo_cumack = 1;
4354 					tp1->whoTo->find_pseudo_cumack = 1;
4355 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4356 
4357 
4358 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4359 						sctp_log_sack(asoc->last_acked_seq,
4360 						    cum_ack,
4361 						    tp1->rec.data.TSN_seq,
4362 						    0,
4363 						    0,
4364 						    SCTP_LOG_TSN_ACKED);
4365 					}
4366 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4367 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4368 					}
4369 				}
4370 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4371 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4372 #ifdef SCTP_AUDITING_ENABLED
4373 					sctp_audit_log(0xB3,
4374 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4375 #endif
4376 				}
4377 				if (tp1->rec.data.chunk_was_revoked) {
4378 					/* deflate the cwnd */
4379 					tp1->whoTo->cwnd -= tp1->book_size;
4380 					tp1->rec.data.chunk_was_revoked = 0;
4381 				}
4382 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4383 					tp1->sent = SCTP_DATAGRAM_ACKED;
4384 				}
4385 			}
4386 		} else {
4387 			break;
4388 		}
4389 	}
4390 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4391 	/* always set this up to cum-ack */
4392 	asoc->this_sack_highest_gap = last_tsn;
4393 
4394 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4395 
4396 		/*
4397 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4398 		 * to be greater than the cumack. Also reset saw_newack to 0
4399 		 * for all dests.
4400 		 */
4401 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4402 			net->saw_newack = 0;
4403 			net->this_sack_highest_newack = last_tsn;
4404 		}
4405 
4406 		/*
4407 		 * this_sack_highest_gap will increase while handling NEW
4408 		 * segments; this_sack_highest_newack will increase while
4409 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4410 		 * used for CMT DAC algo. saw_newack will also change.
4411 		 */
4412 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4413 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4414 		    num_seg, num_nr_seg, &rto_ok)) {
4415 			wake_him++;
4416 		}
4417 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4418 			/*
4419 			 * validate the biggest_tsn_acked in the gap acks if
4420 			 * strict adherence is wanted.
4421 			 */
4422 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4423 				/*
4424 				 * peer is either confused or we are under
4425 				 * attack. We must abort.
4426 				 */
4427 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4428 				    biggest_tsn_acked, send_s);
4429 				goto hopeless_peer;
4430 			}
4431 		}
4432 	}
4433 	/*******************************************/
4434 	/* cancel ALL T3-send timer if accum moved */
4435 	/*******************************************/
4436 	if (asoc->sctp_cmt_on_off > 0) {
4437 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4438 			if (net->new_pseudo_cumack)
4439 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4440 				    stcb, net,
4441 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4442 
4443 		}
4444 	} else {
4445 		if (accum_moved) {
4446 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4447 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4448 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4449 			}
4450 		}
4451 	}
4452 	/********************************************/
4453 	/* drop the acked chunks from the sentqueue */
4454 	/********************************************/
4455 	asoc->last_acked_seq = cum_ack;
4456 
4457 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4458 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4459 			break;
4460 		}
4461 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4462 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4463 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4464 #ifdef INVARIANTS
4465 			} else {
4466 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4467 #endif
4468 			}
4469 		}
4470 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4471 		if (PR_SCTP_ENABLED(tp1->flags)) {
4472 			if (asoc->pr_sctp_cnt != 0)
4473 				asoc->pr_sctp_cnt--;
4474 		}
4475 		asoc->sent_queue_cnt--;
4476 		if (tp1->data) {
4477 			/* sa_ignore NO_NULL_CHK */
4478 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4479 			sctp_m_freem(tp1->data);
4480 			tp1->data = NULL;
4481 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4482 				asoc->sent_queue_cnt_removeable--;
4483 			}
4484 		}
4485 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4486 			sctp_log_sack(asoc->last_acked_seq,
4487 			    cum_ack,
4488 			    tp1->rec.data.TSN_seq,
4489 			    0,
4490 			    0,
4491 			    SCTP_LOG_FREE_SENT);
4492 		}
4493 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4494 		wake_him++;
4495 	}
4496 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4497 #ifdef INVARIANTS
4498 		panic("Warning flight size is positive and should be 0");
4499 #else
4500 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4501 		    asoc->total_flight);
4502 #endif
4503 		asoc->total_flight = 0;
4504 	}
4505 	/* sa_ignore NO_NULL_CHK */
4506 	if ((wake_him) && (stcb->sctp_socket)) {
4507 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4508 		struct socket *so;
4509 
4510 #endif
4511 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4512 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4513 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4514 		}
4515 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4516 		so = SCTP_INP_SO(stcb->sctp_ep);
4517 		atomic_add_int(&stcb->asoc.refcnt, 1);
4518 		SCTP_TCB_UNLOCK(stcb);
4519 		SCTP_SOCKET_LOCK(so, 1);
4520 		SCTP_TCB_LOCK(stcb);
4521 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4522 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4523 			/* assoc was freed while we were unlocked */
4524 			SCTP_SOCKET_UNLOCK(so, 1);
4525 			return;
4526 		}
4527 #endif
4528 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4529 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4530 		SCTP_SOCKET_UNLOCK(so, 1);
4531 #endif
4532 	} else {
4533 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4534 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4535 		}
4536 	}
4537 
4538 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4539 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4540 			/* Setup so we will exit RFC2582 fast recovery */
4541 			will_exit_fast_recovery = 1;
4542 		}
4543 	}
4544 	/*
4545 	 * Check for revoked fragments:
4546 	 *
4547 	 * If the previous sack had no frags, then we can't have any revoked.
4548 	 * If the previous sack had frags, then: if we now have frags (aka
4549 	 * num_seg > 0), call sctp_check_for_revoked() to tell if the peer
4550 	 * revoked some of them; else the peer revoked all ACKED fragments,
4551 	 * since we had some before and now we have NONE.
4552 	 */
4553 
4554 	if (num_seg) {
4555 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4556 		asoc->saw_sack_with_frags = 1;
4557 	} else if (asoc->saw_sack_with_frags) {
4558 		int cnt_revoked = 0;
4559 
4560 		/* Peer revoked all dg's marked or acked */
4561 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4562 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4563 				tp1->sent = SCTP_DATAGRAM_SENT;
4564 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4565 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4566 					    tp1->whoTo->flight_size,
4567 					    tp1->book_size,
4568 					    (uintptr_t) tp1->whoTo,
4569 					    tp1->rec.data.TSN_seq);
4570 				}
4571 				sctp_flight_size_increase(tp1);
4572 				sctp_total_flight_increase(stcb, tp1);
4573 				tp1->rec.data.chunk_was_revoked = 1;
4574 				/*
4575 				 * To ensure that this increase in
4576 				 * flightsize, which is artificial, does not
4577 				 * throttle the sender, we also increase the
4578 				 * cwnd artificially.
4579 				 */
4580 				tp1->whoTo->cwnd += tp1->book_size;
4581 				cnt_revoked++;
4582 			}
4583 		}
4584 		if (cnt_revoked) {
4585 			reneged_all = 1;
4586 		}
4587 		asoc->saw_sack_with_frags = 0;
4588 	}
4589 	if (num_nr_seg > 0)
4590 		asoc->saw_sack_with_nr_frags = 1;
4591 	else
4592 		asoc->saw_sack_with_nr_frags = 0;
4593 
4594 	/* JRS - Use the congestion control given in the CC module */
4595 	if (ecne_seen == 0) {
4596 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4597 			if (net->net_ack2 > 0) {
4598 				/*
4599 				 * Karn's rule applies to clearing error
4600 				 * count; this is optional.
4601 				 */
4602 				net->error_count = 0;
4603 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4604 					/* addr came good */
4605 					net->dest_state |= SCTP_ADDR_REACHABLE;
4606 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4607 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4608 				}
4609 				if (net == stcb->asoc.primary_destination) {
4610 					if (stcb->asoc.alternate) {
4611 						/*
4612 						 * release the alternate,
4613 						 * primary is good
4614 						 */
4615 						sctp_free_remote_addr(stcb->asoc.alternate);
4616 						stcb->asoc.alternate = NULL;
4617 					}
4618 				}
4619 				if (net->dest_state & SCTP_ADDR_PF) {
4620 					net->dest_state &= ~SCTP_ADDR_PF;
4621 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4622 					    stcb->sctp_ep, stcb, net,
4623 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4624 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4625 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4626 					/* Done with this net */
4627 					net->net_ack = 0;
4628 				}
4629 				/* restore any doubled timers */
4630 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4631 				if (net->RTO < stcb->asoc.minrto) {
4632 					net->RTO = stcb->asoc.minrto;
4633 				}
4634 				if (net->RTO > stcb->asoc.maxrto) {
4635 					net->RTO = stcb->asoc.maxrto;
4636 				}
4637 			}
4638 		}
4639 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4640 	}
4641 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4642 		/* nothing left in-flight */
4643 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4644 			/* stop all timers */
4645 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4646 			    stcb, net,
4647 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4648 			net->flight_size = 0;
4649 			net->partial_bytes_acked = 0;
4650 		}
4651 		asoc->total_flight = 0;
4652 		asoc->total_flight_count = 0;
4653 	}
4654 	/**********************************/
4655 	/* Now what about shutdown issues */
4656 	/**********************************/
4657 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4658 		/* nothing left on sendqueue.. consider done */
4659 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4660 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4661 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4662 		}
4663 		asoc->peers_rwnd = a_rwnd;
4664 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4665 			/* SWS sender side engages */
4666 			asoc->peers_rwnd = 0;
4667 		}
4668 		/* clean up */
4669 		if ((asoc->stream_queue_cnt == 1) &&
4670 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4671 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4672 		    (asoc->locked_on_sending)
4673 		    ) {
4674 			struct sctp_stream_queue_pending *sp;
4675 
4676 			/*
4677 			 * We may be in a state where all data got across but
4678 			 * we cannot write more due to a shutdown; we abort
4679 			 * since the user did not indicate EOR in this case.
4680 			 */
4681 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4682 			    sctp_streamhead);
4683 			if ((sp) && (sp->length == 0)) {
4684 				asoc->locked_on_sending = NULL;
4685 				if (sp->msg_is_complete) {
4686 					asoc->stream_queue_cnt--;
4687 				} else {
4688 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4689 					asoc->stream_queue_cnt--;
4690 				}
4691 			}
4692 		}
4693 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4694 		    (asoc->stream_queue_cnt == 0)) {
4695 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4696 				/* Need to abort here */
4697 				struct mbuf *op_err;
4698 
4699 		abort_out_now:
4700 				*abort_now = 1;
4701 				/* XXX */
4702 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4703 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4704 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4705 				return;
4706 			} else {
4707 				struct sctp_nets *netp;
4708 
4709 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4710 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4711 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4712 				}
4713 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4714 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4715 				sctp_stop_timers_for_shutdown(stcb);
4716 				if (asoc->alternate) {
4717 					netp = asoc->alternate;
4718 				} else {
4719 					netp = asoc->primary_destination;
4720 				}
4721 				sctp_send_shutdown(stcb, netp);
4722 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4723 				    stcb->sctp_ep, stcb, netp);
4724 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4725 				    stcb->sctp_ep, stcb, netp);
4726 			}
4727 			return;
4728 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4729 		    (asoc->stream_queue_cnt == 0)) {
4730 			struct sctp_nets *netp;
4731 
4732 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4733 				goto abort_out_now;
4734 			}
4735 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4736 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4737 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4738 			sctp_stop_timers_for_shutdown(stcb);
4739 			if (asoc->alternate) {
4740 				netp = asoc->alternate;
4741 			} else {
4742 				netp = asoc->primary_destination;
4743 			}
4744 			sctp_send_shutdown_ack(stcb, netp);
4745 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4746 			    stcb->sctp_ep, stcb, netp);
4747 			return;
4748 		}
4749 	}
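	/*
	 * Summary of the shutdown decisions above (an informal sketch, not
	 * normative):
	 *
	 *	queues empty + SHUTDOWN_PENDING  -> send SHUTDOWN, start the
	 *		SHUTDOWN and SHUTDOWNGUARD timers
	 *	queues empty + SHUTDOWN_RECEIVED -> send SHUTDOWN-ACK, start
	 *		the SHUTDOWNACK timer
	 *	PARTIAL_MSG_LEFT set             -> abort, since the user
	 *		never completed the message with an EOR
	 */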
4750 	/*
4751 	 * Now here we are going to recycle net_ack for a different use...
4752 	 * HEADS UP.
4753 	 */
4754 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4755 		net->net_ack = 0;
4756 	}
4757 
4758 	/*
4759 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4760 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4761 	 * automatically ensure that.
4762 	 */
4763 	if ((asoc->sctp_cmt_on_off > 0) &&
4764 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4765 	    (cmt_dac_flag == 0)) {
4766 		this_sack_lowest_newack = cum_ack;
4767 	}
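	/*
	 * Sketch of the effect: with this_sack_lowest_newack pinned to
	 * cum_ack, no outstanding TSN can lie below it, so the DAC-based
	 * extra strike in sctp_strike_gap_ack_chunks() never fires when
	 * the peer did not set the DAC flag.
	 */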
4768 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4769 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4770 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4771 	}
4772 	/* JRS - Use the congestion control given in the CC module */
4773 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4774 
4775 	/* Now are we exiting loss recovery ? */
4776 	if (will_exit_fast_recovery) {
4777 		/* Ok, we must exit fast recovery */
4778 		asoc->fast_retran_loss_recovery = 0;
4779 	}
4780 	if ((asoc->sat_t3_loss_recovery) &&
4781 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4782 		/* end satellite t3 loss recovery */
4783 		asoc->sat_t3_loss_recovery = 0;
4784 	}
4785 	/*
4786 	 * CMT Fast recovery
4787 	 */
4788 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4789 		if (net->will_exit_fast_recovery) {
4790 			/* Ok, we must exit fast recovery */
4791 			net->fast_retran_loss_recovery = 0;
4792 		}
4793 	}
4794 
4795 	/* Adjust and set the new rwnd value */
4796 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4797 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4798 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4799 	}
4800 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4801 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4802 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4803 		/* SWS sender side engages */
4804 		asoc->peers_rwnd = 0;
4805 	}
4806 	if (asoc->peers_rwnd > old_rwnd) {
4807 		win_probe_recovery = 1;
4808 	}
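	/*
	 * Worked example of the window computation above (a sketch; all
	 * numbers are made up): with a_rwnd = 64000, total_flight = 12000,
	 * total_flight_count = 10 and sctp_peer_chunk_oh = 256,
	 *
	 *	peers_rwnd = 64000 - (12000 + 10 * 256) = 49440
	 *
	 * Had the result fallen below sctp_sws_sender it would have been
	 * forced to 0, which is the sender-side SWS avoidance.
	 */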
4809 	/*
4810 	 * Now we must set things up so we have a timer up for anyone with
4811 	 * outstanding data.
4812 	 */
4813 	done_once = 0;
4814 again:
4815 	j = 0;
4816 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4817 		if (win_probe_recovery && (net->window_probe)) {
4818 			win_probe_recovered = 1;
4819 			/*-
4820 			 * Find the first chunk that was used for the
4821 			 * window probe and clear the event. Put it
4822 			 * back into the send queue as if it had
4823 			 * not been sent.
4824 			 */
4825 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4826 				if (tp1->window_probe) {
4827 					sctp_window_probe_recovery(stcb, asoc, tp1);
4828 					break;
4829 				}
4830 			}
4831 		}
4832 		if (net->flight_size) {
4833 			j++;
4834 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4835 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4836 				    stcb->sctp_ep, stcb, net);
4837 			}
4838 			if (net->window_probe) {
4839 				net->window_probe = 0;
4840 			}
4841 		} else {
4842 			if (net->window_probe) {
4843 				/*
4844 				 * For window probes we must ensure that a
4845 				 * timer is still running there.
4846 				 */
4847 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4848 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4849 					    stcb->sctp_ep, stcb, net);
4850 
4851 				}
4852 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4853 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4854 				    stcb, net,
4855 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4856 			}
4857 		}
4858 	}
4859 	if ((j == 0) &&
4860 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4861 	    (asoc->sent_queue_retran_cnt == 0) &&
4862 	    (win_probe_recovered == 0) &&
4863 	    (done_once == 0)) {
4864 		/*
4865 		 * huh, this should not happen unless all packets are
4866 		 * Huh, this should not happen unless all packets are
4867 		 * PR-SCTP and marked to be skipped, of course.
4868 		if (sctp_fs_audit(asoc)) {
4869 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4870 				net->flight_size = 0;
4871 			}
4872 			asoc->total_flight = 0;
4873 			asoc->total_flight_count = 0;
4874 			asoc->sent_queue_retran_cnt = 0;
4875 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4876 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4877 					sctp_flight_size_increase(tp1);
4878 					sctp_total_flight_increase(stcb, tp1);
4879 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4880 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4881 				}
4882 			}
4883 		}
4884 		done_once = 1;
4885 		goto again;
4886 	}
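	/*
	 * Sketch of the audit path above: if sctp_fs_audit() reports a
	 * mismatch, the flight accounting is rebuilt from scratch by
	 * walking the sent_queue; chunks below RESEND go back into flight
	 * and chunks marked RESEND are recounted as pending retransmits.
	 */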
4887 	/*********************************************/
4888 	/* Here we perform PR-SCTP procedures        */
4889 	/* (section 4.2)                             */
4890 	/*********************************************/
4891 	/* C1. update advancedPeerAckPoint */
4892 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4893 		asoc->advanced_peer_ack_point = cum_ack;
4894 	}
4895 	/* C2. try to further move advancedPeerAckPoint ahead */
4896 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4897 		struct sctp_tmit_chunk *lchk;
4898 		uint32_t old_adv_peer_ack_point;
4899 
4900 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4901 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4902 		/* C3. See if we need to send a Fwd-TSN */
4903 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4904 			/*
4905 			 * ISSUE with ECN, see FWD-TSN processing.
4906 			 */
4907 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4908 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4909 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4910 				    old_adv_peer_ack_point);
4911 			}
4912 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4913 				send_forward_tsn(stcb, asoc);
4914 			} else if (lchk) {
4915 				/* try to fast-retransmit FWD-TSNs that get lost too */
4916 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4917 					send_forward_tsn(stcb, asoc);
4918 				}
4919 			}
4920 		}
4921 		if (lchk) {
4922 			/* Assure a timer is up */
4923 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4924 			    stcb->sctp_ep, stcb, lchk->whoTo);
4925 		}
4926 	}
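	/*
	 * Sketch of the C2/C3 steps above (illustrative numbers only):
	 * with cum_ack = 100 and abandoned PR-SCTP TSNs 101-103,
	 * sctp_try_advance_peer_ack_point() moves advanced_peer_ack_point
	 * to 103. Since 103 is beyond both cum_ack and the old ack point,
	 * a FWD-TSN carrying 103 is sent so the peer can skip the
	 * abandoned data.
	 */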
4927 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4928 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4929 		    a_rwnd,
4930 		    stcb->asoc.peers_rwnd,
4931 		    stcb->asoc.total_flight,
4932 		    stcb->asoc.total_output_queue_size);
4933 	}
4934 }
4935 
4936 void
4937 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4938 {
4939 	/* Copy cum-ack */
4940 	uint32_t cum_ack, a_rwnd;
4941 
4942 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4943 	/* Arrange so a_rwnd does NOT change */
4944 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
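	/*
	 * Sketch of the intent here: the SACK handler recomputes the
	 * peer's window as roughly a_rwnd minus the bytes in flight, so
	 * passing peers_rwnd + total_flight makes that subtraction yield
	 * (about) peers_rwnd again, leaving the peer's window unchanged
	 * by a SHUTDOWN's cum-ack.
	 */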
4945 
4946 	/* Now call the express sack handling */
4947 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4948 }
4949 
4950 static void
4951 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4952     struct sctp_stream_in *strmin)
4953 {
4954 	struct sctp_queued_to_read *ctl, *nctl;
4955 	struct sctp_association *asoc;
4956 	uint16_t tt;
4957 
4958 	asoc = &stcb->asoc;
4959 	tt = strmin->last_sequence_delivered;
4960 	/*
4961 	 * First deliver anything with a stream sequence number up to and
4962 	 * including the one that came in.
4963 	 */
4964 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4965 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4966 			/* this is deliverable now */
4967 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4968 			/* subtract pending on streams */
4969 			asoc->size_on_all_streams -= ctl->length;
4970 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4971 			/* deliver it to at least the delivery-q */
4972 			if (stcb->sctp_socket) {
4973 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4974 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4975 				    ctl,
4976 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4977 			}
4978 		} else {
4979 			/* no more delivery now. */
4980 			break;
4981 		}
4982 	}
4983 	/*
4984 	 * Now we must deliver things in the queue the normal way, if any
4985 	 * are now ready.
4986 	 */
4987 	tt = strmin->last_sequence_delivered + 1;
4988 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4989 		if (tt == ctl->sinfo_ssn) {
4990 			/* this is deliverable now */
4991 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4992 			/* subtract pending on streams */
4993 			asoc->size_on_all_streams -= ctl->length;
4994 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4995 			/* deliver it to at least the delivery-q */
4996 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4997 			if (stcb->sctp_socket) {
4998 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4999 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5000 				    ctl,
5001 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5002 
5003 			}
5004 			tt = strmin->last_sequence_delivered + 1;
5005 		} else {
5006 			break;
5007 		}
5008 	}
5009 }
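/*
 * Illustrative trace of the two loops above (a sketch): suppose
 * last_sequence_delivered is 5 and the inqueue holds SSNs 3, 4, 6 and 8.
 * The first loop delivers 3 and 4 (SSN <= 5); the second delivers 6,
 * advances tt to 7, and then stops at 8 because SSN 7 is still missing.
 */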
5010 
5011 static void
5012 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5013     struct sctp_association *asoc,
5014     uint16_t stream, uint16_t seq)
5015 {
5016 	struct sctp_tmit_chunk *chk, *nchk;
5017 
5018 	/* For each one on here see if we need to toss it */
5019 	/*
5020 	 * For now, large messages held on the reasmqueue that are complete
5021 	 * will be tossed too. We could in theory do more work: spin
5022 	 * through, stop after dumping one message (i.e. on seeing the start
5023 	 * of a new message at the head), and call the delivery function to
5024 	 * see if it can be delivered. But for now we just dump everything
5025 	 * on the queue.
5026 	 */
5027 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5028 		/*
5029 		 * Do not toss it if on a different stream or marked for
5030 		 * unordered delivery, in which case the stream sequence
5031 		 * number has no meaning.
5032 		 */
5033 		if ((chk->rec.data.stream_number != stream) ||
5034 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5035 			continue;
5036 		}
5037 		if (chk->rec.data.stream_seq == seq) {
5038 			/* It needs to be tossed */
5039 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5040 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5041 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5042 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5043 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5044 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5045 			}
5046 			asoc->size_on_reasm_queue -= chk->send_size;
5047 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5048 
5049 			/* Clear up any stream problem */
5050 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5051 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5052 				/*
5053 				 * We must move this stream's sequence
5054 				 * number forward if the chunk being
5055 				 * skipped is not unordered. There is a
5056 				 * chance that if the peer does not
5057 				 * include the last fragment in its FWD-TSN
5058 				 * we WILL have a problem here, since we
5059 				 * would have a partial chunk in the queue
5060 				 * that may not be deliverable. Also, if a
5061 				 * partial delivery API has started, the
5062 				 * user may get a partial chunk, the next
5063 				 * read returning a new one. Really ugly,
5064 				 * but I see no way around it! Maybe a notify??
5065 				 */
5066 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5067 			}
5068 			if (chk->data) {
5069 				sctp_m_freem(chk->data);
5070 				chk->data = NULL;
5071 			}
5072 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5073 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5074 			/*
5075 			 * If the stream_seq is greater than the purging one,
5076 			 * we are done.
5077 			 */
5078 			break;
5079 		}
5080 	}
5081 }
5082 
5083 
5084 void
5085 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5086     struct sctp_forward_tsn_chunk *fwd,
5087     int *abort_flag, struct mbuf *m, int offset)
5088 {
5089 	/* The pr-sctp fwd tsn */
5090 	/*
5091 	 * Here we perform all the data receiver side steps for
5092 	 * processing FwdTSN, as required by the PR-SCTP draft.
5093 	 *
5094 	 * Assume we get FwdTSN(x):
5095 	 * 1) update local cumTSN to x
5096 	 * 2) try to further advance cumTSN to x + others we have
5097 	 * 3) examine and update re-ordering queues on pr-in-streams
5098 	 * 4) clean up the re-assembly queue
5099 	 * 5) send a SACK to report where we are
5100 	 */
5101 	struct sctp_association *asoc;
5102 	uint32_t new_cum_tsn, gap;
5103 	unsigned int i, fwd_sz, m_size;
5104 	uint32_t str_seq;
5105 	struct sctp_stream_in *strm;
5106 	struct sctp_tmit_chunk *chk, *nchk;
5107 	struct sctp_queued_to_read *ctl, *sv;
5108 
5109 	asoc = &stcb->asoc;
5110 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5111 		SCTPDBG(SCTP_DEBUG_INDATA1,
5112 		    "Bad size (too small) fwd-tsn\n");
5113 		return;
5114 	}
5115 	m_size = (stcb->asoc.mapping_array_size << 3);
5116 	/*************************************************************/
5117 	/* 1. Here we update local cumTSN and shift the bitmap array */
5118 	/*************************************************************/
5119 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5120 
5121 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5122 		/* Already got there ... */
5123 		return;
5124 	}
5125 	/*
5126 	 * now we know the new TSN is more advanced, let's find the actual
5127 	 * gap
5128 	 */
5129 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5130 	asoc->cumulative_tsn = new_cum_tsn;
5131 	if (gap >= m_size) {
5132 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5133 			struct mbuf *op_err;
5134 			char msg[SCTP_DIAG_INFO_LEN];
5135 
5136 			/*
5137 			 * out of range (of single byte chunks in the rwnd I
5138 			 * give out). This must be an attacker.
5139 			 */
5140 			*abort_flag = 1;
5141 			snprintf(msg, sizeof(msg),
5142 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5143 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5144 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5145 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5146 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5147 			return;
5148 		}
5149 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5150 
5151 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5152 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5153 		asoc->highest_tsn_inside_map = new_cum_tsn;
5154 
5155 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5156 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5157 
5158 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5159 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5160 		}
5161 	} else {
5162 		SCTP_TCB_LOCK_ASSERT(stcb);
5163 		for (i = 0; i <= gap; i++) {
5164 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5165 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5166 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5167 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5168 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5169 				}
5170 			}
5171 		}
5172 	}
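	/*
	 * Gap arithmetic sketch for the bitmap update above: with
	 * mapping_array_base_tsn = 1000 and new_cum_tsn = 1005, gap = 5
	 * and bits 0..5 are set in the nr-mapping array, marking TSNs
	 * 1000-1005 as received. SCTP_CALC_TSN_TO_GAP uses serial number
	 * arithmetic, so this also holds across a 32-bit TSN wrap.
	 */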
5173 	/*************************************************************/
5174 	/* 2. Clear up re-assembly queue                             */
5175 	/*************************************************************/
5176 	/*
5177 	 * First service it if the pd-api is up, just in case we can
5178 	 * progress it forward.
5179 	 */
5180 	if (asoc->fragmented_delivery_inprogress) {
5181 		sctp_service_reassembly(stcb, asoc);
5182 	}
5183 	/* For each one on here see if we need to toss it */
5184 	/*
5185 	 * For now, large messages held on the reasmqueue that are complete
5186 	 * will be tossed too. We could in theory do more work: spin
5187 	 * through, stop after dumping one message (i.e. on seeing the start
5188 	 * of a new message at the head), and call the delivery function to
5189 	 * see if it can be delivered. But for now we just dump everything
5190 	 * on the queue.
5191 	 */
5192 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5193 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5194 			/* It needs to be tossed */
5195 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5196 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5197 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5198 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5199 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5200 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5201 			}
5202 			asoc->size_on_reasm_queue -= chk->send_size;
5203 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5204 
5205 			/* Clear up any stream problem */
5206 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5207 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5208 				/*
5209 				 * We must move this stream's sequence
5210 				 * number forward if the chunk being
5211 				 * skipped is not unordered. There is a
5212 				 * chance that if the peer does not
5213 				 * include the last fragment in its FWD-TSN
5214 				 * we WILL have a problem here, since we
5215 				 * would have a partial chunk in the queue
5216 				 * that may not be deliverable. Also, if a
5217 				 * partial delivery API has started, the
5218 				 * user may get a partial chunk, the next
5219 				 * read returning a new one. Really ugly,
5220 				 * but I see no way around it! Maybe a notify??
5221 				 */
5222 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5223 			}
5224 			if (chk->data) {
5225 				sctp_m_freem(chk->data);
5226 				chk->data = NULL;
5227 			}
5228 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5229 		} else {
5230 			/*
5231 			 * Ok we have gone beyond the end of the fwd-tsn's
5232 			 * mark.
5233 			 */
5234 			break;
5235 		}
5236 	}
5237 	/*******************************************************/
5238 	/* 3. Update the PR-stream re-ordering queues and fix  */
5239 	/* delivery issues as needed.                          */
5240 	/*******************************************************/
5241 	fwd_sz -= sizeof(*fwd);
5242 	if (m && fwd_sz) {
5243 		/* New method. */
5244 		unsigned int num_str;
5245 		struct sctp_strseq *stseq, strseqbuf;
5246 
5247 		offset += sizeof(*fwd);
5248 
5249 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5250 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5251 		for (i = 0; i < num_str; i++) {
5252 			uint16_t st;
5253 
5254 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5255 			    sizeof(struct sctp_strseq),
5256 			    (uint8_t *) & strseqbuf);
5257 			offset += sizeof(struct sctp_strseq);
5258 			if (stseq == NULL) {
5259 				break;
5260 			}
5261 			/* Convert */
5262 			st = ntohs(stseq->stream);
5263 			stseq->stream = st;
5264 			st = ntohs(stseq->sequence);
5265 			stseq->sequence = st;
5266 
5267 			/* now process */
5268 
5269 			/*
5270 			 * Ok we now look for the stream/seq on the read
5271 			 * queue where it's not all delivered. If we find it,
5272 			 * we transmute the read entry into a PDI_ABORTED.
5273 			 */
5274 			if (stseq->stream >= asoc->streamincnt) {
5275 				/* screwed up streams, stop!  */
5276 				break;
5277 			}
5278 			if ((asoc->str_of_pdapi == stseq->stream) &&
5279 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5280 				/*
5281 				 * If this is the one we were partially
5282 				 * delivering now then we no longer are.
5283 				 * Note this will change with the reassembly
5284 				 * re-write.
5285 				 */
5286 				asoc->fragmented_delivery_inprogress = 0;
5287 			}
5288 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5289 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5290 				if ((ctl->sinfo_stream == stseq->stream) &&
5291 				    (ctl->sinfo_ssn == stseq->sequence)) {
5292 					str_seq = (stseq->stream << 16) | stseq->sequence;
5293 					ctl->end_added = 1;
5294 					ctl->pdapi_aborted = 1;
5295 					sv = stcb->asoc.control_pdapi;
5296 					stcb->asoc.control_pdapi = ctl;
5297 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5298 					    stcb,
5299 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5300 					    (void *)&str_seq,
5301 					    SCTP_SO_NOT_LOCKED);
5302 					stcb->asoc.control_pdapi = sv;
5303 					break;
5304 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5305 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5306 					/* We are past our victim SSN */
5307 					break;
5308 				}
5309 			}
5310 			strm = &asoc->strmin[stseq->stream];
5311 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5312 				/* Update the sequence number */
5313 				strm->last_sequence_delivered = stseq->sequence;
5314 			}
5315 			/* now kick the stream the new way */
5316 			/* sa_ignore NO_NULL_CHK */
5317 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5318 		}
5319 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5320 	}
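	/*
	 * Sizing sketch for the loop above: struct sctp_strseq is a pair
	 * of 16-bit fields (stream and sequence), so a FWD-TSN carrying
	 * fwd_sz = 16 bytes of entries after the fixed header yields
	 * num_str = 16 / 4 = 4 stream/sequence pairs to process.
	 */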
5321 	/*
5322 	 * Now slide things forward.
5323 	 */
5324 	sctp_slide_mapping_arrays(stcb);
5325 
5326 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5327 		/* now lets kick out and check for more fragmented delivery */
5328 		/* sa_ignore NO_NULL_CHK */
5329 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5330 	}
5331 }
5332