xref: /freebsd/sys/netinet/sctp_indata.c (revision b2d48be1bc7df45ddd13b143a160d0acb5a383c5)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send, that is, and will be sending it) for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-to-many socket, since
73 	 * sb_cc is the count that everyone has put up. When we re-write
74 	 * sctp_soreceive we will fix this so that ONLY this
75 	 * association's data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * Take out what has NOT been put on the socket queue and that we
92 	 * still hold for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to control overhead, reduce it to 1,
107 	 * even if it is 0, to keep silly window syndrome (SWS) avoidance engaged.
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
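
/*
 * Worked example of the calculation above (illustrative numbers only,
 * and assuming MSIZE is 256 bytes): suppose sctp_sbspace() reports
 * 65536 bytes free, the reassembly queue holds 4096 bytes in 4 chunks,
 * the stream queues are empty, and my_rwnd_control_len is 512:
 *
 *	calc = 65536 - (4096 + 4 * 256) = 60416
 *	calc = 60416 - (0 + 0 * 256)    = 60416
 *	calc = 60416 - 512              = 59904
 *
 * Since 59904 >= my_rwnd_control_len, the advertised rwnd is 59904.
 * Had the control overhead pushed calc below my_rwnd_control_len it
 * would have been clamped to 1 so that SWS avoidance stays engaged.
 */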
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
159 
160 
161 /*
162  * Build out our readq entry based on the incoming packet.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
228 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	/*
254 	 * Make sure that there is no uninitialized padding between the
255 	 * cmsg header and cmsg data and after the cmsg data.
256 	 */
257 	memset(cmh, 0, len);
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 		cmh->cmsg_type = SCTP_RCVINFO;
262 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 		rcvinfo->rcv_context = sinfo->sinfo_context;
270 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
273 	}
274 	if (provide_nxt) {
275 		cmh->cmsg_level = IPPROTO_SCTP;
276 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 		cmh->cmsg_type = SCTP_NXTINFO;
278 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 		nxtinfo->nxt_flags = 0;
281 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
283 		}
284 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
286 		}
287 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
289 		}
290 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
295 	}
296 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 		cmh->cmsg_level = IPPROTO_SCTP;
298 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299 		if (use_extended) {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 			cmh->cmsg_type = SCTP_EXTRCV;
302 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
304 		} else {
305 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 			cmh->cmsg_type = SCTP_SNDRCV;
307 			*outinfo = *sinfo;
308 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
309 		}
310 	}
311 	return (ret);
312 }
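
/*
 * Illustrative userland counterpart (a sketch, not part of this file):
 * how an application that has enabled SCTP_RECVRCVINFO with setsockopt()
 * could walk the ancillary data assembled above, using the RFC 6458
 * socket API (<sys/socket.h>, <netinet/sctp.h>, <string.h>).  The
 * descriptor `fd' is hypothetical.
 *
 *	struct sctp_rcvinfo rcv;
 *	struct iovec iov;
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	char buf[2048];
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
 *
 *	iov.iov_base = buf;
 *	iov.iov_len = sizeof(buf);
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(fd, &msg, 0) > 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *			    cmsg->cmsg_type == SCTP_RCVINFO)
 *				memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *		}
 *	}
 *
 * Afterwards rcv.rcv_sid, rcv.rcv_ssn, rcv.rcv_tsn, rcv.rcv_ppid and the
 * remaining fields carry exactly the values filled in by this function.
 */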
313 
314 
315 static void
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
317 {
318 	uint32_t gap, i, cumackp1;
319 	int fnd = 0;
320 
321 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
322 		return;
323 	}
324 	cumackp1 = asoc->cumulative_tsn + 1;
325 	if (SCTP_TSN_GT(cumackp1, tsn)) {
326 		/*
327 		 * This TSN is behind the cumulative ack and thus we don't need to
328 		 * worry about it being moved from one mapping array to the other.
329 		 */
330 		return;
331 	}
332 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
333 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 		sctp_print_mapping_array(asoc);
336 #ifdef INVARIANTS
337 		panic("Things are really messed up now!!");
338 #endif
339 	}
340 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 		asoc->highest_tsn_inside_nr_map = tsn;
344 	}
345 	if (tsn == asoc->highest_tsn_inside_map) {
346 		/* We must back down to see what the new highest is */
347 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 				asoc->highest_tsn_inside_map = i;
351 				fnd = 1;
352 				break;
353 			}
354 		}
355 		if (!fnd) {
356 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
357 		}
358 	}
359 }
360 
361 
362 /*
363  * We are currently delivering from the reassembly queue. We must continue to
364  * deliver until we either: 1) run out of space, 2) run out of sequential
365  * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
366  */
367 static void
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
369 {
370 	struct sctp_tmit_chunk *chk, *nchk;
371 	uint16_t nxt_todel;
372 	uint16_t stream_no;
373 	int end = 0;
374 	int cntDel;
375 	struct sctp_queued_to_read *control, *ctl, *nctl;
376 
377 	if (stcb == NULL)
378 		return;
379 
380 	cntDel = stream_no = 0;
381 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 		/* socket above is long gone or going.. */
385 abandon:
386 		asoc->fragmented_delivery_inprogress = 0;
387 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 			asoc->size_on_reasm_queue -= chk->send_size;
390 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
391 			/*
392 			 * Lose the data pointer, since it's in the socket
393 			 * buffer
394 			 */
395 			if (chk->data) {
396 				sctp_m_freem(chk->data);
397 				chk->data = NULL;
398 			}
399 			/* Now free the address and data */
400 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 			/* sa_ignore FREED_MEMORY */
402 		}
403 		return;
404 	}
405 	SCTP_TCB_LOCK_ASSERT(stcb);
406 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 			/* Can't deliver more :< */
409 			return;
410 		}
411 		stream_no = chk->rec.data.stream_number;
412 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 		if (nxt_todel != chk->rec.data.stream_seq &&
414 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
415 			/*
416 			 * Not the next sequence to deliver in its stream and
417 			 * not unordered, so we cannot deliver it yet.
418 			 */
419 			return;
420 		}
421 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
422 
423 			control = sctp_build_readq_entry_chk(stcb, chk);
424 			if (control == NULL) {
425 				/* out of memory? */
426 				return;
427 			}
428 			/* save it off for our future deliveries */
429 			stcb->asoc.control_pdapi = control;
430 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
431 				end = 1;
432 			else
433 				end = 0;
434 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 			sctp_add_to_readq(stcb->sctp_ep,
436 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
437 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
438 			cntDel++;
439 		} else {
440 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
441 				end = 1;
442 			else
443 				end = 0;
444 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 			    stcb->asoc.control_pdapi,
447 			    chk->data, end, chk->rec.data.TSN_seq,
448 			    &stcb->sctp_socket->so_rcv)) {
449 				/*
450 				 * something is very wrong, either
451 				 * control_pdapi is NULL, or the tail_mbuf
452 				 * is corrupt, or there is an EOM already on
453 				 * the mbuf chain.
454 				 */
455 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
456 					goto abandon;
457 				} else {
458 #ifdef INVARIANTS
459 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 						panic("This should not happen control_pdapi NULL?");
461 					}
462 					/* if we did not panic, it was an EOM */
463 					panic("Bad chunking ??");
464 #else
465 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
467 					}
468 					SCTP_PRINTF("Bad chunking ??\n");
469 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
470 
471 #endif
472 					goto abandon;
473 				}
474 			}
475 			cntDel++;
476 		}
477 		/* pull it we did it */
478 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
479 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 			asoc->fragmented_delivery_inprogress = 0;
481 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 				asoc->strmin[stream_no].last_sequence_delivered++;
483 			}
484 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
486 			}
487 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
488 			/*
489 			 * turn the flag back on since we just  delivered
490 			 * yet another one.
491 			 */
492 			asoc->fragmented_delivery_inprogress = 1;
493 		}
494 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
498 
499 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 		asoc->size_on_reasm_queue -= chk->send_size;
501 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 		/* free up the chk */
503 		chk->data = NULL;
504 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
505 
506 		if (asoc->fragmented_delivery_inprogress == 0) {
507 			/*
508 			 * Now let's see if we can deliver the next one on
509 			 * the stream
510 			 */
511 			struct sctp_stream_in *strm;
512 
513 			strm = &asoc->strmin[stream_no];
514 			nxt_todel = strm->last_sequence_delivered + 1;
515 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 				/* Deliver more if we can. */
517 				if (nxt_todel == ctl->sinfo_ssn) {
518 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 					asoc->size_on_all_streams -= ctl->length;
520 					sctp_ucount_decr(asoc->cnt_on_all_streams);
521 					strm->last_sequence_delivered++;
522 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 					sctp_add_to_readq(stcb->sctp_ep, stcb,
524 					    ctl,
525 					    &stcb->sctp_socket->so_rcv, 1,
526 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
527 				} else {
528 					break;
529 				}
530 				nxt_todel = strm->last_sequence_delivered + 1;
531 			}
532 			break;
533 		}
534 	}
535 }
536 
537 /*
538  * Queue the chunk either right into the socket buffer, if it is the next one
539  * to go, OR put it in the correct place in the delivery queue.  If we do
540  * append to the so_buf, keep doing so until we are out of order. One big
541  * question still remains: what to do when the socket buffer is FULL?
542  */
543 static void
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545     struct sctp_queued_to_read *control, int *abort_flag)
546 {
547 	/*
548 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549 	 * all the data in one stream this could happen quite rapidly. One
550 	 * could use the TSN to keep track of things, but this scheme breaks
551 	 * down in the other type of stream usage that could occur. Send a
552 	 * single msg to stream 0, send 4 billion messages to stream 1, now
553 	 * send a message to stream 0. You have a situation where the TSN
554 	 * has wrapped but not in the stream. Is this worth worrying about,
555 	 * or should we just change our queue sort at the bottom to be by
556 	 * TSN?
557 	 *
558 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 	 * assignment this could happen... and I don't see how this would be
561 	 * a violation. So for now I am undecided and will leave the sort by
562 	 * SSN alone. Maybe a hybrid approach is the answer.
563 	 *
564 	 */
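	/*
	 * Aside (illustrative, not copied from the headers): the
	 * SCTP_SSN_GT/GE comparisons used here are serial-number
	 * comparisons modulo 2^16, i.e. something along the lines of
	 *
	 *	ssn_gt(a, b) := ((a < b) && ((uint16_t)(b - a) > (1 << 15))) ||
	 *	                ((a > b) && ((uint16_t)(a - b) < (1 << 15)))
	 *
	 * so that, for example, SSN 0x0001 still compares as "greater than"
	 * SSN 0xFFFE once the sequence space has wrapped.
	 */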
565 	struct sctp_stream_in *strm;
566 	struct sctp_queued_to_read *at;
567 	int queue_needed;
568 	uint16_t nxt_todel;
569 	struct mbuf *op_err;
570 	char msg[SCTP_DIAG_INFO_LEN];
571 
572 	queue_needed = 1;
573 	asoc->size_on_all_streams += control->length;
574 	sctp_ucount_incr(asoc->cnt_on_all_streams);
575 	strm = &asoc->strmin[control->sinfo_stream];
576 	nxt_todel = strm->last_sequence_delivered + 1;
577 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 	}
580 	SCTPDBG(SCTP_DEBUG_INDATA1,
581 	    "queue to stream called for sid:%u ssn:%u tsn:%u lastdel:%u nxt:%u\n",
582 	    (uint32_t) control->sinfo_stream, (uint32_t) control->sinfo_ssn,
583 	    (uint32_t) control->sinfo_tsn,
584 	    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
585 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586 		/* The incoming sseq is behind where we last delivered? */
587 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588 		    control->sinfo_ssn, strm->last_sequence_delivered);
589 protocol_error:
590 		/*
591 		 * throw it in the stream so it gets cleaned up in
592 		 * association destruction
593 		 */
594 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596 		    strm->last_sequence_delivered, control->sinfo_tsn,
597 		    control->sinfo_stream, control->sinfo_ssn);
598 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
601 		*abort_flag = 1;
602 		return;
603 
604 	}
605 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
606 	struct socket *so;
607 
608 	so = SCTP_INP_SO(stcb->sctp_ep);
609 	atomic_add_int(&stcb->asoc.refcnt, 1);
610 	SCTP_TCB_UNLOCK(stcb);
611 	SCTP_SOCKET_LOCK(so, 1);
612 	SCTP_TCB_LOCK(stcb);
613 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
614 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
615 		SCTP_SOCKET_UNLOCK(so, 1);
616 		return;
617 	}
618 #endif
619 	if (nxt_todel == control->sinfo_ssn) {
620 		/* can be delivered right away? */
621 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
622 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
623 		}
624 		/* EY it won't be queued if it could be delivered directly */
625 		queue_needed = 0;
626 		asoc->size_on_all_streams -= control->length;
627 		sctp_ucount_decr(asoc->cnt_on_all_streams);
628 		strm->last_sequence_delivered++;
629 
630 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
631 		sctp_add_to_readq(stcb->sctp_ep, stcb,
632 		    control,
633 		    &stcb->sctp_socket->so_rcv, 1,
634 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
635 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
636 			/* all delivered */
637 			nxt_todel = strm->last_sequence_delivered + 1;
638 			if (nxt_todel == control->sinfo_ssn) {
639 				TAILQ_REMOVE(&strm->inqueue, control, next);
640 				asoc->size_on_all_streams -= control->length;
641 				sctp_ucount_decr(asoc->cnt_on_all_streams);
642 				strm->last_sequence_delivered++;
643 				/*
644 				 * We ignore the return of deliver_data here
645 				 * since we can always hold the chunk on the
646 				 * d-queue. And we have a finite number that
647 				 * can be delivered from the strq.
648 				 */
649 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
650 					sctp_log_strm_del(control, NULL,
651 					    SCTP_STR_LOG_FROM_IMMED_DEL);
652 				}
653 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
654 				sctp_add_to_readq(stcb->sctp_ep, stcb,
655 				    control,
656 				    &stcb->sctp_socket->so_rcv, 1,
657 				    SCTP_READ_LOCK_NOT_HELD,
658 				    SCTP_SO_LOCKED);
659 				continue;
660 			}
661 			break;
662 		}
663 	}
664 	if (queue_needed) {
665 		/*
666 		 * Ok, we did not deliver this guy, find the correct place
667 		 * to put it on the queue.
668 		 */
669 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
670 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
671 			SCTP_SOCKET_UNLOCK(so, 1);
672 #endif
673 			goto protocol_error;
674 		}
675 		if (TAILQ_EMPTY(&strm->inqueue)) {
676 			/* Empty queue */
677 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
678 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
679 			}
680 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
681 		} else {
682 			TAILQ_FOREACH(at, &strm->inqueue, next) {
683 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
684 					/*
685 					 * one in queue is bigger than the
686 					 * new one, insert before this one
687 					 */
688 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
689 						sctp_log_strm_del(control, at,
690 						    SCTP_STR_LOG_FROM_INSERT_MD);
691 					}
692 					TAILQ_INSERT_BEFORE(at, control, next);
693 					break;
694 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
695 					/*
696 					 * Gak, He sent me a duplicate str
697 					 * seq number
698 					 */
699 					/*
700 					 * foo bar, I guess I will just free
701 					 * this new guy, should we abort
702 					 * too? FIX ME MAYBE? Or it COULD be
703 					 * that the SSN's have wrapped.
704 					 * Maybe I should compare to TSN
705 					 * somehow... sigh for now just blow
706 					 * away the chunk!
707 					 */
708 
709 					if (control->data)
710 						sctp_m_freem(control->data);
711 					control->data = NULL;
712 					asoc->size_on_all_streams -= control->length;
713 					sctp_ucount_decr(asoc->cnt_on_all_streams);
714 					if (control->whoFrom) {
715 						sctp_free_remote_addr(control->whoFrom);
716 						control->whoFrom = NULL;
717 					}
718 					sctp_free_a_readq(stcb, control);
719 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
720 					SCTP_SOCKET_UNLOCK(so, 1);
721 #endif
722 					return;
723 				} else {
724 					if (TAILQ_NEXT(at, next) == NULL) {
725 						/*
726 						 * We are at the end, insert
727 						 * it after this one
728 						 */
729 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
730 							sctp_log_strm_del(control, at,
731 							    SCTP_STR_LOG_FROM_INSERT_TL);
732 						}
733 						TAILQ_INSERT_AFTER(&strm->inqueue,
734 						    at, control, next);
735 						break;
736 					}
737 				}
738 			}
739 		}
740 	}
741 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
742 	SCTP_SOCKET_UNLOCK(so, 1);
743 #endif
744 }
745 
746 /*
747  * Returns two things: You get the total size of the deliverable parts of the
748  * Returns two things: the total size (via *t_size) of the deliverable parts of
749  * the first fragmented message on the reassembly queue, and a 1 if all of the
750  * message is ready or a 0 if the message is still incomplete.
751 static int
752 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
753 {
754 	struct sctp_tmit_chunk *chk;
755 	uint32_t tsn;
756 
757 	*t_size = 0;
758 	chk = TAILQ_FIRST(&asoc->reasmqueue);
759 	if (chk == NULL) {
760 		/* nothing on the queue */
761 		return (0);
762 	}
763 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
764 		/* Not a first on the queue */
765 		return (0);
766 	}
767 	tsn = chk->rec.data.TSN_seq;
768 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
769 		if (tsn != chk->rec.data.TSN_seq) {
770 			return (0);
771 		}
772 		*t_size += chk->send_size;
773 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
774 			return (1);
775 		}
776 		tsn++;
777 	}
778 	return (0);
779 }
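
/*
 * Worked example of the check above (illustrative TSNs only): with
 * TSN 100 (B-bit set), 101 (middle) and 102 (E-bit set) all queued, the
 * walk sees consecutive TSNs ending in a LAST fragment, sets *t_size to
 * the sum of the three send_size values and returns 1.  If TSN 102 had
 * not arrived yet, or TSN 101 were missing and broke the consecutive
 * run, it would return 0 instead.
 */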
780 
781 static void
782 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
783 {
784 	struct sctp_tmit_chunk *chk;
785 	uint16_t nxt_todel;
786 	uint32_t tsize, pd_point;
787 
788 doit_again:
789 	chk = TAILQ_FIRST(&asoc->reasmqueue);
790 	if (chk == NULL) {
791 		/* Huh? */
792 		asoc->size_on_reasm_queue = 0;
793 		asoc->cnt_on_reasm_queue = 0;
794 		return;
795 	}
796 	if (asoc->fragmented_delivery_inprogress == 0) {
797 		nxt_todel =
798 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
799 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
800 		    (nxt_todel == chk->rec.data.stream_seq ||
801 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
802 			/*
803 			 * Yep the first one is here and it's ok to deliver
804 			 * but should we?
805 			 */
806 			if (stcb->sctp_socket) {
807 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
808 				    stcb->sctp_ep->partial_delivery_point);
809 			} else {
810 				pd_point = stcb->sctp_ep->partial_delivery_point;
811 			}
812 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
813 				/*
814 				 * Yes, we set up to start reception by
815 				 * backing down the TSN just in case we
816 				 * can't deliver.
817 				 */
818 				asoc->fragmented_delivery_inprogress = 1;
819 				asoc->tsn_last_delivered =
820 				    chk->rec.data.TSN_seq - 1;
821 				asoc->str_of_pdapi =
822 				    chk->rec.data.stream_number;
823 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
824 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
825 				asoc->fragment_flags = chk->rec.data.rcv_flags;
826 				sctp_service_reassembly(stcb, asoc);
827 			}
828 		}
829 	} else {
830 		/*
831 		 * Service re-assembly will deliver stream data queued at
832 		 * the end of fragmented delivery, but it won't know to go
833 		 * back and call itself again. We do that here with the
834 		 * doit_again goto.
835 		 */
836 		sctp_service_reassembly(stcb, asoc);
837 		if (asoc->fragmented_delivery_inprogress == 0) {
838 			/*
839 			 * finished our Fragmented delivery, could be more
840 			 * waiting?
841 			 */
842 			goto doit_again;
843 		}
844 	}
845 }
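
/*
 * Illustrative numbers for the pd_point logic above (the shift value is
 * assumed purely for the example): with a 262144-byte receive buffer
 * limit and SCTP_PARTIAL_DELIVERY_SHIFT taken as 8, the buffer-derived
 * bound is 262144 >> 8 = 1024; with a partial_delivery_point of 4096,
 * pd_point = min(1024, 4096) = 1024, so a still-incomplete fragmented
 * message becomes eligible for partial delivery once at least 1024
 * contiguous bytes of it sit on the reassembly queue.
 */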
846 
847 /*
848  * Dump onto the re-assembly queue, in its proper place. After dumping on the
849  * queue, see if anything can be delivered. If so pull it off (or as much as
850  * we can). If we run out of space then we must dump what we can and set the
851  * appropriate flag to say we queued what we could.
852  */
853 static void
854 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
855     struct sctp_tmit_chunk *chk, int *abort_flag)
856 {
857 	struct mbuf *op_err;
858 	char msg[SCTP_DIAG_INFO_LEN];
859 	uint32_t cum_ackp1, prev_tsn, post_tsn;
860 	struct sctp_tmit_chunk *at, *prev, *next;
861 
862 	prev = next = NULL;
863 	cum_ackp1 = asoc->tsn_last_delivered + 1;
864 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
865 		/* This is the first one on the queue */
866 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
867 		/*
868 		 * we do not check for delivery of anything when only one
869 		 * fragment is here
870 		 */
871 		asoc->size_on_reasm_queue = chk->send_size;
872 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
873 		if (chk->rec.data.TSN_seq == cum_ackp1) {
874 			if (asoc->fragmented_delivery_inprogress == 0 &&
875 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
876 			    SCTP_DATA_FIRST_FRAG) {
877 				/*
878 				 * An empty queue, no delivery in progress,
879 				 * we hit the next one and it does NOT have
880 				 * a FIRST fragment mark.
881 				 */
882 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
883 				snprintf(msg, sizeof(msg),
884 				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
885 				    chk->rec.data.TSN_seq,
886 				    chk->rec.data.stream_number,
887 				    chk->rec.data.stream_seq);
888 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
889 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
891 				*abort_flag = 1;
892 			} else if (asoc->fragmented_delivery_inprogress &&
893 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
894 				/*
895 				 * We are doing a partial delivery and the
896 				 * NEXT chunk MUST be either the LAST or
897 				 * MIDDLE fragment NOT a FIRST
898 				 */
899 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900 				snprintf(msg, sizeof(msg),
901 				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
902 				    chk->rec.data.TSN_seq,
903 				    chk->rec.data.stream_number,
904 				    chk->rec.data.stream_seq);
905 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
906 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
907 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
908 				*abort_flag = 1;
909 			} else if (asoc->fragmented_delivery_inprogress) {
910 				/*
911 				 * Here we are ok with a MIDDLE or LAST
912 				 * piece
913 				 */
914 				if (chk->rec.data.stream_number !=
915 				    asoc->str_of_pdapi) {
916 					/* Got to be the right STR No */
917 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
918 					    chk->rec.data.stream_number,
919 					    asoc->str_of_pdapi);
920 					snprintf(msg, sizeof(msg),
921 					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
922 					    asoc->str_of_pdapi,
923 					    chk->rec.data.TSN_seq,
924 					    chk->rec.data.stream_number,
925 					    chk->rec.data.stream_seq);
926 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
927 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
928 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
929 					*abort_flag = 1;
930 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
931 					    SCTP_DATA_UNORDERED &&
932 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
933 					/* Got to be the right STR Seq */
934 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
935 					    chk->rec.data.stream_seq,
936 					    asoc->ssn_of_pdapi);
937 					snprintf(msg, sizeof(msg),
938 					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
939 					    asoc->ssn_of_pdapi,
940 					    chk->rec.data.TSN_seq,
941 					    chk->rec.data.stream_number,
942 					    chk->rec.data.stream_seq);
943 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
944 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
945 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
946 					*abort_flag = 1;
947 				}
948 			}
949 		}
950 		return;
951 	}
952 	/* Find its place */
953 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
954 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
955 			/*
956 			 * one in queue is bigger than the new one, insert
957 			 * before this one
958 			 */
959 			/* A check */
960 			asoc->size_on_reasm_queue += chk->send_size;
961 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
962 			next = at;
963 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
964 			break;
965 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
966 			/* Gak, He sent me a duplicate str seq number */
967 			/*
968 			 * foo bar, I guess I will just free this new guy,
969 			 * should we abort too? FIX ME MAYBE? Or it COULD be
970 			 * that the SSN's have wrapped. Maybe I should
971 			 * compare to TSN somehow... sigh for now just blow
972 			 * away the chunk!
973 			 */
974 			if (chk->data) {
975 				sctp_m_freem(chk->data);
976 				chk->data = NULL;
977 			}
978 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
979 			return;
980 		} else {
981 			prev = at;
982 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
983 				/*
984 				 * We are at the end, insert it after this
985 				 * one
986 				 */
987 				/* check it first */
988 				asoc->size_on_reasm_queue += chk->send_size;
989 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
990 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
991 				break;
992 			}
993 		}
994 	}
995 	/* Now the audits */
996 	if (prev) {
997 		prev_tsn = chk->rec.data.TSN_seq - 1;
998 		if (prev_tsn == prev->rec.data.TSN_seq) {
999 			/*
1000 			 * Ok the one I am dropping onto the end is the
1001 			 * NEXT. A bit of validation here.
1002 			 */
1003 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1004 			    SCTP_DATA_FIRST_FRAG ||
1005 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1006 			    SCTP_DATA_MIDDLE_FRAG) {
1007 				/*
1008 				 * Insert chk MUST be a MIDDLE or LAST
1009 				 * fragment
1010 				 */
1011 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1012 				    SCTP_DATA_FIRST_FRAG) {
1013 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
1014 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1015 					snprintf(msg, sizeof(msg),
1016 					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1017 					    chk->rec.data.TSN_seq,
1018 					    chk->rec.data.stream_number,
1019 					    chk->rec.data.stream_seq);
1020 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1021 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1022 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1023 					*abort_flag = 1;
1024 					return;
1025 				}
1026 				if (chk->rec.data.stream_number !=
1027 				    prev->rec.data.stream_number) {
1028 					/*
1029 					 * Huh, need the correct STR here,
1030 					 * they must be the same.
1031 					 */
1032 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1033 					    chk->rec.data.stream_number,
1034 					    prev->rec.data.stream_number);
1035 					snprintf(msg, sizeof(msg),
1036 					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1037 					    prev->rec.data.stream_number,
1038 					    chk->rec.data.TSN_seq,
1039 					    chk->rec.data.stream_number,
1040 					    chk->rec.data.stream_seq);
1041 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1042 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1043 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1044 					*abort_flag = 1;
1045 					return;
1046 				}
1047 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1048 				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1049 					/*
1050 					 * Huh, need the same ordering here,
1051 					 * they must be the same.
1052 					 */
1053 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1054 					snprintf(msg, sizeof(msg),
1055 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1056 					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1057 					    chk->rec.data.TSN_seq,
1058 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1059 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1060 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1061 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1062 					*abort_flag = 1;
1063 					return;
1064 				}
1065 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1066 				    chk->rec.data.stream_seq !=
1067 				    prev->rec.data.stream_seq) {
1068 					/*
1069 					 * Huh, need the correct STR here,
1070 					 * they must be the same.
1071 					 */
1072 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1073 					    chk->rec.data.stream_seq,
1074 					    prev->rec.data.stream_seq);
1075 					snprintf(msg, sizeof(msg),
1076 					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1077 					    prev->rec.data.stream_seq,
1078 					    chk->rec.data.TSN_seq,
1079 					    chk->rec.data.stream_number,
1080 					    chk->rec.data.stream_seq);
1081 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1082 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1083 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1084 					*abort_flag = 1;
1085 					return;
1086 				}
1087 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1088 			    SCTP_DATA_LAST_FRAG) {
1089 				/* Insert chk MUST be a FIRST */
1090 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1091 				    SCTP_DATA_FIRST_FRAG) {
1092 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1093 					snprintf(msg, sizeof(msg),
1094 					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1095 					    chk->rec.data.TSN_seq,
1096 					    chk->rec.data.stream_number,
1097 					    chk->rec.data.stream_seq);
1098 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1099 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1100 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1101 					*abort_flag = 1;
1102 					return;
1103 				}
1104 			}
1105 		}
1106 	}
1107 	if (next) {
1108 		post_tsn = chk->rec.data.TSN_seq + 1;
1109 		if (post_tsn == next->rec.data.TSN_seq) {
1110 			/*
1111 			 * Ok the one I am inserting ahead of is my NEXT
1112 			 * one. A bit of validation here.
1113 			 */
1114 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1115 				/* Insert chk MUST be a last fragment */
1116 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1117 				    != SCTP_DATA_LAST_FRAG) {
1118 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1119 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1120 					snprintf(msg, sizeof(msg),
1121 					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122 					    chk->rec.data.TSN_seq,
1123 					    chk->rec.data.stream_number,
1124 					    chk->rec.data.stream_seq);
1125 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1127 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1128 					*abort_flag = 1;
1129 					return;
1130 				}
1131 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1132 				    SCTP_DATA_MIDDLE_FRAG ||
1133 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1134 			    SCTP_DATA_LAST_FRAG) {
1135 				/*
1136 				 * Insert chk CAN be MIDDLE or FIRST NOT
1137 				 * LAST
1138 				 */
1139 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1140 				    SCTP_DATA_LAST_FRAG) {
1141 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1142 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1143 					snprintf(msg, sizeof(msg),
1144 					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1145 					    chk->rec.data.TSN_seq,
1146 					    chk->rec.data.stream_number,
1147 					    chk->rec.data.stream_seq);
1148 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1149 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1150 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1151 					*abort_flag = 1;
1152 					return;
1153 				}
1154 				if (chk->rec.data.stream_number !=
1155 				    next->rec.data.stream_number) {
1156 					/*
1157 					 * Huh, need the correct STR here,
1158 					 * they must be the same.
1159 					 */
1160 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1161 					    chk->rec.data.stream_number,
1162 					    next->rec.data.stream_number);
1163 					snprintf(msg, sizeof(msg),
1164 					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1165 					    next->rec.data.stream_number,
1166 					    chk->rec.data.TSN_seq,
1167 					    chk->rec.data.stream_number,
1168 					    chk->rec.data.stream_seq);
1169 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1170 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1171 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1172 					*abort_flag = 1;
1173 					return;
1174 				}
1175 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1176 				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1177 					/*
1178 					 * Huh, need the same ordering here,
1179 					 * they must be the same.
1180 					 */
1181 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1182 					snprintf(msg, sizeof(msg),
1183 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1184 					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1185 					    chk->rec.data.TSN_seq,
1186 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1187 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1188 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1189 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1190 					*abort_flag = 1;
1191 					return;
1192 				}
1193 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1194 				    chk->rec.data.stream_seq !=
1195 				    next->rec.data.stream_seq) {
1196 					/*
1197 					 * Huh, need the correct STR here,
1198 					 * they must be the same.
1199 					 */
1200 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1201 					    chk->rec.data.stream_seq,
1202 					    next->rec.data.stream_seq);
1203 					snprintf(msg, sizeof(msg),
1204 					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1205 					    next->rec.data.stream_seq,
1206 					    chk->rec.data.TSN_seq,
1207 					    chk->rec.data.stream_number,
1208 					    chk->rec.data.stream_seq);
1209 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1210 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1211 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1212 					*abort_flag = 1;
1213 					return;
1214 				}
1215 			}
1216 		}
1217 	}
1218 	/* Do we need to do some delivery? check */
1219 	sctp_deliver_reasm_check(stcb, asoc);
1220 }
1221 
1222 /*
1223  * This is an unfortunate routine. It checks to make sure an evil guy is not
1224  * stuffing us full of bad packet fragments. A broken peer could also do this,
1225  * but this is doubtful. It is too bad I must worry about evil crackers, sigh
1226  * :< more cycles.
1227  */
1228 static int
1229 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1230     uint32_t TSN_seq)
1231 {
1232 	struct sctp_tmit_chunk *at;
1233 	uint32_t tsn_est;
1234 
1235 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1236 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1237 			/* is it one bigger? */
1238 			tsn_est = at->rec.data.TSN_seq + 1;
1239 			if (tsn_est == TSN_seq) {
1240 				/* yep. It better be a last then */
1241 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1242 				    SCTP_DATA_LAST_FRAG) {
1243 					/*
1244 					 * Ok this guy belongs next to a guy
1245 					 * that is NOT last, it should be a
1246 					 * middle/last, not a complete
1247 					 * chunk.
1248 					 */
1249 					return (1);
1250 				} else {
1251 					/*
1252 					 * This guy is ok since it's a LAST
1253 					 * and the new chunk is a fully
1254 					 * self-contained one.
1255 					 */
1256 					return (0);
1257 				}
1258 			}
1259 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1260 			/* Software error since I have a dup? */
1261 			return (1);
1262 		} else {
1263 			/*
1264 			 * Ok, 'at' is larger than new chunk but does it
1265 			 * need to be right before it?
1266 			 */
1267 			tsn_est = TSN_seq + 1;
1268 			if (tsn_est == at->rec.data.TSN_seq) {
1269 				/* Yep, It better be a first */
1270 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1271 				    SCTP_DATA_FIRST_FRAG) {
1272 					return (1);
1273 				} else {
1274 					return (0);
1275 				}
1276 			}
1277 		}
1278 	}
1279 	return (0);
1280 }
1281 
1282 static int
1283 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1284     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1285     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1286     int *break_flag, int last_chunk)
1287 {
1288 	/* Process a data chunk */
1289 	/* struct sctp_tmit_chunk *chk; */
1290 	struct sctp_tmit_chunk *chk;
1291 	uint32_t tsn, gap;
1292 	struct mbuf *dmbuf;
1293 	int the_len;
1294 	int need_reasm_check = 0;
1295 	uint16_t strmno, strmseq;
1296 	struct mbuf *op_err;
1297 	char msg[SCTP_DIAG_INFO_LEN];
1298 	struct sctp_queued_to_read *control;
1299 	int ordered;
1300 	uint32_t protocol_id;
1301 	uint8_t chunk_flags;
1302 	struct sctp_stream_reset_list *liste;
1303 
1304 	chk = NULL;
1305 	tsn = ntohl(ch->dp.tsn);
1306 	chunk_flags = ch->ch.chunk_flags;
1307 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1308 		asoc->send_sack = 1;
1309 	}
1310 	protocol_id = ch->dp.protocol_id;
1311 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1312 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1313 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1314 	}
1315 	if (stcb == NULL) {
1316 		return (0);
1317 	}
1318 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1319 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1320 		/* It is a duplicate */
1321 		SCTP_STAT_INCR(sctps_recvdupdata);
1322 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1323 			/* Record a dup for the next outbound sack */
1324 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1325 			asoc->numduptsns++;
1326 		}
1327 		asoc->send_sack = 1;
1328 		return (0);
1329 	}
1330 	/* Calculate the number of TSN's between the base and this TSN */
1331 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1332 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1333 		/* Can't hold the bit in the mapping at max array, toss it */
1334 		return (0);
1335 	}
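	/*
	 * Example of the gap computation (illustrative values): with
	 * mapping_array_base_tsn = 1000 and tsn = 1005 the gap is 5; with a
	 * wrapped TSN space, base = 0xfffffff0 and tsn = 0x00000005 yield a
	 * gap of 0x15 (21), since the computation is effectively a
	 * subtraction modulo 2^32.
	 */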
1336 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1337 		SCTP_TCB_LOCK_ASSERT(stcb);
1338 		if (sctp_expand_mapping_array(asoc, gap)) {
1339 			/* Can't expand, drop it */
1340 			return (0);
1341 		}
1342 	}
1343 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1344 		*high_tsn = tsn;
1345 	}
1346 	/* See if we have received this one already */
1347 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1348 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1349 		SCTP_STAT_INCR(sctps_recvdupdata);
1350 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1351 			/* Record a dup for the next outbound sack */
1352 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1353 			asoc->numduptsns++;
1354 		}
1355 		asoc->send_sack = 1;
1356 		return (0);
1357 	}
1358 	/*
1359 	 * Check to see about the GONE flag, duplicates would cause a sack
1360 	 * to be sent up above
1361 	 */
1362 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1363 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1364 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1365 		/*
1366 		 * wait a minute, this guy is gone, there is no longer a
1367 		 * receiver. Send peer an ABORT!
1368 		 */
1369 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1370 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1371 		*abort_flag = 1;
1372 		return (0);
1373 	}
1374 	/*
1375 	 * Now before going further we see if there is room. If NOT then we
1376 	 * MAY let one through only IF this TSN is the one we are waiting
1377 	 * for on a partial delivery API.
1378 	 */
1379 
1380 	/* now do the tests */
1381 	if (((asoc->cnt_on_all_streams +
1382 	    asoc->cnt_on_reasm_queue +
1383 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1384 	    (((int)asoc->my_rwnd) <= 0)) {
1385 		/*
1386 		 * When we have NO room in the rwnd we check to make sure
1387 		 * the reader is doing its job...
1388 		 */
1389 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1390 			/* some to read, wake-up */
1391 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1392 			struct socket *so;
1393 
1394 			so = SCTP_INP_SO(stcb->sctp_ep);
1395 			atomic_add_int(&stcb->asoc.refcnt, 1);
1396 			SCTP_TCB_UNLOCK(stcb);
1397 			SCTP_SOCKET_LOCK(so, 1);
1398 			SCTP_TCB_LOCK(stcb);
1399 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1400 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1401 				/* assoc was freed while we were unlocked */
1402 				SCTP_SOCKET_UNLOCK(so, 1);
1403 				return (0);
1404 			}
1405 #endif
1406 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1407 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1408 			SCTP_SOCKET_UNLOCK(so, 1);
1409 #endif
1410 		}
1411 		/* now is it in the mapping array of what we have accepted? */
1412 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1413 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1414 			/* Nope, not in the valid range, dump it */
1415 			sctp_set_rwnd(stcb, asoc);
1416 			if ((asoc->cnt_on_all_streams +
1417 			    asoc->cnt_on_reasm_queue +
1418 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1419 				SCTP_STAT_INCR(sctps_datadropchklmt);
1420 			} else {
1421 				SCTP_STAT_INCR(sctps_datadroprwnd);
1422 			}
1423 			*break_flag = 1;
1424 			return (0);
1425 		}
1426 	}
1427 	strmno = ntohs(ch->dp.stream_id);
1428 	if (strmno >= asoc->streamincnt) {
1429 		struct sctp_error_invalid_stream *cause;
1430 
1431 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1432 		    0, M_NOWAIT, 1, MT_DATA);
1433 		if (op_err != NULL) {
1434 			/* add some space up front so prepend will work well */
1435 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1436 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1437 			/*
1438 			 * Error causes are just params and this one has
1439 			 * two back-to-back parameter headers, one with the error type
1440 			 * and size, the other with the stream id and a reserved field.
1441 			 */
1442 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1443 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1444 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1445 			cause->stream_id = ch->dp.stream_id;
1446 			cause->reserved = htons(0);
1447 			sctp_queue_op_err(stcb, op_err);
1448 		}
1449 		SCTP_STAT_INCR(sctps_badsid);
1450 		SCTP_TCB_LOCK_ASSERT(stcb);
1451 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1452 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1453 			asoc->highest_tsn_inside_nr_map = tsn;
1454 		}
1455 		if (tsn == (asoc->cumulative_tsn + 1)) {
1456 			/* Update cum-ack */
1457 			asoc->cumulative_tsn = tsn;
1458 		}
1459 		return (0);
1460 	}
1461 	/*
1462 	 * Before we continue let's validate that we are not being fooled by
1463 	 * an evil attacker. We can only have 4k chunks based on our TSN
1464 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1465 	 * way our stream sequence numbers could have wrapped. We of course
1466 	 * only validate the FIRST fragment, so the bit must be set.
1467 	 */
1468 	strmseq = ntohs(ch->dp.stream_sequence);
1469 #ifdef SCTP_ASOCLOG_OF_TSNS
1470 	SCTP_TCB_LOCK_ASSERT(stcb);
1471 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1472 		asoc->tsn_in_at = 0;
1473 		asoc->tsn_in_wrapped = 1;
1474 	}
1475 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1476 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1477 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1478 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1479 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1480 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1481 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1482 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1483 	asoc->tsn_in_at++;
1484 #endif
1485 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1486 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1487 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1488 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1489 		/* The incoming sseq is behind where we last delivered? */
1490 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1491 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1492 
1493 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1494 		    asoc->strmin[strmno].last_sequence_delivered,
1495 		    tsn, strmno, strmseq);
1496 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1497 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1498 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1499 		*abort_flag = 1;
1500 		return (0);
1501 	}
1502 	/************************************
1503 	 * From here down we may find ch-> invalid,
1504 	 * so it's a good idea NOT to use it.
1505 	 *************************************/
1506 
1507 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1508 	if (last_chunk == 0) {
1509 		dmbuf = SCTP_M_COPYM(*m,
1510 		    (offset + sizeof(struct sctp_data_chunk)),
1511 		    the_len, M_NOWAIT);
1512 #ifdef SCTP_MBUF_LOGGING
1513 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1514 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1515 		}
1516 #endif
1517 	} else {
1518 		/* We can steal the last chunk */
1519 		int l_len;
1520 
1521 		dmbuf = *m;
1522 		/* lop off the top part */
1523 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1524 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1525 			l_len = SCTP_BUF_LEN(dmbuf);
1526 		} else {
1527 			/*
1528 			 * need to count up the size; hopefully we do not
1529 			 * hit this too often :-0
1530 			 */
1531 			struct mbuf *lat;
1532 
1533 			l_len = 0;
1534 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1535 				l_len += SCTP_BUF_LEN(lat);
1536 			}
1537 		}
1538 		if (l_len > the_len) {
1539 			/* Trim the extra bytes off the end too */
1540 			m_adj(dmbuf, -(l_len - the_len));
1541 		}
1542 	}
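	/*
	 * At this point dmbuf (either a fresh copy or the stolen tail of *m)
	 * should hold exactly the_len bytes of user payload for this chunk,
	 * or NULL if the copy failed.
	 */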
1543 	if (dmbuf == NULL) {
1544 		SCTP_STAT_INCR(sctps_nomem);
1545 		return (0);
1546 	}
1547 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1548 	    asoc->fragmented_delivery_inprogress == 0 &&
1549 	    TAILQ_EMPTY(&asoc->resetHead) &&
1550 	    ((ordered == 0) ||
1551 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1552 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1553 		/* Candidate for express delivery */
1554 		/*
1555 		 * It's not fragmented, no PD-API is up, nothing is in the
1556 		 * delivery queue, it's un-ordered OR ordered and the next to
1557 		 * deliver AND nothing else is stuck on the stream queue,
1558 		 * and there is room for it in the socket buffer. Let's just
1559 		 * stuff it up the buffer....
1560 		 */
1561 
1562 		/* It would be nice to avoid this copy if we could :< */
1563 		sctp_alloc_a_readq(stcb, control);
1564 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1565 		    protocol_id,
1566 		    strmno, strmseq,
1567 		    chunk_flags,
1568 		    dmbuf);
1569 		if (control == NULL) {
1570 			goto failed_express_del;
1571 		}
1572 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1573 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1574 			asoc->highest_tsn_inside_nr_map = tsn;
1575 		}
1576 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1577 		    control, &stcb->sctp_socket->so_rcv,
1578 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1579 
1580 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1581 			/* for ordered, bump what we delivered */
1582 			asoc->strmin[strmno].last_sequence_delivered++;
1583 		}
1584 		SCTP_STAT_INCR(sctps_recvexpress);
1585 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1586 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1587 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1588 		}
1589 		control = NULL;
1590 
1591 		goto finish_express_del;
1592 	}
1593 failed_express_del:
1594 	/* If we reach here this is a new chunk */
1595 	chk = NULL;
1596 	control = NULL;
1597 	/* Express for fragmented delivery? */
1598 	if ((asoc->fragmented_delivery_inprogress) &&
1599 	    (stcb->asoc.control_pdapi) &&
1600 	    (asoc->str_of_pdapi == strmno) &&
1601 	    (asoc->ssn_of_pdapi == strmseq)
1602 	    ) {
1603 		control = stcb->asoc.control_pdapi;
1604 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1605 			/* Can't be another first? */
1606 			goto failed_pdapi_express_del;
1607 		}
1608 		if (tsn == (control->sinfo_tsn + 1)) {
1609 			/* Yep, we can add it on */
1610 			int end = 0;
1611 
1612 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1613 				end = 1;
1614 			}
1615 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1616 			    tsn,
1617 			    &stcb->sctp_socket->so_rcv)) {
1618 				SCTP_PRINTF("Append fails end:%d\n", end);
1619 				goto failed_pdapi_express_del;
1620 			}
1621 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1622 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1623 				asoc->highest_tsn_inside_nr_map = tsn;
1624 			}
1625 			SCTP_STAT_INCR(sctps_recvexpressm);
1626 			asoc->tsn_last_delivered = tsn;
1627 			asoc->fragment_flags = chunk_flags;
1628 			asoc->tsn_of_pdapi_last_delivered = tsn;
1629 			asoc->last_flags_delivered = chunk_flags;
1630 			asoc->last_strm_seq_delivered = strmseq;
1631 			asoc->last_strm_no_delivered = strmno;
1632 			if (end) {
1633 				/* clean up the flags and such */
1634 				asoc->fragmented_delivery_inprogress = 0;
1635 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1636 					asoc->strmin[strmno].last_sequence_delivered++;
1637 				}
1638 				stcb->asoc.control_pdapi = NULL;
1639 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1640 					/*
1641 					 * There could be another message
1642 					 * ready
1643 					 */
1644 					need_reasm_check = 1;
1645 				}
1646 			}
1647 			control = NULL;
1648 			goto finish_express_del;
1649 		}
1650 	}
1651 failed_pdapi_express_del:
1652 	control = NULL;
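	/*
	 * With sctp_do_drain disabled the TSN is recorded in the
	 * non-renegable (nr) map; otherwise it goes into the regular map so
	 * that it could later be renege'd under memory pressure.
	 */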
1653 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1654 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1655 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1656 			asoc->highest_tsn_inside_nr_map = tsn;
1657 		}
1658 	} else {
1659 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1660 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1661 			asoc->highest_tsn_inside_map = tsn;
1662 		}
1663 	}
1664 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1665 		sctp_alloc_a_chunk(stcb, chk);
1666 		if (chk == NULL) {
1667 			/* No memory so we drop the chunk */
1668 			SCTP_STAT_INCR(sctps_nomem);
1669 			if (last_chunk == 0) {
1670 				/* we copied it, free the copy */
1671 				sctp_m_freem(dmbuf);
1672 			}
1673 			return (0);
1674 		}
1675 		chk->rec.data.TSN_seq = tsn;
1676 		chk->no_fr_allowed = 0;
1677 		chk->rec.data.stream_seq = strmseq;
1678 		chk->rec.data.stream_number = strmno;
1679 		chk->rec.data.payloadtype = protocol_id;
1680 		chk->rec.data.context = stcb->asoc.context;
1681 		chk->rec.data.doing_fast_retransmit = 0;
1682 		chk->rec.data.rcv_flags = chunk_flags;
1683 		chk->asoc = asoc;
1684 		chk->send_size = the_len;
1685 		chk->whoTo = net;
1686 		atomic_add_int(&net->ref_count, 1);
1687 		chk->data = dmbuf;
1688 	} else {
1689 		sctp_alloc_a_readq(stcb, control);
1690 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1691 		    protocol_id,
1692 		    strmno, strmseq,
1693 		    chunk_flags,
1694 		    dmbuf);
1695 		if (control == NULL) {
1696 			/* No memory so we drop the chunk */
1697 			SCTP_STAT_INCR(sctps_nomem);
1698 			if (last_chunk == 0) {
1699 				/* we copied it, free the copy */
1700 				sctp_m_freem(dmbuf);
1701 			}
1702 			return (0);
1703 		}
1704 		control->length = the_len;
1705 	}
1706 
1707 	/* Mark it as received */
1708 	/* Now queue it where it belongs */
1709 	if (control != NULL) {
1710 		/* First a sanity check */
1711 		if (asoc->fragmented_delivery_inprogress) {
1712 			/*
1713 			 * Ok, we have a fragmented delivery in progress; if
1714 			 * this chunk is next to deliver OR belongs, in our
1715 			 * view, to the reassembly queue, the peer is evil or
1716 			 * broken.
1717 			 */
1718 			uint32_t estimate_tsn;
1719 
1720 			estimate_tsn = asoc->tsn_last_delivered + 1;
1721 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1722 			    (estimate_tsn == control->sinfo_tsn)) {
1723 				/* Evil/Broken peer */
1724 				sctp_m_freem(control->data);
1725 				control->data = NULL;
1726 				if (control->whoFrom) {
1727 					sctp_free_remote_addr(control->whoFrom);
1728 					control->whoFrom = NULL;
1729 				}
1730 				sctp_free_a_readq(stcb, control);
1731 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1732 				    tsn, strmno, strmseq);
1733 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1734 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1735 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1736 				*abort_flag = 1;
1737 				if (last_chunk) {
1738 					*m = NULL;
1739 				}
1740 				return (0);
1741 			} else {
1742 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1743 					sctp_m_freem(control->data);
1744 					control->data = NULL;
1745 					if (control->whoFrom) {
1746 						sctp_free_remote_addr(control->whoFrom);
1747 						control->whoFrom = NULL;
1748 					}
1749 					sctp_free_a_readq(stcb, control);
1750 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1751 					    tsn, strmno, strmseq);
1752 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1753 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1754 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1755 					*abort_flag = 1;
1756 					if (last_chunk) {
1757 						*m = NULL;
1758 					}
1759 					return (0);
1760 				}
1761 			}
1762 		} else {
1763 			/* No PDAPI running */
1764 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1765 				/*
1766 				 * Reassembly queue is NOT empty; validate
1767 				 * that this tsn does not need to be in the
1768 				 * reassembly queue. If it does, then our peer
1769 				 * is broken or evil.
1770 				 */
1771 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1772 					sctp_m_freem(control->data);
1773 					control->data = NULL;
1774 					if (control->whoFrom) {
1775 						sctp_free_remote_addr(control->whoFrom);
1776 						control->whoFrom = NULL;
1777 					}
1778 					sctp_free_a_readq(stcb, control);
1779 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1780 					    tsn, strmno, strmseq);
1781 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1782 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1783 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1784 					*abort_flag = 1;
1785 					if (last_chunk) {
1786 						*m = NULL;
1787 					}
1788 					return (0);
1789 				}
1790 			}
1791 		}
1792 		/* ok, if we reach here we have passed the sanity checks */
1793 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1794 			/* queue directly into socket buffer */
1795 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1796 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1797 			    control,
1798 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1799 		} else {
1800 			/*
1801 			 * Special check for when streams are resetting. We
1802 			 * could be smarter about this and check the
1803 			 * actual stream to see if it is not being reset...
1804 			 * that way we would not create a HOLB when amongst
1805 			 * streams being reset and those not being reset.
1806 			 *
1807 			 * We take complete messages that have a stream reset
1808 			 * intervening (aka the TSN is after where our
1809 			 * cum-ack needs to be) off and put them on a
1810 			 * pending_reply_queue. The reassembly ones we do
1811 			 * not have to worry about since they are all sorted
1812 			 * and processed by TSN order. It is only the
1813 			 * singletons I must worry about.
1814 			 */
1815 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1816 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1817 				/*
1818 				 * yep, it's past where we need to reset... go
1819 				 * ahead and queue it.
1820 				 */
1821 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1822 					/* first one on */
1823 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1824 				} else {
1825 					struct sctp_queued_to_read *ctlOn,
1826 					                   *nctlOn;
1827 					unsigned char inserted = 0;
1828 
1829 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1830 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1831 							continue;
1832 						} else {
1833 							/* found it */
1834 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1835 							inserted = 1;
1836 							break;
1837 						}
1838 					}
1839 					if (inserted == 0) {
1840 						/*
1841 						 * not inserted before any
1842 						 * existing entry, so it must
1843 						 * be put at the end.
1844 						 */
1845 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1846 					}
1847 				}
1848 			} else {
1849 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1850 				if (*abort_flag) {
1851 					if (last_chunk) {
1852 						*m = NULL;
1853 					}
1854 					return (0);
1855 				}
1856 			}
1857 		}
1858 	} else {
1859 		/* Into the re-assembly queue */
1860 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1861 		if (*abort_flag) {
1862 			/*
1863 			 * the assoc is now gone and chk was put onto the
1864 			 * reasm queue, which has all been freed.
1865 			 */
1866 			if (last_chunk) {
1867 				*m = NULL;
1868 			}
1869 			return (0);
1870 		}
1871 	}
1872 finish_express_del:
1873 	if (tsn == (asoc->cumulative_tsn + 1)) {
1874 		/* Update cum-ack */
1875 		asoc->cumulative_tsn = tsn;
1876 	}
1877 	if (last_chunk) {
1878 		*m = NULL;
1879 	}
1880 	if (ordered) {
1881 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1882 	} else {
1883 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1884 	}
1885 	SCTP_STAT_INCR(sctps_recvdata);
1886 	/* Set it present please */
1887 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1888 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1889 	}
1890 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1891 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1892 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1893 	}
1894 	/* check the special flag for stream resets */
1895 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1896 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1897 		/*
1898 		 * we have finished working through the backlogged TSNs; now it is
1899 		 * time to reset streams. 1: call the reset function. 2: free
1900 		 * pending_reply space. 3: distribute any chunks in the
1901 		 * pending_reply_queue.
1902 		 */
1903 		struct sctp_queued_to_read *ctl, *nctl;
1904 
1905 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1906 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1907 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
1908 		SCTP_FREE(liste, SCTP_M_STRESET);
1909 		/* sa_ignore FREED_MEMORY */
1910 		liste = TAILQ_FIRST(&asoc->resetHead);
1911 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1912 			/* All can be removed */
1913 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1914 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1915 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1916 				if (*abort_flag) {
1917 					return (0);
1918 				}
1919 			}
1920 		} else {
1921 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1922 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1923 					break;
1924 				}
1925 				/*
1926 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1927 				 * process it which is the NOT of
1928 				 * ctl->sinfo_tsn > liste->tsn
1929 				 */
1930 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1931 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1932 				if (*abort_flag) {
1933 					return (0);
1934 				}
1935 			}
1936 		}
1937 		/*
1938 		 * Now service re-assembly to pick up anything that has been
1939 		 * held on reassembly queue?
1940 		 */
1941 		sctp_deliver_reasm_check(stcb, asoc);
1942 		need_reasm_check = 0;
1943 	}
1944 	if (need_reasm_check) {
1945 		/* Another one waits ? */
1946 		sctp_deliver_reasm_check(stcb, asoc);
1947 	}
1948 	return (1);
1949 }
1950 
1951 int8_t sctp_map_lookup_tab[256] = {
1952 	0, 1, 0, 2, 0, 1, 0, 3,
1953 	0, 1, 0, 2, 0, 1, 0, 4,
1954 	0, 1, 0, 2, 0, 1, 0, 3,
1955 	0, 1, 0, 2, 0, 1, 0, 5,
1956 	0, 1, 0, 2, 0, 1, 0, 3,
1957 	0, 1, 0, 2, 0, 1, 0, 4,
1958 	0, 1, 0, 2, 0, 1, 0, 3,
1959 	0, 1, 0, 2, 0, 1, 0, 6,
1960 	0, 1, 0, 2, 0, 1, 0, 3,
1961 	0, 1, 0, 2, 0, 1, 0, 4,
1962 	0, 1, 0, 2, 0, 1, 0, 3,
1963 	0, 1, 0, 2, 0, 1, 0, 5,
1964 	0, 1, 0, 2, 0, 1, 0, 3,
1965 	0, 1, 0, 2, 0, 1, 0, 4,
1966 	0, 1, 0, 2, 0, 1, 0, 3,
1967 	0, 1, 0, 2, 0, 1, 0, 7,
1968 	0, 1, 0, 2, 0, 1, 0, 3,
1969 	0, 1, 0, 2, 0, 1, 0, 4,
1970 	0, 1, 0, 2, 0, 1, 0, 3,
1971 	0, 1, 0, 2, 0, 1, 0, 5,
1972 	0, 1, 0, 2, 0, 1, 0, 3,
1973 	0, 1, 0, 2, 0, 1, 0, 4,
1974 	0, 1, 0, 2, 0, 1, 0, 3,
1975 	0, 1, 0, 2, 0, 1, 0, 6,
1976 	0, 1, 0, 2, 0, 1, 0, 3,
1977 	0, 1, 0, 2, 0, 1, 0, 4,
1978 	0, 1, 0, 2, 0, 1, 0, 3,
1979 	0, 1, 0, 2, 0, 1, 0, 5,
1980 	0, 1, 0, 2, 0, 1, 0, 3,
1981 	0, 1, 0, 2, 0, 1, 0, 4,
1982 	0, 1, 0, 2, 0, 1, 0, 3,
1983 	0, 1, 0, 2, 0, 1, 0, 8
1984 };
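/*
 * sctp_map_lookup_tab[val] gives the number of consecutive 1 bits in val
 * counting from bit 0.  For example, sctp_map_lookup_tab[0x0f] is 4 and
 * sctp_map_lookup_tab[0xff] is 8; it lets the slide code below find how far
 * the cum-ack can advance within a partially filled mapping-array byte.
 */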
1985 
1986 
1987 void
1988 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1989 {
1990 	/*
1991 	 * Now we also need to check the mapping array in a couple of ways.
1992 	 * 1) Did we move the cum-ack point?
1993 	 *
1994 	 * When you first glance at this you might think that all entries that
1995 	 * make up the position of the cum-ack would be in the nr-mapping
1996 	 * array only... i.e. things up to the cum-ack are always
1997 	 * deliverable. That's true with one exception: when it's a fragmented
1998 	 * message we may not deliver the data until some threshold (or all
1999 	 * of it) is in place. So we must OR the nr_mapping_array and
2000 	 * mapping_array to get a true picture of the cum-ack.
2001 	 */
2002 	struct sctp_association *asoc;
2003 	int at;
2004 	uint8_t val;
2005 	int slide_from, slide_end, lgap, distance;
2006 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2007 
2008 	asoc = &stcb->asoc;
2009 
2010 	old_cumack = asoc->cumulative_tsn;
2011 	old_base = asoc->mapping_array_base_tsn;
2012 	old_highest = asoc->highest_tsn_inside_map;
2013 	/*
2014 	 * We could probably improve this a small bit by calculating the
2015 	 * offset of the current cum-ack as the starting point.
2016 	 */
2017 	at = 0;
2018 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2019 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2020 		if (val == 0xff) {
2021 			at += 8;
2022 		} else {
2023 			/* there is a 0 bit */
2024 			at += sctp_map_lookup_tab[val];
2025 			break;
2026 		}
2027 	}
2028 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
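	/*
	 * Example: with mapping_array_base_tsn = 100 and the first ORed byte
	 * equal to 0x1f, the loop above stops with at = 5, so the cumulative
	 * TSN becomes 100 + (5 - 1) = 104 and TSN 105 is still missing.
	 */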
2029 
2030 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2031 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2032 #ifdef INVARIANTS
2033 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2034 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2035 #else
2036 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2037 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2038 		sctp_print_mapping_array(asoc);
2039 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2040 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2041 		}
2042 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2043 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2044 #endif
2045 	}
2046 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2047 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2048 	} else {
2049 		highest_tsn = asoc->highest_tsn_inside_map;
2050 	}
2051 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2052 		/* The complete array was completed by a single FR */
2053 		/* highest becomes the cum-ack */
2054 		int clr;
2055 
2056 #ifdef INVARIANTS
2057 		unsigned int i;
2058 
2059 #endif
2060 
2061 		/* clear the array */
2062 		clr = ((at + 7) >> 3);
2063 		if (clr > asoc->mapping_array_size) {
2064 			clr = asoc->mapping_array_size;
2065 		}
2066 		memset(asoc->mapping_array, 0, clr);
2067 		memset(asoc->nr_mapping_array, 0, clr);
2068 #ifdef INVARIANTS
2069 		for (i = 0; i < asoc->mapping_array_size; i++) {
2070 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2071 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2072 				sctp_print_mapping_array(asoc);
2073 			}
2074 		}
2075 #endif
2076 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2077 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2078 	} else if (at >= 8) {
2079 		/* we can slide the mapping array down */
2080 		/* slide_from holds where we hit the first NON 0xff byte */
2081 
2082 		/*
2083 		 * now calculate the ceiling of the move using our highest
2084 		 * TSN value
2085 		 */
2086 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2087 		slide_end = (lgap >> 3);
2088 		if (slide_end < slide_from) {
2089 			sctp_print_mapping_array(asoc);
2090 #ifdef INVARIANTS
2091 			panic("impossible slide");
2092 #else
2093 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2094 			    lgap, slide_end, slide_from, at);
2095 			return;
2096 #endif
2097 		}
2098 		if (slide_end > asoc->mapping_array_size) {
2099 #ifdef INVARIANTS
2100 			panic("would overrun buffer");
2101 #else
2102 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2103 			    asoc->mapping_array_size, slide_end);
2104 			slide_end = asoc->mapping_array_size;
2105 #endif
2106 		}
2107 		distance = (slide_end - slide_from) + 1;
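		/*
		 * distance is the number of mapping-array bytes that still
		 * carry information (from the first non-0xff byte through the
		 * byte holding the highest TSN) and therefore must be copied
		 * to the front of the arrays.
		 */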
2108 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2109 			sctp_log_map(old_base, old_cumack, old_highest,
2110 			    SCTP_MAP_PREPARE_SLIDE);
2111 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2112 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2113 		}
2114 		if (distance + slide_from > asoc->mapping_array_size ||
2115 		    distance < 0) {
2116 			/*
2117 			 * Here we do NOT slide forward the array so that
2118 			 * hopefully when more data comes in to fill it up
2119 			 * we will be able to slide it forward. Really I
2120 			 * don't think this should happen :-0
2121 			 */
2122 
2123 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2124 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2125 				    (uint32_t) asoc->mapping_array_size,
2126 				    SCTP_MAP_SLIDE_NONE);
2127 			}
2128 		} else {
2129 			int ii;
2130 
2131 			for (ii = 0; ii < distance; ii++) {
2132 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2133 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2134 
2135 			}
2136 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2137 				asoc->mapping_array[ii] = 0;
2138 				asoc->nr_mapping_array[ii] = 0;
2139 			}
2140 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2141 				asoc->highest_tsn_inside_map += (slide_from << 3);
2142 			}
2143 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2144 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2145 			}
2146 			asoc->mapping_array_base_tsn += (slide_from << 3);
2147 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2148 				sctp_log_map(asoc->mapping_array_base_tsn,
2149 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2150 				    SCTP_MAP_SLIDE_RESULT);
2151 			}
2152 		}
2153 	}
2154 }
2155 
2156 void
2157 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2158 {
2159 	struct sctp_association *asoc;
2160 	uint32_t highest_tsn;
2161 
2162 	asoc = &stcb->asoc;
2163 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2164 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2165 	} else {
2166 		highest_tsn = asoc->highest_tsn_inside_map;
2167 	}
2168 
2169 	/*
2170 	 * Now we need to see if we need to queue a sack or just start the
2171 	 * timer (if allowed).
2172 	 */
2173 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2174 		/*
2175 		 * Ok, special case in the SHUTDOWN-SENT state. Here we make
2176 		 * sure the SACK timer is off and instead send a SHUTDOWN and a
2177 		 * SACK
2178 		 */
2179 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2180 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2181 			    stcb->sctp_ep, stcb, NULL,
2182 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2183 		}
2184 		sctp_send_shutdown(stcb,
2185 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2186 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2187 	} else {
2188 		int is_a_gap;
2189 
2190 		/* is there a gap now ? */
2191 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
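		/*
		 * A gap exists whenever the highest TSN seen (renegable or
		 * not) is beyond the cumulative TSN, i.e. the mapping arrays
		 * contain holes that a SACK should report.
		 */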
2192 
2193 		/*
2194 		 * CMT DAC algorithm: increase number of packets received
2195 		 * since last ack
2196 		 */
2197 		stcb->asoc.cmt_dac_pkts_rcvd++;
2198 
2199 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2200 							 * SACK */
2201 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2202 							 * longer is one */
2203 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2204 		    (is_a_gap) ||	/* is still a gap */
2205 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2206 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2207 		    ) {
2208 
2209 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2210 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2211 			    (stcb->asoc.send_sack == 0) &&
2212 			    (stcb->asoc.numduptsns == 0) &&
2213 			    (stcb->asoc.delayed_ack) &&
2214 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2215 
2216 				/*
2217 				 * CMT DAC algorithm: With CMT, delay acks
2218 				 * even in the face of reordering.
2219 				 * Therefore, acks that do not
2220 				 * have to be sent because of the above
2221 				 * reasons will be delayed.
2222 				 * That is, acks
2223 				 * that would have been sent due to gap
2224 				 * reports will be delayed with DAC. Start
2225 				 * the delayed ack timer.
2226 				 */
2227 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2228 				    stcb->sctp_ep, stcb, NULL);
2229 			} else {
2230 				/*
2231 				 * Ok, we must build a SACK since the timer
2232 				 * is pending, we got our first packet, OR
2233 				 * there are gaps or duplicates.
2234 				 */
2235 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2236 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2237 			}
2238 		} else {
2239 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2240 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2241 				    stcb->sctp_ep, stcb, NULL);
2242 			}
2243 		}
2244 	}
2245 }
2246 
2247 void
2248 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2249 {
2250 	struct sctp_tmit_chunk *chk;
2251 	uint32_t tsize, pd_point;
2252 	uint16_t nxt_todel;
2253 
2254 	if (asoc->fragmented_delivery_inprogress) {
2255 		sctp_service_reassembly(stcb, asoc);
2256 	}
2257 	/* Can we proceed further, i.e. the PD-API is complete */
2258 	if (asoc->fragmented_delivery_inprogress) {
2259 		/* no */
2260 		return;
2261 	}
2262 	/*
2263 	 * Now is there some other chunk I can deliver from the reassembly
2264 	 * queue.
2265 	 */
2266 doit_again:
2267 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2268 	if (chk == NULL) {
2269 		asoc->size_on_reasm_queue = 0;
2270 		asoc->cnt_on_reasm_queue = 0;
2271 		return;
2272 	}
2273 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2274 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2275 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2276 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2277 		/*
2278 		 * Yep, the first one is here. We set up to start reception
2279 		 * by backing down the TSN just in case we can't deliver.
2280 		 */
2281 
2282 		/*
2283 		 * Before we start, though, either all of the message should
2284 		 * be here or at least the partial-delivery threshold (pd_point)
2285 		 * worth of it, so that something can actually be delivered.
2286 		 */
2287 		if (stcb->sctp_socket) {
2288 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2289 			    stcb->sctp_ep->partial_delivery_point);
2290 		} else {
2291 			pd_point = stcb->sctp_ep->partial_delivery_point;
2292 		}
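		/*
		 * pd_point is the partial-delivery threshold: the smaller of
		 * a fraction of the receive socket-buffer limit and the
		 * endpoint's configured partial_delivery_point.
		 */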
2293 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2294 			asoc->fragmented_delivery_inprogress = 1;
2295 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2296 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2297 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2298 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2299 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2300 			sctp_service_reassembly(stcb, asoc);
2301 			if (asoc->fragmented_delivery_inprogress == 0) {
2302 				goto doit_again;
2303 			}
2304 		}
2305 	}
2306 }
2307 
2308 int
2309 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2310     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2311     struct sctp_nets *net, uint32_t * high_tsn)
2312 {
2313 	struct sctp_data_chunk *ch, chunk_buf;
2314 	struct sctp_association *asoc;
2315 	int num_chunks = 0;	/* number of control chunks processed */
2316 	int stop_proc = 0;
2317 	int chk_length, break_flag, last_chunk;
2318 	int abort_flag = 0, was_a_gap;
2319 	struct mbuf *m;
2320 	uint32_t highest_tsn;
2321 
2322 	/* set the rwnd */
2323 	sctp_set_rwnd(stcb, &stcb->asoc);
2324 
2325 	m = *mm;
2326 	SCTP_TCB_LOCK_ASSERT(stcb);
2327 	asoc = &stcb->asoc;
2328 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2329 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2330 	} else {
2331 		highest_tsn = asoc->highest_tsn_inside_map;
2332 	}
2333 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
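	/*
	 * was_a_gap is sampled before any chunks are processed so that
	 * sctp_sack_check() can tell whether this packet closed a previously
	 * reported gap, which forces an immediate SACK.
	 */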
2334 	/*
2335 	 * setup where we got the last DATA packet from for any SACK that
2336 	 * may need to go out. Don't bump the net. This is done ONLY when a
2337 	 * chunk is assigned.
2338 	 */
2339 	asoc->last_data_chunk_from = net;
2340 
2341 	/*-
2342 	 * Now before we proceed we must figure out if this is a wasted
2343 	 * cluster... i.e. it is a small packet sent in and yet the driver
2344 	 * underneath allocated a full cluster for it. If so we must copy it
2345 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2346 	 * with cluster starvation. Note for __Panda__ we don't do this
2347 	 * since it has clusters all the way down to 64 bytes.
2348 	 */
2349 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2350 		/* we only handle mbufs that are singletons.. not chains */
2351 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2352 		if (m) {
2353 			/* ok lets see if we can copy the data up */
2354 			caddr_t *from, *to;
2355 
2356 			/* get the pointers and copy */
2357 			to = mtod(m, caddr_t *);
2358 			from = mtod((*mm), caddr_t *);
2359 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2360 			/* copy the length and free up the old */
2361 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2362 			sctp_m_freem(*mm);
2363 			/* success, back copy */
2364 			*mm = m;
2365 		} else {
2366 			/* We are in trouble in the mbuf world .. yikes */
2367 			m = *mm;
2368 		}
2369 	}
2370 	/* get pointer to the first chunk header */
2371 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2372 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2373 	if (ch == NULL) {
2374 		return (1);
2375 	}
2376 	/*
2377 	 * process all DATA chunks...
2378 	 */
2379 	*high_tsn = asoc->cumulative_tsn;
2380 	break_flag = 0;
2381 	asoc->data_pkts_seen++;
2382 	while (stop_proc == 0) {
2383 		/* validate chunk length */
2384 		chk_length = ntohs(ch->ch.chunk_length);
2385 		if (length - *offset < chk_length) {
2386 			/* all done, mutilated chunk */
2387 			stop_proc = 1;
2388 			continue;
2389 		}
2390 		if (ch->ch.chunk_type == SCTP_DATA) {
2391 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2392 				/*
2393 				 * Need to send an abort since we had an
2394 				 * invalid data chunk.
2395 				 */
2396 				struct mbuf *op_err;
2397 				char msg[SCTP_DIAG_INFO_LEN];
2398 
2399 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2400 				    chk_length);
2401 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2402 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2403 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2404 				return (2);
2405 			}
2406 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2407 				/*
2408 				 * Need to send an abort since we had an
2409 				 * empty data chunk.
2410 				 */
2411 				struct mbuf *op_err;
2412 
2413 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2414 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2415 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2416 				return (2);
2417 			}
2418 #ifdef SCTP_AUDITING_ENABLED
2419 			sctp_audit_log(0xB1, 0);
2420 #endif
2421 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2422 				last_chunk = 1;
2423 			} else {
2424 				last_chunk = 0;
2425 			}
2426 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2427 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2428 			    last_chunk)) {
2429 				num_chunks++;
2430 			}
2431 			if (abort_flag)
2432 				return (2);
2433 
2434 			if (break_flag) {
2435 				/*
2436 				 * Set because we ran out of rwnd space and
2437 				 * have no drop-report space left.
2438 				 */
2439 				stop_proc = 1;
2440 				continue;
2441 			}
2442 		} else {
2443 			/* not a data chunk in the data region */
2444 			switch (ch->ch.chunk_type) {
2445 			case SCTP_INITIATION:
2446 			case SCTP_INITIATION_ACK:
2447 			case SCTP_SELECTIVE_ACK:
2448 			case SCTP_NR_SELECTIVE_ACK:
2449 			case SCTP_HEARTBEAT_REQUEST:
2450 			case SCTP_HEARTBEAT_ACK:
2451 			case SCTP_ABORT_ASSOCIATION:
2452 			case SCTP_SHUTDOWN:
2453 			case SCTP_SHUTDOWN_ACK:
2454 			case SCTP_OPERATION_ERROR:
2455 			case SCTP_COOKIE_ECHO:
2456 			case SCTP_COOKIE_ACK:
2457 			case SCTP_ECN_ECHO:
2458 			case SCTP_ECN_CWR:
2459 			case SCTP_SHUTDOWN_COMPLETE:
2460 			case SCTP_AUTHENTICATION:
2461 			case SCTP_ASCONF_ACK:
2462 			case SCTP_PACKET_DROPPED:
2463 			case SCTP_STREAM_RESET:
2464 			case SCTP_FORWARD_CUM_TSN:
2465 			case SCTP_ASCONF:
2466 				/*
2467 				 * Now, what do we do with KNOWN chunks that
2468 				 * are NOT in the right place?
2469 				 *
2470 				 * For now, I do nothing but ignore them. We
2471 				 * may later want to add sysctl stuff to
2472 				 * switch out and do either an ABORT() or
2473 				 * possibly process them.
2474 				 */
2475 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2476 					struct mbuf *op_err;
2477 					char msg[SCTP_DIAG_INFO_LEN];
2478 
2479 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2480 					    ch->ch.chunk_type);
2481 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2482 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2483 					return (2);
2484 				}
2485 				break;
2486 			default:
2487 				/* unknown chunk type, use bit rules */
2488 				if (ch->ch.chunk_type & 0x40) {
2489 					/* Add a error report to the queue */
2490 					struct mbuf *op_err;
2491 					struct sctp_gen_error_cause *cause;
2492 
2493 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2494 					    0, M_NOWAIT, 1, MT_DATA);
2495 					if (op_err != NULL) {
2496 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2497 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2498 						cause->length = htons(chk_length + sizeof(struct sctp_gen_error_cause));
2499 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2500 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2501 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2502 							sctp_queue_op_err(stcb, op_err);
2503 						} else {
2504 							sctp_m_freem(op_err);
2505 						}
2506 					}
2507 				}
2508 				if ((ch->ch.chunk_type & 0x80) == 0) {
2509 					/* discard the rest of this packet */
2510 					stop_proc = 1;
2511 				}	/* else skip this bad chunk and
2512 					 * continue... */
2513 				break;
2514 			}	/* switch of chunk type */
2515 		}
2516 		*offset += SCTP_SIZE32(chk_length);
2517 		if ((*offset >= length) || stop_proc) {
2518 			/* no more data left in the mbuf chain */
2519 			stop_proc = 1;
2520 			continue;
2521 		}
2522 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2523 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2524 		if (ch == NULL) {
2525 			*offset = length;
2526 			stop_proc = 1;
2527 			continue;
2528 		}
2529 	}
2530 	if (break_flag) {
2531 		/*
2532 		 * we need to report rwnd overrun drops.
2533 		 */
2534 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2535 	}
2536 	if (num_chunks) {
2537 		/*
2538 		 * Did we get data, if so update the time for auto-close and
2539 		 * give peer credit for being alive.
2540 		 */
2541 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2542 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2543 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2544 			    stcb->asoc.overall_error_count,
2545 			    0,
2546 			    SCTP_FROM_SCTP_INDATA,
2547 			    __LINE__);
2548 		}
2549 		stcb->asoc.overall_error_count = 0;
2550 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2551 	}
2552 	/* now service all of the reassm queue if needed */
2553 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2554 		sctp_service_queues(stcb, asoc);
2555 
2556 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2557 		/* Assure that we ack right away */
2558 		stcb->asoc.send_sack = 1;
2559 	}
2560 	/* Start a sack timer or QUEUE a SACK for sending */
2561 	sctp_sack_check(stcb, was_a_gap);
2562 	return (0);
2563 }
2564 
2565 static int
2566 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2567     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2568     int *num_frs,
2569     uint32_t * biggest_newly_acked_tsn,
2570     uint32_t * this_sack_lowest_newack,
2571     int *rto_ok)
2572 {
2573 	struct sctp_tmit_chunk *tp1;
2574 	unsigned int theTSN;
2575 	int j, wake_him = 0, circled = 0;
2576 
2577 	/* Recover the tp1 we last saw */
2578 	tp1 = *p_tp1;
2579 	if (tp1 == NULL) {
2580 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2581 	}
2582 	for (j = frag_strt; j <= frag_end; j++) {
2583 		theTSN = j + last_tsn;
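		/*
		 * Gap ack block boundaries are offsets relative to the
		 * cumulative TSN carried in the SACK, so the absolute TSN
		 * being processed here is last_tsn + j.
		 */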
2584 		while (tp1) {
2585 			if (tp1->rec.data.doing_fast_retransmit)
2586 				(*num_frs) += 1;
2587 
2588 			/*-
2589 			 * CMT: CUCv2 algorithm. For each TSN being
2590 			 * processed from the sent queue, track the
2591 			 * next expected pseudo-cumack, or
2592 			 * rtx_pseudo_cumack, if required. Separate
2593 			 * cumack trackers for first transmissions,
2594 			 * and retransmissions.
2595 			 */
2596 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2597 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2598 			    (tp1->snd_count == 1)) {
2599 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2600 				tp1->whoTo->find_pseudo_cumack = 0;
2601 			}
2602 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2603 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2604 			    (tp1->snd_count > 1)) {
2605 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2606 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2607 			}
2608 			if (tp1->rec.data.TSN_seq == theTSN) {
2609 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2610 					/*-
2611 					 * must be held until
2612 					 * cum-ack passes
2613 					 */
2614 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2615 						/*-
2616 						 * If it is less than RESEND, it is
2617 						 * now no-longer in flight.
2618 						 * Higher values may already be set
2619 						 * via previous Gap Ack Blocks...
2620 						 * i.e. ACKED or RESEND.
2621 						 */
2622 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2623 						    *biggest_newly_acked_tsn)) {
2624 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2625 						}
2626 						/*-
2627 						 * CMT: SFR algo (and HTNA) - set
2628 						 * saw_newack to 1 for dest being
2629 						 * newly acked. update
2630 						 * this_sack_highest_newack if
2631 						 * appropriate.
2632 						 */
2633 						if (tp1->rec.data.chunk_was_revoked == 0)
2634 							tp1->whoTo->saw_newack = 1;
2635 
2636 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2637 						    tp1->whoTo->this_sack_highest_newack)) {
2638 							tp1->whoTo->this_sack_highest_newack =
2639 							    tp1->rec.data.TSN_seq;
2640 						}
2641 						/*-
2642 						 * CMT DAC algo: also update
2643 						 * this_sack_lowest_newack
2644 						 */
2645 						if (*this_sack_lowest_newack == 0) {
2646 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2647 								sctp_log_sack(*this_sack_lowest_newack,
2648 								    last_tsn,
2649 								    tp1->rec.data.TSN_seq,
2650 								    0,
2651 								    0,
2652 								    SCTP_LOG_TSN_ACKED);
2653 							}
2654 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2655 						}
2656 						/*-
2657 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2658 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2659 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2660 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2661 						 * Separate pseudo_cumack trackers for first transmissions and
2662 						 * retransmissions.
2663 						 */
2664 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2665 							if (tp1->rec.data.chunk_was_revoked == 0) {
2666 								tp1->whoTo->new_pseudo_cumack = 1;
2667 							}
2668 							tp1->whoTo->find_pseudo_cumack = 1;
2669 						}
2670 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2671 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2672 						}
2673 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2674 							if (tp1->rec.data.chunk_was_revoked == 0) {
2675 								tp1->whoTo->new_pseudo_cumack = 1;
2676 							}
2677 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2678 						}
2679 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2680 							sctp_log_sack(*biggest_newly_acked_tsn,
2681 							    last_tsn,
2682 							    tp1->rec.data.TSN_seq,
2683 							    frag_strt,
2684 							    frag_end,
2685 							    SCTP_LOG_TSN_ACKED);
2686 						}
2687 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2688 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2689 							    tp1->whoTo->flight_size,
2690 							    tp1->book_size,
2691 							    (uintptr_t) tp1->whoTo,
2692 							    tp1->rec.data.TSN_seq);
2693 						}
2694 						sctp_flight_size_decrease(tp1);
2695 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2696 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2697 							    tp1);
2698 						}
2699 						sctp_total_flight_decrease(stcb, tp1);
2700 
2701 						tp1->whoTo->net_ack += tp1->send_size;
2702 						if (tp1->snd_count < 2) {
2703 							/*-
2704 							 * True non-retransmitted chunk
2705 							 */
2706 							tp1->whoTo->net_ack2 += tp1->send_size;
2707 
2708 							/*-
2709 							 * update RTO too ?
2710 							 */
2711 							if (tp1->do_rtt) {
2712 								if (*rto_ok) {
2713 									tp1->whoTo->RTO =
2714 									    sctp_calculate_rto(stcb,
2715 									    &stcb->asoc,
2716 									    tp1->whoTo,
2717 									    &tp1->sent_rcv_time,
2718 									    sctp_align_safe_nocopy,
2719 									    SCTP_RTT_FROM_DATA);
2720 									*rto_ok = 0;
2721 								}
2722 								if (tp1->whoTo->rto_needed == 0) {
2723 									tp1->whoTo->rto_needed = 1;
2724 								}
2725 								tp1->do_rtt = 0;
2726 							}
2727 						}
2728 					}
2729 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2730 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2731 						    stcb->asoc.this_sack_highest_gap)) {
2732 							stcb->asoc.this_sack_highest_gap =
2733 							    tp1->rec.data.TSN_seq;
2734 						}
2735 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2736 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2737 #ifdef SCTP_AUDITING_ENABLED
2738 							sctp_audit_log(0xB2,
2739 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2740 #endif
2741 						}
2742 					}
2743 					/*-
2744 					 * All chunks NOT UNSENT fall through here and are marked
2745 					 * (leave PR-SCTP ones that are to skip alone though)
2746 					 */
2747 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2748 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2749 						tp1->sent = SCTP_DATAGRAM_MARKED;
2750 					}
2751 					if (tp1->rec.data.chunk_was_revoked) {
2752 						/* deflate the cwnd */
2753 						tp1->whoTo->cwnd -= tp1->book_size;
2754 						tp1->rec.data.chunk_was_revoked = 0;
2755 					}
2756 					/* NR Sack code here */
2757 					if (nr_sacking &&
2758 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2759 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2760 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2761 #ifdef INVARIANTS
2762 						} else {
2763 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2764 #endif
2765 						}
2766 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2767 						if (tp1->data) {
2768 							/*
2769 							 * sa_ignore
2770 							 * NO_NULL_CHK
2771 							 */
2772 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2773 							sctp_m_freem(tp1->data);
2774 							tp1->data = NULL;
2775 						}
2776 						wake_him++;
2777 					}
2778 				}
2779 				break;
2780 			}	/* if (tp1->TSN_seq == theTSN) */
2781 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2782 				break;
2783 			}
2784 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2785 			if ((tp1 == NULL) && (circled == 0)) {
2786 				circled++;
2787 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2788 			}
2789 		}		/* end while (tp1) */
2790 		if (tp1 == NULL) {
2791 			circled = 0;
2792 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2793 		}
2794 		/* In case the fragments were not in order we must reset */
2795 	}			/* end for (j = fragStart */
2796 	*p_tp1 = tp1;
2797 	return (wake_him);	/* Return value only used for nr-sack */
2798 }
2799 
2800 
2801 static int
2802 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2803     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2804     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2805     int num_seg, int num_nr_seg, int *rto_ok)
2806 {
2807 	struct sctp_gap_ack_block *frag, block;
2808 	struct sctp_tmit_chunk *tp1;
2809 	int i;
2810 	int num_frs = 0;
2811 	int chunk_freed;
2812 	int non_revocable;
2813 	uint16_t frag_strt, frag_end, prev_frag_end;
2814 
2815 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2816 	prev_frag_end = 0;
2817 	chunk_freed = 0;
2818 
2819 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2820 		if (i == num_seg) {
2821 			prev_frag_end = 0;
2822 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2823 		}
2824 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2825 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2826 		*offset += sizeof(block);
2827 		if (frag == NULL) {
2828 			return (chunk_freed);
2829 		}
2830 		frag_strt = ntohs(frag->start);
2831 		frag_end = ntohs(frag->end);
2832 
2833 		if (frag_strt > frag_end) {
2834 			/* This gap report is malformed, skip it. */
2835 			continue;
2836 		}
2837 		if (frag_strt <= prev_frag_end) {
2838 			/* This gap report is not in order, so restart. */
2839 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2840 		}
2841 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2842 			*biggest_tsn_acked = last_tsn + frag_end;
2843 		}
2844 		if (i < num_seg) {
2845 			non_revocable = 0;
2846 		} else {
2847 			non_revocable = 1;
2848 		}
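		/*
		 * The first num_seg blocks are ordinary (revocable) gap
		 * reports; the remaining num_nr_seg blocks come from an
		 * NR-SACK and are treated as non-revocable.
		 */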
2849 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2850 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2851 		    this_sack_lowest_newack, rto_ok)) {
2852 			chunk_freed = 1;
2853 		}
2854 		prev_frag_end = frag_end;
2855 	}
2856 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2857 		if (num_frs)
2858 			sctp_log_fr(*biggest_tsn_acked,
2859 			    *biggest_newly_acked_tsn,
2860 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2861 	}
2862 	return (chunk_freed);
2863 }
2864 
2865 static void
2866 sctp_check_for_revoked(struct sctp_tcb *stcb,
2867     struct sctp_association *asoc, uint32_t cumack,
2868     uint32_t biggest_tsn_acked)
2869 {
2870 	struct sctp_tmit_chunk *tp1;
2871 
2872 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2873 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2874 			/*
2875 			 * ok, this guy is either ACKED or MARKED. If it is
2876 			 * ACKED it has been previously acked but not this
2877 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2878 			 * again.
2879 			 */
2880 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2881 				break;
2882 			}
2883 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2884 				/* it has been revoked */
2885 				tp1->sent = SCTP_DATAGRAM_SENT;
2886 				tp1->rec.data.chunk_was_revoked = 1;
2887 				/*
2888 				 * We must add this stuff back in to assure
2889 				 * timers and such get started.
2890 				 */
2891 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2892 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2893 					    tp1->whoTo->flight_size,
2894 					    tp1->book_size,
2895 					    (uintptr_t) tp1->whoTo,
2896 					    tp1->rec.data.TSN_seq);
2897 				}
2898 				sctp_flight_size_increase(tp1);
2899 				sctp_total_flight_increase(stcb, tp1);
2900 				/*
2901 				 * We inflate the cwnd to compensate for our
2902 				 * artificial inflation of the flight_size.
2903 				 */
2904 				tp1->whoTo->cwnd += tp1->book_size;
2905 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2906 					sctp_log_sack(asoc->last_acked_seq,
2907 					    cumack,
2908 					    tp1->rec.data.TSN_seq,
2909 					    0,
2910 					    0,
2911 					    SCTP_LOG_TSN_REVOKED);
2912 				}
2913 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2914 				/* it has been re-acked in this SACK */
2915 				tp1->sent = SCTP_DATAGRAM_ACKED;
2916 			}
2917 		}
2918 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2919 			break;
2920 	}
2921 }
2922 
2923 
2924 static void
2925 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2926     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2927 {
2928 	struct sctp_tmit_chunk *tp1;
2929 	int strike_flag = 0;
2930 	struct timeval now;
2931 	int tot_retrans = 0;
2932 	uint32_t sending_seq;
2933 	struct sctp_nets *net;
2934 	int num_dests_sacked = 0;
2935 
2936 	/*
2937 	 * select the sending_seq, this is either the next thing ready to be
2938 	 * sent but not transmitted, OR, the next seq we assign.
2939 	 */
2940 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2941 	if (tp1 == NULL) {
2942 		sending_seq = asoc->sending_seq;
2943 	} else {
2944 		sending_seq = tp1->rec.data.TSN_seq;
2945 	}
2946 
2947 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2948 	if ((asoc->sctp_cmt_on_off > 0) &&
2949 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2950 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2951 			if (net->saw_newack)
2952 				num_dests_sacked++;
2953 		}
2954 	}
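	/*
	 * num_dests_sacked counts destinations that saw a new ack in this
	 * SACK; the CMT DAC rules below apply an extra strike only when
	 * exactly one destination was newly acked (a non-mixed SACK).
	 */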
2955 	if (stcb->asoc.prsctp_supported) {
2956 		(void)SCTP_GETTIME_TIMEVAL(&now);
2957 	}
2958 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2959 		strike_flag = 0;
2960 		if (tp1->no_fr_allowed) {
2961 			/* this one had a timeout or something */
2962 			continue;
2963 		}
2964 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2965 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2966 				sctp_log_fr(biggest_tsn_newly_acked,
2967 				    tp1->rec.data.TSN_seq,
2968 				    tp1->sent,
2969 				    SCTP_FR_LOG_CHECK_STRIKE);
2970 		}
2971 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2972 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2973 			/* done */
2974 			break;
2975 		}
2976 		if (stcb->asoc.prsctp_supported) {
2977 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2978 				/* Is it expired? */
2979 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2980 					/* Yes so drop it */
2981 					if (tp1->data != NULL) {
2982 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2983 						    SCTP_SO_NOT_LOCKED);
2984 					}
2985 					continue;
2986 				}
2987 			}
2988 		}
2989 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2990 			/* we are beyond the tsn in the sack  */
2991 			break;
2992 		}
2993 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2994 			/* either a RESEND, ACKED, or MARKED */
2995 			/* skip */
2996 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
2997 				/* Continue striking FWD-TSN chunks */
2998 				tp1->rec.data.fwd_tsn_cnt++;
2999 			}
3000 			continue;
3001 		}
3002 		/*
3003 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3004 		 */
3005 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3006 			/*
3007 			 * No new acks were received for data sent to this
3008 			 * dest. Therefore, according to the SFR algo for
3009 			 * CMT, no data sent to this dest can be marked for
3010 			 * FR using this SACK.
3011 			 */
3012 			continue;
3013 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3014 		    tp1->whoTo->this_sack_highest_newack)) {
3015 			/*
3016 			 * CMT: New acks were received for data sent to
3017 			 * this dest. But no new acks were seen for data
3018 			 * sent after tp1. Therefore, according to the SFR
3019 			 * algo for CMT, tp1 cannot be marked for FR using
3020 			 * this SACK. This step covers part of the DAC algo
3021 			 * and the HTNA algo as well.
3022 			 */
3023 			continue;
3024 		}
3025 		/*
3026 		 * Here we check to see if we have already done a FR
3027 		 * and if so we see if the biggest TSN we saw in the sack is
3028 		 * smaller than the recovery point. If so we don't strike
3029 		 * the tsn... otherwise we CAN strike the TSN.
3030 		 */
3031 		/*
3032 		 * @@@ JRI: Check for CMT if (accum_moved &&
3033 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3034 		 * 0)) {
3035 		 */
3036 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3037 			/*
3038 			 * Strike the TSN if in fast-recovery and cum-ack
3039 			 * moved.
3040 			 */
3041 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3042 				sctp_log_fr(biggest_tsn_newly_acked,
3043 				    tp1->rec.data.TSN_seq,
3044 				    tp1->sent,
3045 				    SCTP_FR_LOG_STRIKE_CHUNK);
3046 			}
3047 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3048 				tp1->sent++;
3049 			}
3050 			if ((asoc->sctp_cmt_on_off > 0) &&
3051 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3052 				/*
3053 				 * CMT DAC algorithm: If the SACK flag is set
3054 				 * to 0, the lowest_newack test will not pass
3055 				 * because it would have been set to the
3056 				 * cumack earlier. If the chunk is not already
3057 				 * marked for retransmission, the SACK is not
3058 				 * mixed, and tp1 is not between two SACKed
3059 				 * TSNs, then strike it one more time. NOTE:
3060 				 * we strike one additional time because the
3061 				 * SACK DAC flag indicates that two packets
3062 				 * have been received after this missing TSN.
3063 				 */
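				/*
				 * Illustration: when the DAC condition below
				 * holds, a single SACK can advance tp1->sent
				 * by two (once in the strike above, once
				 * here), so a missing chunk reaches
				 * SCTP_DATAGRAM_RESEND in roughly half as
				 * many SACKs as without DAC.
				 */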
3064 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3065 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3066 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3067 						sctp_log_fr(16 + num_dests_sacked,
3068 						    tp1->rec.data.TSN_seq,
3069 						    tp1->sent,
3070 						    SCTP_FR_LOG_STRIKE_CHUNK);
3071 					}
3072 					tp1->sent++;
3073 				}
3074 			}
3075 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3076 		    (asoc->sctp_cmt_on_off == 0)) {
3077 			/*
3078 			 * For those that have done a FR we must take
3079 			 * special consideration if we strike. I.e the
3080 			 * special consideration if we strike. I.e. the
3081 			 * sending_seq at the time we did the FR.
3082 			 */
3083 			if (
3084 #ifdef SCTP_FR_TO_ALTERNATE
3085 			/*
3086 			 * If FR's go to new networks, then we must only do
3087 			 * this for singly homed asoc's. However if the FR's
3088 			 * go to the same network (Armando's work) then it's
3089 			 * ok to FR multiple times.
3090 			 */
3091 			    (asoc->numnets < 2)
3092 #else
3093 			    (1)
3094 #endif
3095 			    ) {
3096 
3097 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3098 				    tp1->rec.data.fast_retran_tsn)) {
3099 					/*
3100 					 * Strike the TSN, since this ack is
3101 					 * beyond where things were when we
3102 					 * did a FR.
3103 					 */
3104 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3105 						sctp_log_fr(biggest_tsn_newly_acked,
3106 						    tp1->rec.data.TSN_seq,
3107 						    tp1->sent,
3108 						    SCTP_FR_LOG_STRIKE_CHUNK);
3109 					}
3110 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3111 						tp1->sent++;
3112 					}
3113 					strike_flag = 1;
3114 					if ((asoc->sctp_cmt_on_off > 0) &&
3115 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3116 						/*
3117 						 * CMT DAC algorithm: If the
3118 						 * SACK flag is set to 0, the
3119 						 * lowest_newack test will
3120 						 * not pass because it would
3121 						 * have been set to the
3122 						 * cumack earlier. If the
3123 						 * chunk is not already
3124 						 * marked for retransmission,
3125 						 * the SACK is not mixed, and
3126 						 * tp1 is not between two
3127 						 * SACKed TSNs, then strike
3128 						 * it one more time. NOTE: we
3129 						 * strike one additional time
3130 						 * because the SACK DAC flag
3131 						 * indicates that two packets
3132 						 * have been received after
3133 						 * this missing TSN.
3134 						 */
3135 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3136 						    (num_dests_sacked == 1) &&
3137 						    SCTP_TSN_GT(this_sack_lowest_newack,
3138 						    tp1->rec.data.TSN_seq)) {
3139 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3140 								sctp_log_fr(32 + num_dests_sacked,
3141 								    tp1->rec.data.TSN_seq,
3142 								    tp1->sent,
3143 								    SCTP_FR_LOG_STRIKE_CHUNK);
3144 							}
3145 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3146 								tp1->sent++;
3147 							}
3148 						}
3149 					}
3150 				}
3151 			}
3152 			/*
3153 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3154 			 * algo covers HTNA.
3155 			 */
3156 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3157 		    biggest_tsn_newly_acked)) {
3158 			/*
3159 			 * We don't strike these: This is the HTNA
3160 			 * algorithm, i.e. we don't strike if our TSN is
3161 			 * larger than the Highest TSN Newly Acked.
3162 			 */
3163 			;
3164 		} else {
3165 			/* Strike the TSN */
3166 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3167 				sctp_log_fr(biggest_tsn_newly_acked,
3168 				    tp1->rec.data.TSN_seq,
3169 				    tp1->sent,
3170 				    SCTP_FR_LOG_STRIKE_CHUNK);
3171 			}
3172 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3173 				tp1->sent++;
3174 			}
3175 			if ((asoc->sctp_cmt_on_off > 0) &&
3176 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3177 				/*
3178 				 * CMT DAC algorithm: If the SACK flag is set
3179 				 * to 0, the lowest_newack test will not pass
3180 				 * because it would have been set to the
3181 				 * cumack earlier. If the chunk is not already
3182 				 * marked for retransmission, the SACK is not
3183 				 * mixed, and tp1 is not between two SACKed
3184 				 * TSNs, then strike it one more time. NOTE:
3185 				 * we strike one additional time because the
3186 				 * SACK DAC flag indicates that two packets
3187 				 * have been received after this missing TSN.
3188 				 */
3189 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3190 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3191 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3192 						sctp_log_fr(48 + num_dests_sacked,
3193 						    tp1->rec.data.TSN_seq,
3194 						    tp1->sent,
3195 						    SCTP_FR_LOG_STRIKE_CHUNK);
3196 					}
3197 					tp1->sent++;
3198 				}
3199 			}
3200 		}
3201 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3202 			struct sctp_nets *alt;
3203 
3204 			/* fix counts and things */
3205 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3206 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3207 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3208 				    tp1->book_size,
3209 				    (uintptr_t) tp1->whoTo,
3210 				    tp1->rec.data.TSN_seq);
3211 			}
3212 			if (tp1->whoTo) {
3213 				tp1->whoTo->net_ack++;
3214 				sctp_flight_size_decrease(tp1);
3215 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3216 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3217 					    tp1);
3218 				}
3219 			}
3220 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3221 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3222 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3223 			}
3224 			/* add back to the rwnd */
3225 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3226 
3227 			/* remove from the total flight */
3228 			sctp_total_flight_decrease(stcb, tp1);
3229 
3230 			if ((stcb->asoc.prsctp_supported) &&
3231 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3232 				/*
3233 				 * Has it been retransmitted tv_sec times? -
3234 				 * we store the retran count there.
3235 				 */
3236 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3237 					/* Yes, so drop it */
3238 					if (tp1->data != NULL) {
3239 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3240 						    SCTP_SO_NOT_LOCKED);
3241 					}
3242 					/* Make sure to flag we had a FR */
3243 					tp1->whoTo->net_ack++;
3244 					continue;
3245 				}
3246 			}
3247 			/*
3248 			 * SCTP_PRINTF("OK, we are now ready to FR this
3249 			 * guy\n");
3250 			 */
3251 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3253 				    0, SCTP_FR_MARKED);
3254 			}
3255 			if (strike_flag) {
3256 				/* This is a subsequent FR */
3257 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3258 			}
3259 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3260 			if (asoc->sctp_cmt_on_off > 0) {
3261 				/*
3262 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3263 				 * If CMT is being used, then pick dest with
3264 				 * largest ssthresh for any retransmission.
3265 				 */
3266 				tp1->no_fr_allowed = 1;
3267 				alt = tp1->whoTo;
3268 				/* sa_ignore NO_NULL_CHK */
3269 				if (asoc->sctp_cmt_pf > 0) {
3270 					/*
3271 					 * JRS 5/18/07 - If CMT PF is on,
3272 					 * use the PF version of
3273 					 * find_alt_net()
3274 					 */
3275 					alt = sctp_find_alternate_net(stcb, alt, 2);
3276 				} else {
3277 					/*
3278 					 * JRS 5/18/07 - If only CMT is on,
3279 					 * use the CMT version of
3280 					 * find_alt_net()
3281 					 */
3282 					/* sa_ignore NO_NULL_CHK */
3283 					alt = sctp_find_alternate_net(stcb, alt, 1);
3284 				}
3285 				if (alt == NULL) {
3286 					alt = tp1->whoTo;
3287 				}
3288 				/*
3289 				 * CUCv2: If a different dest is picked for
3290 				 * the retransmission, then new
3291 				 * (rtx-)pseudo_cumack needs to be tracked
3292 				 * for orig dest. Let CUCv2 track new (rtx-)
3293 				 * pseudo-cumack always.
3294 				 */
3295 				if (tp1->whoTo) {
3296 					tp1->whoTo->find_pseudo_cumack = 1;
3297 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3298 				}
3299 			} else {/* CMT is OFF */
3300 
3301 #ifdef SCTP_FR_TO_ALTERNATE
3302 				/* Can we find an alternate? */
3303 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3304 #else
3305 				/*
3306 				 * default behavior is to NOT retransmit
3307 				 * FR's to an alternate. Armando Caro's
3308 				 * paper details why.
3309 				 */
3310 				alt = tp1->whoTo;
3311 #endif
3312 			}
3313 
3314 			tp1->rec.data.doing_fast_retransmit = 1;
3315 			tot_retrans++;
3316 			/* mark the sending seq for possible subsequent FR's */
3317 			/*
3318 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3319 			 * (uint32_t)tpi->rec.data.TSN_seq);
3320 			 */
3321 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3322 				/*
3323 				 * If the send queue is empty then it's
3324 				 * the next sequence number that will be
3325 				 * assigned so we subtract one from this to
3326 				 * get the one we last sent.
3327 				 */
3328 				tp1->rec.data.fast_retran_tsn = sending_seq;
3329 			} else {
3330 				/*
3331 				 * If there are chunks on the send queue
3332 				 * (unsent data that has made it from the
3333 				 * stream queues but not out the door), we
3334 				 * take the first one (which will have the
3335 				 * lowest TSN) and subtract one to get the
3336 				 * one we last sent.
3337 				 */
3338 				struct sctp_tmit_chunk *ttt;
3339 
3340 				ttt = TAILQ_FIRST(&asoc->send_queue);
3341 				tp1->rec.data.fast_retran_tsn =
3342 				    ttt->rec.data.TSN_seq;
3343 			}
3344 
3345 			if (tp1->do_rtt) {
3346 				/*
3347 				 * this guy had an RTO calculation pending on
3348 				 * it, cancel it
3349 				 */
3350 				if ((tp1->whoTo != NULL) &&
3351 				    (tp1->whoTo->rto_needed == 0)) {
3352 					tp1->whoTo->rto_needed = 1;
3353 				}
3354 				tp1->do_rtt = 0;
3355 			}
3356 			if (alt != tp1->whoTo) {
3357 				/* yes, there is an alternate. */
3358 				sctp_free_remote_addr(tp1->whoTo);
3359 				/* sa_ignore FREED_MEMORY */
3360 				tp1->whoTo = alt;
3361 				atomic_add_int(&alt->ref_count, 1);
3362 			}
3363 		}
3364 	}
3365 }
3366 
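/*
 * Walk the sent_queue and advance asoc->advanced_peer_ack_point over chunks
 * marked SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED, dropping expired
 * PR-SCTP resends along the way.  Returns the chunk at the (possibly updated)
 * advanced peer-ack point, or NULL if PR-SCTP is not supported or no advance
 * is possible; callers use it when deciding whether to send a FORWARD-TSN and
 * whether to keep a send timer running.
 */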
3367 struct sctp_tmit_chunk *
3368 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3369     struct sctp_association *asoc)
3370 {
3371 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3372 	struct timeval now;
3373 	int now_filled = 0;
3374 
3375 	if (asoc->prsctp_supported == 0) {
3376 		return (NULL);
3377 	}
3378 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3379 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3380 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3381 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3382 			/* no chance to advance, out of here */
3383 			break;
3384 		}
3385 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3386 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3387 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3388 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3389 				    asoc->advanced_peer_ack_point,
3390 				    tp1->rec.data.TSN_seq, 0, 0);
3391 			}
3392 		}
3393 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3394 			/*
3395 			 * We can't fwd-tsn past any that are reliable aka
3396 			 * retransmitted until the asoc fails.
3397 			 */
3398 			break;
3399 		}
3400 		if (!now_filled) {
3401 			(void)SCTP_GETTIME_TIMEVAL(&now);
3402 			now_filled = 1;
3403 		}
3404 		/*
3405 		 * Now we have a chunk which is marked for another
3406 		 * retransmission to a PR-stream but has already run out of
3407 		 * chances, OR has been marked to skip now. Can we skip it
3408 		 * if it's a resend?
3409 		 */
3410 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3411 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3412 			/*
3413 			 * Now is this one marked for resend and its time is
3414 			 * now up?
3415 			 */
3416 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3417 				/* Yes so drop it */
3418 				if (tp1->data) {
3419 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3420 					    1, SCTP_SO_NOT_LOCKED);
3421 				}
3422 			} else {
3423 				/*
3424 				 * No, we are done when we hit one marked for
3425 				 * resend whose time has not expired.
3426 				 */
3427 				break;
3428 			}
3429 		}
3430 		/*
3431 		 * Ok now if this chunk is marked to drop it we can clean up
3432 		 * the chunk, advance our peer ack point and we can check
3433 		 * the next chunk.
3434 		 */
3435 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3436 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3437 			/* the advanced peer-ack point moves forward */
3438 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3439 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3440 				a_adv = tp1;
3441 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3442 				/* No update but we do save the chk */
3443 				a_adv = tp1;
3444 			}
3445 		} else {
3446 			/*
3447 			 * If it is still in RESEND we can advance no
3448 			 * further
3449 			 */
3450 			break;
3451 		}
3452 	}
3453 	return (a_adv);
3454 }
3455 
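/*
 * Audit the flight-size bookkeeping against the per-chunk sent states on the
 * sent_queue.  If any chunk is still below SCTP_DATAGRAM_RESEND (in flight)
 * or sits between RESEND and ACKED, the accounting is inconsistent: panic
 * under INVARIANTS, otherwise log the counters and return 1 so the caller can
 * rebuild the flight from the queue.  Returns 0 when things look consistent.
 */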
3456 static int
3457 sctp_fs_audit(struct sctp_association *asoc)
3458 {
3459 	struct sctp_tmit_chunk *chk;
3460 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3461 	int ret;
3462 
3463 #ifndef INVARIANTS
3464 	int entry_flight, entry_cnt;
3465 
3466 #endif
3467 
3468 	ret = 0;
3469 #ifndef INVARIANTS
3470 	entry_flight = asoc->total_flight;
3471 	entry_cnt = asoc->total_flight_count;
3472 #endif
3473 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3474 		return (0);
3475 
3476 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3477 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3478 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3479 			    chk->rec.data.TSN_seq,
3480 			    chk->send_size,
3481 			    chk->snd_count);
3482 			inflight++;
3483 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3484 			resend++;
3485 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3486 			inbetween++;
3487 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3488 			above++;
3489 		} else {
3490 			acked++;
3491 		}
3492 	}
3493 
3494 	if ((inflight > 0) || (inbetween > 0)) {
3495 #ifdef INVARIANTS
3496 		panic("Flight size-express incorrect? \n");
3497 #else
3498 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3499 		    entry_flight, entry_cnt);
3500 
3501 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3502 		    inflight, inbetween, resend, above, acked);
3503 		ret = 1;
3504 #endif
3505 	}
3506 	return (ret);
3507 }
3508 
3509 
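/*
 * Pull a window-probe chunk back for normal retransmission once the peer's
 * window reopens: shrink the flight accounting and mark it
 * SCTP_DATAGRAM_RESEND.  Chunks that were already acked or skipped (data
 * freed) are left where they are.
 */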
3510 static void
3511 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3512     struct sctp_association *asoc,
3513     struct sctp_tmit_chunk *tp1)
3514 {
3515 	tp1->window_probe = 0;
3516 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3517 		/* TSNs skipped; we do NOT move back. */
3518 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3519 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3520 		    tp1->book_size,
3521 		    (uintptr_t) tp1->whoTo,
3522 		    tp1->rec.data.TSN_seq);
3523 		return;
3524 	}
3525 	/* First setup this by shrinking flight */
3526 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3527 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3528 		    tp1);
3529 	}
3530 	sctp_flight_size_decrease(tp1);
3531 	sctp_total_flight_decrease(stcb, tp1);
3532 	/* Now mark for resend */
3533 	tp1->sent = SCTP_DATAGRAM_RESEND;
3534 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3535 
3536 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3537 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3538 		    tp1->whoTo->flight_size,
3539 		    tp1->book_size,
3540 		    (uintptr_t) tp1->whoTo,
3541 		    tp1->rec.data.TSN_seq);
3542 	}
3543 }
3544 
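/*
 * Express (fast-path) SACK handling: only a new cumulative ack and rwnd are
 * reported, so no gap-ack blocks are processed here.  Newly cum-acked chunks
 * are freed, the peer's rwnd and the congestion control state are updated,
 * T3 timers are restarted or stopped, shutdown progression is handled and the
 * PR-SCTP advance-peer-ack-point procedure is run.
 */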
3545 void
3546 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3547     uint32_t rwnd, int *abort_now, int ecne_seen)
3548 {
3549 	struct sctp_nets *net;
3550 	struct sctp_association *asoc;
3551 	struct sctp_tmit_chunk *tp1, *tp2;
3552 	uint32_t old_rwnd;
3553 	int win_probe_recovery = 0;
3554 	int win_probe_recovered = 0;
3555 	int j, done_once = 0;
3556 	int rto_ok = 1;
3557 
3558 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3559 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3560 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3561 	}
3562 	SCTP_TCB_LOCK_ASSERT(stcb);
3563 #ifdef SCTP_ASOCLOG_OF_TSNS
3564 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3565 	stcb->asoc.cumack_log_at++;
3566 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3567 		stcb->asoc.cumack_log_at = 0;
3568 	}
3569 #endif
3570 	asoc = &stcb->asoc;
3571 	old_rwnd = asoc->peers_rwnd;
3572 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3573 		/* old ack */
3574 		return;
3575 	} else if (asoc->last_acked_seq == cumack) {
3576 		/* Window update sack */
3577 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3578 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3579 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3580 			/* SWS sender side engages */
3581 			asoc->peers_rwnd = 0;
3582 		}
3583 		if (asoc->peers_rwnd > old_rwnd) {
3584 			goto again;
3585 		}
3586 		return;
3587 	}
3588 	/* First setup for CC stuff */
3589 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3590 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3591 			/* Drag along the window_tsn for cwr's */
3592 			net->cwr_window_tsn = cumack;
3593 		}
3594 		net->prev_cwnd = net->cwnd;
3595 		net->net_ack = 0;
3596 		net->net_ack2 = 0;
3597 
3598 		/*
3599 		 * CMT: Reset CUC and Fast recovery algo variables before
3600 		 * SACK processing
3601 		 */
3602 		net->new_pseudo_cumack = 0;
3603 		net->will_exit_fast_recovery = 0;
3604 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3605 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3606 		}
3607 	}
3608 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3609 		uint32_t send_s;
3610 
3611 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3612 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3613 			    sctpchunk_listhead);
3614 			send_s = tp1->rec.data.TSN_seq + 1;
3615 		} else {
3616 			send_s = asoc->sending_seq;
3617 		}
3618 		if (SCTP_TSN_GE(cumack, send_s)) {
3619 			struct mbuf *op_err;
3620 			char msg[SCTP_DIAG_INFO_LEN];
3621 
3622 			*abort_now = 1;
3623 			/* XXX */
3624 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3625 			    cumack, send_s);
3626 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3627 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3628 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3629 			return;
3630 		}
3631 	}
3632 	asoc->this_sack_highest_gap = cumack;
3633 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3634 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3635 		    stcb->asoc.overall_error_count,
3636 		    0,
3637 		    SCTP_FROM_SCTP_INDATA,
3638 		    __LINE__);
3639 	}
3640 	stcb->asoc.overall_error_count = 0;
3641 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3642 		/* process the new consecutive TSN first */
3643 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3644 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3645 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3646 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3647 				}
3648 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3649 					/*
3650 					 * If it is less than ACKED, it is
3651 					 * now no-longer in flight. Higher
3652 					 * values may occur during marking
3653 					 */
3654 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3655 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3656 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3657 							    tp1->whoTo->flight_size,
3658 							    tp1->book_size,
3659 							    (uintptr_t) tp1->whoTo,
3660 							    tp1->rec.data.TSN_seq);
3661 						}
3662 						sctp_flight_size_decrease(tp1);
3663 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3664 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3665 							    tp1);
3666 						}
3667 						/* sa_ignore NO_NULL_CHK */
3668 						sctp_total_flight_decrease(stcb, tp1);
3669 					}
3670 					tp1->whoTo->net_ack += tp1->send_size;
3671 					if (tp1->snd_count < 2) {
3672 						/*
3673 						 * True non-retransmitted
3674 						 * chunk
3675 						 */
3676 						tp1->whoTo->net_ack2 +=
3677 						    tp1->send_size;
3678 
3679 						/* update RTO too? */
3680 						if (tp1->do_rtt) {
3681 							if (rto_ok) {
3682 								tp1->whoTo->RTO =
3683 								/* sa_ignore NO_NULL_CHK */
3688 								    sctp_calculate_rto(stcb,
3689 								    asoc, tp1->whoTo,
3690 								    &tp1->sent_rcv_time,
3691 								    sctp_align_safe_nocopy,
3692 								    SCTP_RTT_FROM_DATA);
3693 								rto_ok = 0;
3694 							}
3695 							if (tp1->whoTo->rto_needed == 0) {
3696 								tp1->whoTo->rto_needed = 1;
3697 							}
3698 							tp1->do_rtt = 0;
3699 						}
3700 					}
3701 					/*
3702 					 * CMT: CUCv2 algorithm. From the
3703 					 * cumack'd TSNs, for each TSN being
3704 					 * acked for the first time, set the
3705 					 * following variables for the
3706 					 * corresp destination.
3707 					 * new_pseudo_cumack will trigger a
3708 					 * cwnd update.
3709 					 * find_(rtx_)pseudo_cumack will
3710 					 * trigger search for the next
3711 					 * expected (rtx-)pseudo-cumack.
3712 					 */
3713 					tp1->whoTo->new_pseudo_cumack = 1;
3714 					tp1->whoTo->find_pseudo_cumack = 1;
3715 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3716 
3717 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3718 						/* sa_ignore NO_NULL_CHK */
3719 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3720 					}
3721 				}
3722 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3723 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3724 				}
3725 				if (tp1->rec.data.chunk_was_revoked) {
3726 					/* deflate the cwnd */
3727 					tp1->whoTo->cwnd -= tp1->book_size;
3728 					tp1->rec.data.chunk_was_revoked = 0;
3729 				}
3730 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3731 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3732 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3733 #ifdef INVARIANTS
3734 					} else {
3735 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3736 #endif
3737 					}
3738 				}
3739 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3740 				if (tp1->data) {
3741 					/* sa_ignore NO_NULL_CHK */
3742 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3743 					sctp_m_freem(tp1->data);
3744 					tp1->data = NULL;
3745 				}
3746 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3747 					sctp_log_sack(asoc->last_acked_seq,
3748 					    cumack,
3749 					    tp1->rec.data.TSN_seq,
3750 					    0,
3751 					    0,
3752 					    SCTP_LOG_FREE_SENT);
3753 				}
3754 				asoc->sent_queue_cnt--;
3755 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3756 			} else {
3757 				break;
3758 			}
3759 		}
3760 
3761 	}
3762 	/* sa_ignore NO_NULL_CHK */
3763 	if (stcb->sctp_socket) {
3764 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3765 		struct socket *so;
3766 
3767 #endif
3768 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3769 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3770 			/* sa_ignore NO_NULL_CHK */
3771 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3772 		}
3773 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3774 		so = SCTP_INP_SO(stcb->sctp_ep);
3775 		atomic_add_int(&stcb->asoc.refcnt, 1);
3776 		SCTP_TCB_UNLOCK(stcb);
3777 		SCTP_SOCKET_LOCK(so, 1);
3778 		SCTP_TCB_LOCK(stcb);
3779 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3780 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3781 			/* assoc was freed while we were unlocked */
3782 			SCTP_SOCKET_UNLOCK(so, 1);
3783 			return;
3784 		}
3785 #endif
3786 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3787 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3788 		SCTP_SOCKET_UNLOCK(so, 1);
3789 #endif
3790 	} else {
3791 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3792 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3793 		}
3794 	}
3795 
3796 	/* JRS - Use the congestion control given in the CC module */
3797 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3798 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3799 			if (net->net_ack2 > 0) {
3800 				/*
3801 				 * Karn's rule applies to clearing error
3802 				 * count, this is optional.
3803 				 */
3804 				net->error_count = 0;
3805 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3806 					/* addr came good */
3807 					net->dest_state |= SCTP_ADDR_REACHABLE;
3808 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3809 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3810 				}
3811 				if (net == stcb->asoc.primary_destination) {
3812 					if (stcb->asoc.alternate) {
3813 						/*
3814 						 * release the alternate,
3815 						 * primary is good
3816 						 */
3817 						sctp_free_remote_addr(stcb->asoc.alternate);
3818 						stcb->asoc.alternate = NULL;
3819 					}
3820 				}
3821 				if (net->dest_state & SCTP_ADDR_PF) {
3822 					net->dest_state &= ~SCTP_ADDR_PF;
3823 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3824 					    stcb->sctp_ep, stcb, net,
3825 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3826 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3827 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3828 					/* Done with this net */
3829 					net->net_ack = 0;
3830 				}
3831 				/* restore any doubled timers */
3832 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3833 				if (net->RTO < stcb->asoc.minrto) {
3834 					net->RTO = stcb->asoc.minrto;
3835 				}
3836 				if (net->RTO > stcb->asoc.maxrto) {
3837 					net->RTO = stcb->asoc.maxrto;
3838 				}
3839 			}
3840 		}
3841 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3842 	}
3843 	asoc->last_acked_seq = cumack;
3844 
3845 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3846 		/* nothing left in-flight */
3847 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3848 			net->flight_size = 0;
3849 			net->partial_bytes_acked = 0;
3850 		}
3851 		asoc->total_flight = 0;
3852 		asoc->total_flight_count = 0;
3853 	}
3854 	/* RWND update */
3855 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3856 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3857 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3858 		/* SWS sender side engages */
3859 		asoc->peers_rwnd = 0;
3860 	}
3861 	if (asoc->peers_rwnd > old_rwnd) {
3862 		win_probe_recovery = 1;
3863 	}
3864 	/* Now assure a timer where data is queued at */
3865 again:
3866 	j = 0;
3867 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3868 		int to_ticks;
3869 
3870 		if (win_probe_recovery && (net->window_probe)) {
3871 			win_probe_recovered = 1;
3872 			/*
3873 			 * Find first chunk that was used with window probe
3874 			 * and clear the sent
3875 			 */
3876 			/* sa_ignore FREED_MEMORY */
3877 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3878 				if (tp1->window_probe) {
3879 					/* move back to data send queue */
3880 					sctp_window_probe_recovery(stcb, asoc, tp1);
3881 					break;
3882 				}
3883 			}
3884 		}
3885 		if (net->RTO == 0) {
3886 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3887 		} else {
3888 			to_ticks = MSEC_TO_TICKS(net->RTO);
3889 		}
3890 		if (net->flight_size) {
3891 			j++;
3892 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3893 			    sctp_timeout_handler, &net->rxt_timer);
3894 			if (net->window_probe) {
3895 				net->window_probe = 0;
3896 			}
3897 		} else {
3898 			if (net->window_probe) {
3899 				/*
3900 				 * In window probes we must assure a timer
3901 				 * is still running there
3902 				 */
3903 				net->window_probe = 0;
3904 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3905 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3906 					    sctp_timeout_handler, &net->rxt_timer);
3907 				}
3908 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3909 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3910 				    stcb, net,
3911 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3912 			}
3913 		}
3914 	}
3915 	if ((j == 0) &&
3916 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3917 	    (asoc->sent_queue_retran_cnt == 0) &&
3918 	    (win_probe_recovered == 0) &&
3919 	    (done_once == 0)) {
3920 		/*
3921 		 * huh, this should not happen unless all packets are
3922 		 * PR-SCTP and marked to skip of course.
3923 		 */
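		/*
		 * If the audit reports an inconsistency, rebuild the flight
		 * accounting from scratch: zero every net's flight_size and
		 * the totals, then re-add every chunk on the sent_queue that
		 * is still below SCTP_DATAGRAM_RESEND and recount the ones
		 * marked for retransmission.
		 */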
3924 		if (sctp_fs_audit(asoc)) {
3925 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3926 				net->flight_size = 0;
3927 			}
3928 			asoc->total_flight = 0;
3929 			asoc->total_flight_count = 0;
3930 			asoc->sent_queue_retran_cnt = 0;
3931 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3932 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3933 					sctp_flight_size_increase(tp1);
3934 					sctp_total_flight_increase(stcb, tp1);
3935 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3936 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3937 				}
3938 			}
3939 		}
3940 		done_once = 1;
3941 		goto again;
3942 	}
3943 	/**********************************/
3944 	/* Now what about shutdown issues */
3945 	/**********************************/
3946 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3947 		/* nothing left on sendqueue.. consider done */
3948 		/* clean up */
3949 		if ((asoc->stream_queue_cnt == 1) &&
3950 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3951 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3952 		    (asoc->locked_on_sending)
3953 		    ) {
3954 			struct sctp_stream_queue_pending *sp;
3955 
3956 			/*
3957 			 * I may be in a state where we got all across.. but
3958 			 * cannot write more due to a shutdown... we abort
3959 			 * since the user did not indicate EOR in this case.
3960 			 * The sp will be cleaned during free of the asoc.
3961 			 */
3962 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3963 			    sctp_streamhead);
3964 			if ((sp) && (sp->length == 0)) {
3965 				/* Let cleanup code purge it */
3966 				if (sp->msg_is_complete) {
3967 					asoc->stream_queue_cnt--;
3968 				} else {
3969 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3970 					asoc->locked_on_sending = NULL;
3971 					asoc->stream_queue_cnt--;
3972 				}
3973 			}
3974 		}
3975 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3976 		    (asoc->stream_queue_cnt == 0)) {
3977 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3978 				/* Need to abort here */
3979 				struct mbuf *op_err;
3980 
3981 		abort_out_now:
3982 				*abort_now = 1;
3983 				/* XXX */
3984 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3985 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
3986 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3987 			} else {
3988 				struct sctp_nets *netp;
3989 
3990 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3991 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3992 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3993 				}
3994 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3995 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3996 				sctp_stop_timers_for_shutdown(stcb);
3997 				if (asoc->alternate) {
3998 					netp = asoc->alternate;
3999 				} else {
4000 					netp = asoc->primary_destination;
4001 				}
4002 				sctp_send_shutdown(stcb, netp);
4003 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4004 				    stcb->sctp_ep, stcb, netp);
4005 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4006 				    stcb->sctp_ep, stcb, netp);
4007 			}
4008 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4009 		    (asoc->stream_queue_cnt == 0)) {
4010 			struct sctp_nets *netp;
4011 
4012 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4013 				goto abort_out_now;
4014 			}
4015 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4016 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4017 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4018 			sctp_stop_timers_for_shutdown(stcb);
4019 			if (asoc->alternate) {
4020 				netp = asoc->alternate;
4021 			} else {
4022 				netp = asoc->primary_destination;
4023 			}
4024 			sctp_send_shutdown_ack(stcb, netp);
4025 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4026 			    stcb->sctp_ep, stcb, netp);
4027 		}
4028 	}
4029 	/*********************************************/
4030 	/* Here we perform PR-SCTP procedures        */
4031 	/* (section 4.2)                             */
4032 	/*********************************************/
4033 	/* C1. update advancedPeerAckPoint */
4034 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4035 		asoc->advanced_peer_ack_point = cumack;
4036 	}
4037 	/* PR-Sctp issues need to be addressed too */
4038 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4039 		struct sctp_tmit_chunk *lchk;
4040 		uint32_t old_adv_peer_ack_point;
4041 
4042 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4043 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4044 		/* C3. See if we need to send a Fwd-TSN */
4045 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4046 			/*
4047 			 * ISSUE with ECN, see FWD-TSN processing.
4048 			 */
4049 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4050 				send_forward_tsn(stcb, asoc);
4051 			} else if (lchk) {
4052 				/* try to FR fwd-tsn's that get lost too */
4053 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4054 					send_forward_tsn(stcb, asoc);
4055 				}
4056 			}
4057 		}
4058 		if (lchk) {
4059 			/* Assure a timer is up */
4060 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4061 			    stcb->sctp_ep, stcb, lchk->whoTo);
4062 		}
4063 	}
4064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4065 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4066 		    rwnd,
4067 		    stcb->asoc.peers_rwnd,
4068 		    stcb->asoc.total_flight,
4069 		    stcb->asoc.total_output_queue_size);
4070 	}
4071 }
4072 
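/*
 * Full (slow-path) SACK handling: in addition to the cumulative ack this
 * processes gap-ack/nr-gap-ack blocks and duplicate TSN reports, read from
 * the mbuf chain 'm' at offset_seg and offset_dup.  The gap reports drive the
 * strike/fast-retransmit and revocation logic; the rest mirrors the express
 * path above (rwnd, congestion control, timers, shutdown and PR-SCTP).
 */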
4073 void
4074 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4075     struct sctp_tcb *stcb,
4076     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4077     int *abort_now, uint8_t flags,
4078     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4079 {
4080 	struct sctp_association *asoc;
4081 	struct sctp_tmit_chunk *tp1, *tp2;
4082 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4083 	uint16_t wake_him = 0;
4084 	uint32_t send_s = 0;
4085 	long j;
4086 	int accum_moved = 0;
4087 	int will_exit_fast_recovery = 0;
4088 	uint32_t a_rwnd, old_rwnd;
4089 	int win_probe_recovery = 0;
4090 	int win_probe_recovered = 0;
4091 	struct sctp_nets *net = NULL;
4092 	int done_once;
4093 	int rto_ok = 1;
4094 	uint8_t reneged_all = 0;
4095 	uint8_t cmt_dac_flag;
4096 
4097 	/*
4098 	 * we take any chance we can to service our queues since we cannot
4099 	 * get awoken when the socket is read from :<
4100 	 */
4101 	/*
4102 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4103 	 * old sack, if so discard. 2) If there is nothing left in the send
4104 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4105 	 * too, update any rwnd change and verify no timers are running.
4106 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4107 	 * moved process these first and note that it moved. 4) Process any
4108 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4109 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4110 	 * sync up flightsizes and things, stop all timers and also check
4111 	 * for shutdown_pending state. If so then go ahead and send off the
4112 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4113 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4114 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4115 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4116 	 * if in shutdown_recv state.
4117 	 */
4118 	SCTP_TCB_LOCK_ASSERT(stcb);
4119 	/* CMT DAC algo */
4120 	this_sack_lowest_newack = 0;
4121 	SCTP_STAT_INCR(sctps_slowpath_sack);
4122 	last_tsn = cum_ack;
4123 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4124 #ifdef SCTP_ASOCLOG_OF_TSNS
4125 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4126 	stcb->asoc.cumack_log_at++;
4127 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4128 		stcb->asoc.cumack_log_at = 0;
4129 	}
4130 #endif
4131 	a_rwnd = rwnd;
4132 
4133 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4134 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4135 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4136 	}
4137 	old_rwnd = stcb->asoc.peers_rwnd;
4138 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4139 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4140 		    stcb->asoc.overall_error_count,
4141 		    0,
4142 		    SCTP_FROM_SCTP_INDATA,
4143 		    __LINE__);
4144 	}
4145 	stcb->asoc.overall_error_count = 0;
4146 	asoc = &stcb->asoc;
4147 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4148 		sctp_log_sack(asoc->last_acked_seq,
4149 		    cum_ack,
4150 		    0,
4151 		    num_seg,
4152 		    num_dup,
4153 		    SCTP_LOG_NEW_SACK);
4154 	}
4155 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4156 		uint16_t i;
4157 		uint32_t *dupdata, dblock;
4158 
4159 		for (i = 0; i < num_dup; i++) {
4160 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4161 			    sizeof(uint32_t), (uint8_t *) & dblock);
4162 			if (dupdata == NULL) {
4163 				break;
4164 			}
4165 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4166 		}
4167 	}
4168 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4169 		/* reality check */
4170 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4171 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4172 			    sctpchunk_listhead);
4173 			send_s = tp1->rec.data.TSN_seq + 1;
4174 		} else {
4175 			tp1 = NULL;
4176 			send_s = asoc->sending_seq;
4177 		}
4178 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4179 			struct mbuf *op_err;
4180 			char msg[SCTP_DIAG_INFO_LEN];
4181 
4182 			/*
4183 			 * no way, we have not even sent this TSN out yet.
4184 			 * Peer is hopelessly messed up with us.
4185 			 */
4186 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4187 			    cum_ack, send_s);
4188 			if (tp1) {
4189 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4190 				    tp1->rec.data.TSN_seq, (void *)tp1);
4191 			}
4192 	hopeless_peer:
4193 			*abort_now = 1;
4194 			/* XXX */
4195 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4196 			    cum_ack, send_s);
4197 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4198 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4199 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4200 			return;
4201 		}
4202 	}
4203 	/**********************/
4204 	/* 1) check the range */
4205 	/**********************/
4206 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4207 		/* acking something behind */
4208 		return;
4209 	}
4210 	/* update the Rwnd of the peer */
4211 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4212 	    TAILQ_EMPTY(&asoc->send_queue) &&
4213 	    (asoc->stream_queue_cnt == 0)) {
4214 		/* nothing left on send/sent and strmq */
4215 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4216 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4217 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4218 		}
4219 		asoc->peers_rwnd = a_rwnd;
4220 		if (asoc->sent_queue_retran_cnt) {
4221 			asoc->sent_queue_retran_cnt = 0;
4222 		}
4223 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4224 			/* SWS sender side engages */
4225 			asoc->peers_rwnd = 0;
4226 		}
4227 		/* stop any timers */
4228 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4229 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4230 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4231 			net->partial_bytes_acked = 0;
4232 			net->flight_size = 0;
4233 		}
4234 		asoc->total_flight = 0;
4235 		asoc->total_flight_count = 0;
4236 		return;
4237 	}
4238 	/*
4239 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4240 	 * things. The total byte count acked is tracked in netAckSz AND
4241 	 * netAck2 is used to track the total bytes acked that are
4242 	 * unambiguous and were never retransmitted. We track these on a per
4243 	 * destination address basis.
4244 	 */
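	/*
	 * For example, a chunk that was retransmitted (snd_count >= 2) and is
	 * now acked adds to net_ack but not to net_ack2, so it neither feeds
	 * the RTT measurement nor (via net_ack2) clears the destination's
	 * error count under Karn's rule further down.
	 */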
4245 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4246 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4247 			/* Drag along the window_tsn for cwr's */
4248 			net->cwr_window_tsn = cum_ack;
4249 		}
4250 		net->prev_cwnd = net->cwnd;
4251 		net->net_ack = 0;
4252 		net->net_ack2 = 0;
4253 
4254 		/*
4255 		 * CMT: Reset CUC and Fast recovery algo variables before
4256 		 * SACK processing
4257 		 */
4258 		net->new_pseudo_cumack = 0;
4259 		net->will_exit_fast_recovery = 0;
4260 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4261 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4262 		}
4263 	}
4264 	/* process the new consecutive TSN first */
4265 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4266 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4267 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4268 				accum_moved = 1;
4269 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4270 					/*
4271 					 * If it is less than ACKED, it is
4272 					 * now no-longer in flight. Higher
4273 					 * values may occur during marking
4274 					 */
4275 					if ((tp1->whoTo->dest_state &
4276 					    SCTP_ADDR_UNCONFIRMED) &&
4277 					    (tp1->snd_count < 2)) {
4278 						/*
4279 						 * If there was no retran
4280 						 * and the address is
4281 						 * un-confirmed and we sent
4282 						 * there and are now
4283 						 * sacked.. it's confirmed,
4284 						 * mark it so.
4285 						 */
4286 						tp1->whoTo->dest_state &=
4287 						    ~SCTP_ADDR_UNCONFIRMED;
4288 					}
4289 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4290 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4291 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4292 							    tp1->whoTo->flight_size,
4293 							    tp1->book_size,
4294 							    (uintptr_t) tp1->whoTo,
4295 							    tp1->rec.data.TSN_seq);
4296 						}
4297 						sctp_flight_size_decrease(tp1);
4298 						sctp_total_flight_decrease(stcb, tp1);
4299 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4300 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4301 							    tp1);
4302 						}
4303 					}
4304 					tp1->whoTo->net_ack += tp1->send_size;
4305 
4306 					/* CMT SFR and DAC algos */
4307 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4308 					tp1->whoTo->saw_newack = 1;
4309 
4310 					if (tp1->snd_count < 2) {
4311 						/*
4312 						 * True non-retransmitted
4313 						 * chunk
4314 						 */
4315 						tp1->whoTo->net_ack2 +=
4316 						    tp1->send_size;
4317 
4318 						/* update RTO too? */
4319 						if (tp1->do_rtt) {
4320 							if (rto_ok) {
4321 								tp1->whoTo->RTO =
4322 								    sctp_calculate_rto(stcb,
4323 								    asoc, tp1->whoTo,
4324 								    &tp1->sent_rcv_time,
4325 								    sctp_align_safe_nocopy,
4326 								    SCTP_RTT_FROM_DATA);
4327 								rto_ok = 0;
4328 							}
4329 							if (tp1->whoTo->rto_needed == 0) {
4330 								tp1->whoTo->rto_needed = 1;
4331 							}
4332 							tp1->do_rtt = 0;
4333 						}
4334 					}
4335 					/*
4336 					 * CMT: CUCv2 algorithm. From the
4337 					 * cumack'd TSNs, for each TSN being
4338 					 * acked for the first time, set the
4339 					 * following variables for the
4340 					 * corresp destination.
4341 					 * new_pseudo_cumack will trigger a
4342 					 * cwnd update.
4343 					 * find_(rtx_)pseudo_cumack will
4344 					 * trigger search for the next
4345 					 * expected (rtx-)pseudo-cumack.
4346 					 */
4347 					tp1->whoTo->new_pseudo_cumack = 1;
4348 					tp1->whoTo->find_pseudo_cumack = 1;
4349 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4350 
4351 
4352 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4353 						sctp_log_sack(asoc->last_acked_seq,
4354 						    cum_ack,
4355 						    tp1->rec.data.TSN_seq,
4356 						    0,
4357 						    0,
4358 						    SCTP_LOG_TSN_ACKED);
4359 					}
4360 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4361 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4362 					}
4363 				}
4364 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4365 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4366 #ifdef SCTP_AUDITING_ENABLED
4367 					sctp_audit_log(0xB3,
4368 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4369 #endif
4370 				}
4371 				if (tp1->rec.data.chunk_was_revoked) {
4372 					/* deflate the cwnd */
4373 					tp1->whoTo->cwnd -= tp1->book_size;
4374 					tp1->rec.data.chunk_was_revoked = 0;
4375 				}
4376 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4377 					tp1->sent = SCTP_DATAGRAM_ACKED;
4378 				}
4379 			}
4380 		} else {
4381 			break;
4382 		}
4383 	}
4384 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4385 	/* always set this up to cum-ack */
4386 	asoc->this_sack_highest_gap = last_tsn;
4387 
4388 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4389 
4390 		/*
4391 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4392 		 * to be greater than the cumack. Also reset saw_newack to 0
4393 		 * for all dests.
4394 		 */
4395 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4396 			net->saw_newack = 0;
4397 			net->this_sack_highest_newack = last_tsn;
4398 		}
4399 
4400 		/*
4401 		 * thisSackHighestGap will increase while handling NEW
4402 		 * segments this_sack_highest_newack will increase while
4403 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4404 		 * used for CMT DAC algo. saw_newack will also change.
4405 		 */
4406 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4407 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4408 		    num_seg, num_nr_seg, &rto_ok)) {
4409 			wake_him++;
4410 		}
4411 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4412 			/*
4413 			 * validate the biggest_tsn_acked in the gap acks if
4414 			 * strict adherence is wanted.
4415 			 */
4416 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4417 				/*
4418 				 * peer is either confused or we are under
4419 				 * attack. We must abort.
4420 				 */
4421 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4422 				    biggest_tsn_acked, send_s);
4423 				goto hopeless_peer;
4424 			}
4425 		}
4426 	}
4427 	/*******************************************/
4428 	/* cancel ALL T3-send timer if accum moved */
4429 	/*******************************************/
4430 	if (asoc->sctp_cmt_on_off > 0) {
4431 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4432 			if (net->new_pseudo_cumack)
4433 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4434 				    stcb, net,
4435 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4436 
4437 		}
4438 	} else {
4439 		if (accum_moved) {
4440 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4441 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4442 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4443 			}
4444 		}
4445 	}
4446 	/********************************************/
4447 	/* drop the acked chunks from the sentqueue */
4448 	/********************************************/
4449 	asoc->last_acked_seq = cum_ack;
4450 
4451 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4452 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4453 			break;
4454 		}
4455 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4456 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4457 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4458 #ifdef INVARIANTS
4459 			} else {
4460 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4461 #endif
4462 			}
4463 		}
4464 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4465 		if (PR_SCTP_ENABLED(tp1->flags)) {
4466 			if (asoc->pr_sctp_cnt != 0)
4467 				asoc->pr_sctp_cnt--;
4468 		}
4469 		asoc->sent_queue_cnt--;
4470 		if (tp1->data) {
4471 			/* sa_ignore NO_NULL_CHK */
4472 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4473 			sctp_m_freem(tp1->data);
4474 			tp1->data = NULL;
4475 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4476 				asoc->sent_queue_cnt_removeable--;
4477 			}
4478 		}
4479 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4480 			sctp_log_sack(asoc->last_acked_seq,
4481 			    cum_ack,
4482 			    tp1->rec.data.TSN_seq,
4483 			    0,
4484 			    0,
4485 			    SCTP_LOG_FREE_SENT);
4486 		}
4487 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4488 		wake_him++;
4489 	}
4490 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4491 #ifdef INVARIANTS
4492 		panic("Warning flight size is positive and should be 0");
4493 #else
4494 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4495 		    asoc->total_flight);
4496 #endif
4497 		asoc->total_flight = 0;
4498 	}
4499 	/* sa_ignore NO_NULL_CHK */
4500 	if ((wake_him) && (stcb->sctp_socket)) {
4501 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4502 		struct socket *so;
4503 
4504 #endif
4505 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4506 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4507 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4508 		}
4509 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4510 		so = SCTP_INP_SO(stcb->sctp_ep);
4511 		atomic_add_int(&stcb->asoc.refcnt, 1);
4512 		SCTP_TCB_UNLOCK(stcb);
4513 		SCTP_SOCKET_LOCK(so, 1);
4514 		SCTP_TCB_LOCK(stcb);
4515 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4516 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4517 			/* assoc was freed while we were unlocked */
4518 			SCTP_SOCKET_UNLOCK(so, 1);
4519 			return;
4520 		}
4521 #endif
4522 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4523 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4524 		SCTP_SOCKET_UNLOCK(so, 1);
4525 #endif
4526 	} else {
4527 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4528 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4529 		}
4530 	}
4531 
4532 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4533 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4534 			/* Setup so we will exit RFC2582 fast recovery */
4535 			will_exit_fast_recovery = 1;
4536 		}
4537 	}
4538 	/*
4539 	 * Check for revoked fragments:
4540 	 *
4541 	 * If the previous SACK had no frags, then we can't have any revoked.
4542 	 * If the previous SACK had frags and we now have frags (aka num_seg >
4543 	 * 0), call sctp_check_for_revoked() to tell if the peer revoked some
4544 	 * of them. Otherwise the peer revoked all ACKED fragments, since we
4545 	 * had some before and now we have NONE.
4546 	 */
4547 
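	/*
	 * For example, if a TSN was reported in a gap-ack block of the
	 * previous SACK but is absent from this one, the peer has revoked it.
	 * In the all-revoked branch below each such chunk is moved back to
	 * SCTP_DATAGRAM_SENT, put back into the flight counts, and the cwnd
	 * is bumped by its book_size so the artificial flight increase does
	 * not throttle the sender.
	 */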
4548 	if (num_seg) {
4549 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4550 		asoc->saw_sack_with_frags = 1;
4551 	} else if (asoc->saw_sack_with_frags) {
4552 		int cnt_revoked = 0;
4553 
4554 		/* Peer revoked all dg's marked or acked */
4555 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4556 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4557 				tp1->sent = SCTP_DATAGRAM_SENT;
4558 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4559 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4560 					    tp1->whoTo->flight_size,
4561 					    tp1->book_size,
4562 					    (uintptr_t) tp1->whoTo,
4563 					    tp1->rec.data.TSN_seq);
4564 				}
4565 				sctp_flight_size_increase(tp1);
4566 				sctp_total_flight_increase(stcb, tp1);
4567 				tp1->rec.data.chunk_was_revoked = 1;
4568 				/*
4569 				 * To ensure that this increase in
4570 				 * flightsize, which is artificial, does not
4571 				 * throttle the sender, we also increase the
4572 				 * cwnd artificially.
4573 				 */
4574 				tp1->whoTo->cwnd += tp1->book_size;
4575 				cnt_revoked++;
4576 			}
4577 		}
4578 		if (cnt_revoked) {
4579 			reneged_all = 1;
4580 		}
4581 		asoc->saw_sack_with_frags = 0;
4582 	}
4583 	if (num_nr_seg > 0)
4584 		asoc->saw_sack_with_nr_frags = 1;
4585 	else
4586 		asoc->saw_sack_with_nr_frags = 0;
4587 
4588 	/* JRS - Use the congestion control given in the CC module */
4589 	if (ecne_seen == 0) {
4590 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4591 			if (net->net_ack2 > 0) {
4592 				/*
4593 				 * Karn's rule applies to clearing error
4594 				 * count, this is optional.
4595 				 */
4596 				net->error_count = 0;
4597 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4598 					/* addr came good */
4599 					net->dest_state |= SCTP_ADDR_REACHABLE;
4600 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4601 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4602 				}
4603 				if (net == stcb->asoc.primary_destination) {
4604 					if (stcb->asoc.alternate) {
4605 						/*
4606 						 * release the alternate,
4607 						 * primary is good
4608 						 */
4609 						sctp_free_remote_addr(stcb->asoc.alternate);
4610 						stcb->asoc.alternate = NULL;
4611 					}
4612 				}
4613 				if (net->dest_state & SCTP_ADDR_PF) {
4614 					net->dest_state &= ~SCTP_ADDR_PF;
4615 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4616 					    stcb->sctp_ep, stcb, net,
4617 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4618 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4619 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4620 					/* Done with this net */
4621 					net->net_ack = 0;
4622 				}
4623 				/* restore any doubled timers */
4624 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4625 				if (net->RTO < stcb->asoc.minrto) {
4626 					net->RTO = stcb->asoc.minrto;
4627 				}
4628 				if (net->RTO > stcb->asoc.maxrto) {
4629 					net->RTO = stcb->asoc.maxrto;
4630 				}
4631 			}
4632 		}
4633 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4634 	}
4635 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4636 		/* nothing left in-flight */
4637 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 			/* stop all timers */
4639 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4640 			    stcb, net,
4641 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4642 			net->flight_size = 0;
4643 			net->partial_bytes_acked = 0;
4644 		}
4645 		asoc->total_flight = 0;
4646 		asoc->total_flight_count = 0;
4647 	}
4648 	/**********************************/
4649 	/* Now what about shutdown issues */
4650 	/**********************************/
4651 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4652 		/* Nothing left on the send queue; consider it done. */
4653 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4654 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4655 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4656 		}
4657 		asoc->peers_rwnd = a_rwnd;
4658 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4659 			/* SWS sender side engages */
4660 			asoc->peers_rwnd = 0;
4661 		}
4662 		/* clean up */
4663 		if ((asoc->stream_queue_cnt == 1) &&
4664 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4665 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4666 		    (asoc->locked_on_sending)
4667 		    ) {
4668 			struct sctp_stream_queue_pending *sp;
4669 
4670 			/*
4671 			 * We may be in a state where we got everything across
4672 			 * but cannot write more due to a shutdown. We abort,
4673 			 * since the user did not indicate EOR in this case.
4674 			 */
4675 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4676 			    sctp_streamhead);
4677 			if ((sp) && (sp->length == 0)) {
4678 				asoc->locked_on_sending = NULL;
4679 				if (sp->msg_is_complete) {
4680 					asoc->stream_queue_cnt--;
4681 				} else {
4682 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4683 					asoc->stream_queue_cnt--;
4684 				}
4685 			}
4686 		}
4687 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4688 		    (asoc->stream_queue_cnt == 0)) {
4689 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4690 				/* Need to abort here */
4691 				struct mbuf *op_err;
4692 
4693 		abort_out_now:
4694 				*abort_now = 1;
4695 				/* XXX */
4696 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4697 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4698 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4699 				return;
4700 			} else {
4701 				struct sctp_nets *netp;
4702 
4703 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4704 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4705 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4706 				}
4707 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4708 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4709 				sctp_stop_timers_for_shutdown(stcb);
4710 				if (asoc->alternate) {
4711 					netp = asoc->alternate;
4712 				} else {
4713 					netp = asoc->primary_destination;
4714 				}
4715 				sctp_send_shutdown(stcb, netp);
4716 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4717 				    stcb->sctp_ep, stcb, netp);
4718 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4719 				    stcb->sctp_ep, stcb, netp);
4720 			}
4721 			return;
4722 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4723 		    (asoc->stream_queue_cnt == 0)) {
4724 			struct sctp_nets *netp;
4725 
4726 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4727 				goto abort_out_now;
4728 			}
4729 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4730 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4731 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4732 			sctp_stop_timers_for_shutdown(stcb);
4733 			if (asoc->alternate) {
4734 				netp = asoc->alternate;
4735 			} else {
4736 				netp = asoc->primary_destination;
4737 			}
4738 			sctp_send_shutdown_ack(stcb, netp);
4739 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4740 			    stcb->sctp_ep, stcb, netp);
4741 			return;
4742 		}
4743 	}
4744 	/*
4745 	 * Now we are going to recycle net_ack for a different use...
4746 	 * HEADS UP.
4747 	 */
4748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4749 		net->net_ack = 0;
4750 	}
4751 
4752 	/*
4753 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4754 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4755 	 * automatically ensure that.
4756 	 */
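	/*
	 * Note (hedged): the DAC flag is carried in the SACK chunk's flags
	 * by a DAC-capable receiver; when it is 0 there is nothing extra to
	 * mark, so pinning this_sack_lowest_newack to cum_ack makes the
	 * later gap-ack strike pass behave as if no "lowest newly acked"
	 * hint were available.
	 */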
4757 	if ((asoc->sctp_cmt_on_off > 0) &&
4758 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4759 	    (cmt_dac_flag == 0)) {
4760 		this_sack_lowest_newack = cum_ack;
4761 	}
4762 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4763 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4764 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4765 	}
4766 	/* JRS - Use the congestion control given in the CC module */
4767 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4768 
4769 	/* Now are we exiting loss recovery ? */
4770 	if (will_exit_fast_recovery) {
4771 		/* Ok, we must exit fast recovery */
4772 		asoc->fast_retran_loss_recovery = 0;
4773 	}
4774 	if ((asoc->sat_t3_loss_recovery) &&
4775 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4776 		/* end satellite t3 loss recovery */
4777 		asoc->sat_t3_loss_recovery = 0;
4778 	}
4779 	/*
4780 	 * CMT Fast recovery
4781 	 */
4782 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4783 		if (net->will_exit_fast_recovery) {
4784 			/* Ok, we must exit fast recovery */
4785 			net->fast_retran_loss_recovery = 0;
4786 		}
4787 	}
4788 
4789 	/* Adjust and set the new rwnd value */
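	/*
	 * Sketch of the computation below: the peer's usable window is the
	 * advertised a_rwnd minus everything still counted as in flight,
	 * plus a per-chunk overhead allowance (sctp_peer_chunk_oh) per
	 * outstanding chunk.  For example, a_rwnd = 65535 with
	 * total_flight = 10000, total_flight_count = 10 and a 256-byte
	 * overhead setting would leave 65535 - (10000 + 10 * 256) = 52975.
	 * If the result drops below the SWS threshold it is treated as 0.
	 */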
4790 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4791 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4792 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4793 	}
4794 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4795 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4796 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4797 		/* SWS sender side engages */
4798 		asoc->peers_rwnd = 0;
4799 	}
4800 	if (asoc->peers_rwnd > old_rwnd) {
4801 		win_probe_recovery = 1;
4802 	}
4803 	/*
4804 	 * Now we must setup so we have a timer up for anyone with
4805 	 * outstanding data.
4806 	 */
4807 	done_once = 0;
4808 again:
4809 	j = 0;
4810 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4811 		if (win_probe_recovery && (net->window_probe)) {
4812 			win_probe_recovered = 1;
4813 			/*-
4814 			 * Find the first chunk that was used for a
4815 			 * window probe and clear the event. Put it
4816 			 * back into the send queue as if it had not
4817 			 * been sent.
4818 			 */
4819 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4820 				if (tp1->window_probe) {
4821 					sctp_window_probe_recovery(stcb, asoc, tp1);
4822 					break;
4823 				}
4824 			}
4825 		}
4826 		if (net->flight_size) {
4827 			j++;
4828 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4829 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4830 				    stcb->sctp_ep, stcb, net);
4831 			}
4832 			if (net->window_probe) {
4833 				net->window_probe = 0;
4834 			}
4835 		} else {
4836 			if (net->window_probe) {
4837 				/*
4838 				 * For window probes we must ensure that a
4839 				 * timer is still running there.
4840 				 */
4841 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4842 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4843 					    stcb->sctp_ep, stcb, net);
4844 
4845 				}
4846 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4847 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4848 				    stcb, net,
4849 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4850 			}
4851 		}
4852 	}
4853 	if ((j == 0) &&
4854 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4855 	    (asoc->sent_queue_retran_cnt == 0) &&
4856 	    (win_probe_recovered == 0) &&
4857 	    (done_once == 0)) {
4858 		/*
4859 		 * Huh, this should not happen unless all packets are
4860 		 * PR-SCTP and marked to be skipped, of course.
4861 		 */
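		/*
		 * Sketch: if the flight-size audit reports a problem, the
		 * counters are rebuilt from scratch by walking the sent
		 * queue: anything below SCTP_DATAGRAM_RESEND is counted
		 * back into the per-net and total flight, and chunks marked
		 * for retransmission are re-counted into
		 * sent_queue_retran_cnt.
		 */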
4862 		if (sctp_fs_audit(asoc)) {
4863 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4864 				net->flight_size = 0;
4865 			}
4866 			asoc->total_flight = 0;
4867 			asoc->total_flight_count = 0;
4868 			asoc->sent_queue_retran_cnt = 0;
4869 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4870 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4871 					sctp_flight_size_increase(tp1);
4872 					sctp_total_flight_increase(stcb, tp1);
4873 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4874 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4875 				}
4876 			}
4877 		}
4878 		done_once = 1;
4879 		goto again;
4880 	}
4881 	/*********************************************/
4882 	/* Here we perform PR-SCTP procedures        */
4883 	/* (section 4.2)                             */
4884 	/*********************************************/
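	/*
	 * Background (PR-SCTP, RFC 3758): advanced_peer_ack_point is the
	 * highest TSN the peer may treat as cumulatively acked even though
	 * abandoned chunks below it were never delivered.  It only moves
	 * forward, and when it gets ahead of the plain cum_ack we advertise
	 * it to the peer with a FORWARD-TSN chunk.
	 */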
4885 	/* C1. update advancedPeerAckPoint */
4886 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4887 		asoc->advanced_peer_ack_point = cum_ack;
4888 	}
4889 	/* C2. try to further move advancedPeerAckPoint ahead */
4890 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4891 		struct sctp_tmit_chunk *lchk;
4892 		uint32_t old_adv_peer_ack_point;
4893 
4894 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4895 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4896 		/* C3. See if we need to send a Fwd-TSN */
4897 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4898 			/*
4899 			 * ISSUE with ECN, see FWD-TSN processing.
4900 			 */
4901 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4902 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4903 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4904 				    old_adv_peer_ack_point);
4905 			}
4906 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4907 				send_forward_tsn(stcb, asoc);
4908 			} else if (lchk) {
4909 				/* try to FR fwd-tsn's that get lost too */
4910 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4911 					send_forward_tsn(stcb, asoc);
4912 				}
4913 			}
4914 		}
4915 		if (lchk) {
4916 			/* Assure a timer is up */
4917 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4918 			    stcb->sctp_ep, stcb, lchk->whoTo);
4919 		}
4920 	}
4921 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4922 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4923 		    a_rwnd,
4924 		    stcb->asoc.peers_rwnd,
4925 		    stcb->asoc.total_flight,
4926 		    stcb->asoc.total_output_queue_size);
4927 	}
4928 }
4929 
4930 void
4931 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4932 {
4933 	/* Copy cum-ack */
4934 	uint32_t cum_ack, a_rwnd;
4935 
4936 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4937 	/* Arrange so a_rwnd does NOT change */
4938 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
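	/*
	 * Note: a SHUTDOWN carries no advertised window, so we reconstruct
	 * the peer's last known a_rwnd as the usable window we track
	 * (peers_rwnd) plus what is in flight; the express SACK handler
	 * then subtracts the flight again, so the advertised window we work
	 * with is effectively unchanged.
	 */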
4939 
4940 	/* Now call the express sack handling */
4941 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4942 }
4943 
4944 static void
4945 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4946     struct sctp_stream_in *strmin)
4947 {
4948 	struct sctp_queued_to_read *ctl, *nctl;
4949 	struct sctp_association *asoc;
4950 	uint16_t tt;
4951 
4952 	asoc = &stcb->asoc;
4953 	tt = strmin->last_sequence_delivered;
4954 	/*
4955 	 * First deliver anything prior to and including the stream
4956 	 * sequence number that came in.
4957 	 */
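	/*
	 * Sketch of the two passes below: pass one drains every queued
	 * entry whose SSN is at or below tt (the stream's
	 * last_sequence_delivered, already advanced by the FWD-TSN
	 * processing); pass two then resumes strict in-order delivery from
	 * last_sequence_delivered + 1 for as long as the next expected SSN
	 * is present.  E.g. with tt = 7 and SSNs 5, 7, 8 and 10 queued,
	 * pass one delivers 5 and 7 and pass two delivers 8, stopping at
	 * the gap before 10.
	 */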
4958 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4959 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4960 			/* this is deliverable now */
4961 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4962 			/* subtract pending on streams */
4963 			asoc->size_on_all_streams -= ctl->length;
4964 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4965 			/* deliver it to at least the delivery-q */
4966 			if (stcb->sctp_socket) {
4967 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4968 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4969 				    ctl,
4970 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4971 			}
4972 		} else {
4973 			/* no more delivery now. */
4974 			break;
4975 		}
4976 	}
4977 	/*
4978 	 * Now we must deliver things in the queue the normal way, if any
4979 	 * are now ready.
4980 	 */
4981 	tt = strmin->last_sequence_delivered + 1;
4982 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4983 		if (tt == ctl->sinfo_ssn) {
4984 			/* this is deliverable now */
4985 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4986 			/* subtract pending on streams */
4987 			asoc->size_on_all_streams -= ctl->length;
4988 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4989 			/* deliver it to at least the delivery-q */
4990 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4991 			if (stcb->sctp_socket) {
4992 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4993 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4994 				    ctl,
4995 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4996 
4997 			}
4998 			tt = strmin->last_sequence_delivered + 1;
4999 		} else {
5000 			break;
5001 		}
5002 	}
5003 }
5004 
5005 static void
5006 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5007     struct sctp_association *asoc,
5008     uint16_t stream, uint16_t seq)
5009 {
5010 	struct sctp_tmit_chunk *chk, *nchk;
5011 
5012 	/* For each entry on here, see if we need to toss it. */
5013 	/*
5014 	 * For now, large messages held on the reasmqueue that are complete
5015 	 * will be tossed too. We could, in theory, do more work: spin
5016 	 * through, stop after dumping one message (i.e. on seeing the
5017 	 * start of a new message at the head), and call the delivery
5018 	 * function to see if it can be delivered. But for now we just dump
5019 	 * everything on the queue.
5020 	 */
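	/*
	 * Sketch of the loop below: the reassembly queue is walked in TSN
	 * order; fragments from other streams or unordered fragments are
	 * skipped, fragments whose stream_seq matches the purged SSN are
	 * freed (updating the PD-API bookkeeping and, if needed, the
	 * stream's delivery point), and the walk stops once a larger
	 * stream_seq for this stream is seen.
	 */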
5021 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5022 		/*
5023 		 * Do not toss it if on a different stream or marked for
5024 		 * unordered delivery in which case the stream sequence
5025 		 * number has no meaning.
5026 		 */
5027 		if ((chk->rec.data.stream_number != stream) ||
5028 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5029 			continue;
5030 		}
5031 		if (chk->rec.data.stream_seq == seq) {
5032 			/* It needs to be tossed */
5033 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5034 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5035 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5036 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5037 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5038 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5039 			}
5040 			asoc->size_on_reasm_queue -= chk->send_size;
5041 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5042 
5043 			/* Clear up any stream problem */
5044 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5045 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5046 				/*
5047 				 * We must pull this stream's delivery
5048 				 * sequence number forward, since the chunk
5049 				 * being skipped is not unordered. There is
5050 				 * a chance that, if the peer does not
5051 				 * include the last fragment in its FWD-TSN,
5052 				 * we WILL have a problem here, since a
5053 				 * partial chunk would be left in the queue
5054 				 * that may never become deliverable. Also,
5055 				 * if a partial delivery API has started,
5056 				 * the user may get a partial chunk and then
5057 				 * a new chunk on the next read. Really ugly,
5058 				 * but I see no way around it! Maybe a notify??
5059 				 */
5060 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5061 			}
5062 			if (chk->data) {
5063 				sctp_m_freem(chk->data);
5064 				chk->data = NULL;
5065 			}
5066 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5067 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5068 			/*
5069 			 * If the stream_seq is greater than the one being
5070 			 * purged, we are done.
5071 			 */
5072 			break;
5073 		}
5074 	}
5075 }
5076 
5077 
5078 void
5079 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5080     struct sctp_forward_tsn_chunk *fwd,
5081     int *abort_flag, struct mbuf *m, int offset)
5082 {
5083 	/* The pr-sctp fwd tsn */
5084 	/*
5085 	 * Here we perform all the data receiver side steps for processing
5086 	 * FwdTSN, as required by the PR-SCTP draft.
5087 	 *
5088 	 * Assume we get FwdTSN(x):
5089 	 * 1) update the local cumTSN to x,
5090 	 * 2) try to further advance the cumTSN to x + others we have,
5091 	 * 3) examine and update the re-ordering queue on pr-in-streams,
5092 	 * 4) clean up the re-assembly queue, and
5093 	 * 5) send a SACK to report where we are.
5094 	 */
5095 	struct sctp_association *asoc;
5096 	uint32_t new_cum_tsn, gap;
5097 	unsigned int i, fwd_sz, m_size;
5098 	uint32_t str_seq;
5099 	struct sctp_stream_in *strm;
5100 	struct sctp_tmit_chunk *chk, *nchk;
5101 	struct sctp_queued_to_read *ctl, *sv;
5102 
5103 	asoc = &stcb->asoc;
5104 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5105 		SCTPDBG(SCTP_DEBUG_INDATA1,
5106 		    "Bad size too small/big fwd-tsn\n");
5107 		return;
5108 	}
5109 	m_size = (stcb->asoc.mapping_array_size << 3);
5110 	/*************************************************************/
5111 	/* 1. Here we update local cumTSN and shift the bitmap array */
5112 	/*************************************************************/
5113 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5114 
5115 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5116 		/* Already got there ... */
5117 		return;
5118 	}
5119 	/*
5120 	 * Now we know the new TSN is more advanced; let's find the actual
5121 	 * gap.
5122 	 */
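	/*
	 * Note: SCTP_CALC_TSN_TO_GAP gives the bit offset of new_cum_tsn
	 * relative to mapping_array_base_tsn.  If that offset still fits in
	 * the mapping bitmap (m_size bits, i.e. mapping_array_size * 8),
	 * the bits up to the gap are marked present; if it does not fit,
	 * the whole map is reset and rebased just past new_cum_tsn.
	 */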
5123 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5124 	asoc->cumulative_tsn = new_cum_tsn;
5125 	if (gap >= m_size) {
5126 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5127 			struct mbuf *op_err;
5128 			char msg[SCTP_DIAG_INFO_LEN];
5129 
5130 			/*
5131 			 * Out of range (of single-byte chunks in the rwnd we
5132 			 * give out). This must be an attacker.
5133 			 */
5134 			*abort_flag = 1;
5135 			snprintf(msg, sizeof(msg),
5136 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5137 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5138 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5139 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5140 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5141 			return;
5142 		}
5143 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5144 
5145 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5146 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5147 		asoc->highest_tsn_inside_map = new_cum_tsn;
5148 
5149 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5150 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5151 
5152 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5153 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5154 		}
5155 	} else {
5156 		SCTP_TCB_LOCK_ASSERT(stcb);
5157 		for (i = 0; i <= gap; i++) {
5158 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5159 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5160 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5161 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5162 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5163 				}
5164 			}
5165 		}
5166 	}
5167 	/*************************************************************/
5168 	/* 2. Clear up re-assembly queue                             */
5169 	/*************************************************************/
5170 	/*
5171 	 * First service it if the pd-api is in progress, just in case we
5172 	 * can move it forward.
5173 	 */
5174 	if (asoc->fragmented_delivery_inprogress) {
5175 		sctp_service_reassembly(stcb, asoc);
5176 	}
5177 	/* For each entry on here, see if we need to toss it. */
5178 	/*
5179 	 * For now, large messages held on the reasmqueue that are complete
5180 	 * will be tossed too. We could, in theory, do more work: spin
5181 	 * through, stop after dumping one message (i.e. on seeing the
5182 	 * start of a new message at the head), and call the delivery
5183 	 * function to see if it can be delivered. But for now we just dump
5184 	 * everything on the queue.
5185 	 */
5186 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5187 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5188 			/* It needs to be tossed */
5189 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5190 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5191 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5192 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5193 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5194 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5195 			}
5196 			asoc->size_on_reasm_queue -= chk->send_size;
5197 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5198 
5199 			/* Clear up any stream problem */
5200 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5201 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5202 				/*
5203 				 * We must pull this stream's delivery
5204 				 * sequence number forward, since the chunk
5205 				 * being skipped is not unordered. There is
5206 				 * a chance that, if the peer does not
5207 				 * include the last fragment in its FWD-TSN,
5208 				 * we WILL have a problem here, since a
5209 				 * partial chunk would be left in the queue
5210 				 * that may never become deliverable. Also,
5211 				 * if a partial delivery API has started,
5212 				 * the user may get a partial chunk and then
5213 				 * a new chunk on the next read. Really ugly,
5214 				 * but I see no way around it! Maybe a notify??
5215 				 */
5216 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5217 			}
5218 			if (chk->data) {
5219 				sctp_m_freem(chk->data);
5220 				chk->data = NULL;
5221 			}
5222 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5223 		} else {
5224 			/*
5225 			 * Ok we have gone beyond the end of the fwd-tsn's
5226 			 * mark.
5227 			 */
5228 			break;
5229 		}
5230 	}
5231 	/*******************************************************/
5232 	/* 3. Update the PR-stream re-ordering queues and fix  */
5233 	/* delivery issues as needed.                       */
5234 	/*******************************************************/
5235 	fwd_sz -= sizeof(*fwd);
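	/*
	 * What remains of the chunk, if anything, is a list of
	 * struct sctp_strseq entries (a stream number / stream sequence
	 * pair per skipped ordered message); each one is used below to
	 * flush matching fragments, abort a partial delivery that was in
	 * progress on that message, and pull the stream's delivery point
	 * forward.
	 */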
5236 	if (m && fwd_sz) {
5237 		/* New method. */
5238 		unsigned int num_str;
5239 		struct sctp_strseq *stseq, strseqbuf;
5240 
5241 		offset += sizeof(*fwd);
5242 
5243 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5244 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5245 		for (i = 0; i < num_str; i++) {
5246 			uint16_t st;
5247 
5248 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5249 			    sizeof(struct sctp_strseq),
5250 			    (uint8_t *) & strseqbuf);
5251 			offset += sizeof(struct sctp_strseq);
5252 			if (stseq == NULL) {
5253 				break;
5254 			}
5255 			/* Convert */
5256 			st = ntohs(stseq->stream);
5257 			stseq->stream = st;
5258 			st = ntohs(stseq->sequence);
5259 			stseq->sequence = st;
5260 
5261 			/* now process */
5262 
5263 			/*
5264 			 * Ok, we now look for the stream/seq on the read
5265 			 * queue where it is not fully delivered. If we find
5266 			 * it, we transmute the read entry into a PDI_ABORTED.
5267 			 */
5268 			if (stseq->stream >= asoc->streamincnt) {
5269 				/* screwed up streams, stop!  */
5270 				break;
5271 			}
5272 			if ((asoc->str_of_pdapi == stseq->stream) &&
5273 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5274 				/*
5275 				 * If this is the one we are currently
5276 				 * partially delivering, then we no longer
5277 				 * are. Note this will change with the
5278 				 * reassembly re-write.
5279 				 */
5280 				asoc->fragmented_delivery_inprogress = 0;
5281 			}
5282 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5283 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5284 				if ((ctl->sinfo_stream == stseq->stream) &&
5285 				    (ctl->sinfo_ssn == stseq->sequence)) {
5286 					str_seq = (stseq->stream << 16) | stseq->sequence;
5287 					ctl->end_added = 1;
5288 					ctl->pdapi_aborted = 1;
5289 					sv = stcb->asoc.control_pdapi;
5290 					stcb->asoc.control_pdapi = ctl;
5291 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5292 					    stcb,
5293 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5294 					    (void *)&str_seq,
5295 					    SCTP_SO_NOT_LOCKED);
5296 					stcb->asoc.control_pdapi = sv;
5297 					break;
5298 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5299 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5300 					/* We are past our victim SSN */
5301 					break;
5302 				}
5303 			}
5304 			strm = &asoc->strmin[stseq->stream];
5305 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5306 				/* Update the sequence number */
5307 				strm->last_sequence_delivered = stseq->sequence;
5308 			}
5309 			/* now kick the stream the new way */
5310 			/* sa_ignore NO_NULL_CHK */
5311 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5312 		}
5313 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5314 	}
5315 	/*
5316 	 * Now slide the mapping arrays forward.
5317 	 */
5318 	sctp_slide_mapping_arrays(stcb);
5319 
5320 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5321 		/* now lets kick out and check for more fragmented delivery */
5322 		/* sa_ignore NO_NULL_CHK */
5323 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5324 	}
5325 }
5326