xref: /freebsd/sys/netinet/sctp_indata.c (revision 9fc5c47fa5c7fa58d61245f0408611943e613164)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0, to keep SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
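
/*
 * Worked example of the computation above (a sketch; all numbers are
 * hypothetical): with a 64 KB receive buffer, 10 chunks of 1 KB each held
 * on the reassembly queue and nothing on the stream queues, the advertised
 * window is roughly
 *
 *	64 KB - (10 * 1 KB + 10 * MSIZE) - my_rwnd_control_len
 *
 * i.e. every chunk we hold costs its payload plus one MSIZE of mbuf
 * overhead, and the control overhead is charged once at the end.
 */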


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
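
/*
 * Typical usage (a sketch, not taken from one specific call site): build
 * an entry for a complete message and hand it to the receive socket
 * buffer. Note that whoFrom's refcount is bumped above and is released
 * when the readq entry is freed.
 *
 *	control = sctp_build_readq_entry(stcb, net, tsn, ppid, context,
 *	    sid, ssn, flags, m);
 *	if (control != NULL)
 *		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
 *		    &stcb->sctp_socket->so_rcv, 1,
 *		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
 */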


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
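
/*
 * Layout sketch of the mbuf returned above: the enabled sections are
 * packed back to back, each padded out per CMSG_SPACE(), e.g. with all
 * three options on:
 *
 *	[cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo]
 *
 * Sections whose socket option is off are simply absent, and the whole
 * region is zeroed first so no padding leaks kernel memory.
 */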


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
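	/*
	 * gap is the distance from the mapping array base TSN to this TSN,
	 * computed with serial number arithmetic so it stays correct across
	 * a 32-bit TSN wrap.
	 */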
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
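
/*
 * A note on the two-map scheme used above (an interpretation derived from
 * this file, not a spec quote): mapping_array tracks TSNs that are
 * received but still revokable (renegable), while nr_mapping_array tracks
 * TSNs we will never give back. Marking a TSN non-revokable just moves
 * its bit from the first map to the second and, if needed, recomputes the
 * highest TSN held in each map.
 */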


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next ordered sequence to deliver in its
			 * stream, and not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
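
/*
 * PD-API flow in the function above (a summary, not new behavior): the
 * FIRST fragment creates the readq entry and saves it as control_pdapi;
 * MIDDLE/LAST fragments are appended to that entry with
 * sctp_append_to_readq(); a LAST fragment clears
 * fragmented_delivery_inprogress, which then lets any in-order messages
 * queued behind the reassembled one drain to the socket as well.
 */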

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
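	/*
	 * For reference (a sketch, not the actual macro definition): the
	 * SCTP_SSN_GT()/SCTP_SSN_GE() comparisons used below are 16-bit
	 * serial number arithmetic in the style of RFC 1982, e.g.
	 *
	 *	#define SSN_GT(a, b) (((a) != (b)) && \
	 *	    ((uint16_t)((uint16_t)(a) - (uint16_t)(b)) < (1U << 15)))
	 *
	 * so SSN 0 compares "greater than" SSN 65535 after a wrap.
	 */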
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for sid:%u ssn:%u tsn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream, (uint32_t) control->sinfo_ssn,
	    (uint32_t) control->sinfo_tsn,
	    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
		SCTP_SOCKET_UNLOCK(so, 1);
		return;
	}
#endif
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we can always hold the chunk on the
				 * d-queue, and we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue (via *t_size), and 1 if all of
 * the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's OK to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
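			/*
			 * pd_point is the partial delivery threshold: the
			 * smaller of a fixed fraction of the receive
			 * buffer and the endpoint's configured
			 * partial_delivery_point. A message becomes
			 * deliverable once it is complete or at least
			 * pd_point bytes of it are queued in sequence.
			 */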
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so, pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either a LAST or
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
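	/*
	 * Summary of the neighbor audits above (B-bit = FIRST, E-bit =
	 * LAST):
	 *
	 *	prev is FIRST/MIDDLE -> chk must be MIDDLE or LAST, with
	 *	    the same SID, U-bit and (if ordered) SSN
	 *	prev is LAST         -> chk must be a FIRST
	 *	next is FIRST        -> chk must be a LAST
	 *	next is MIDDLE/LAST  -> chk must be FIRST or MIDDLE, with
	 *	    the same SID, U-bit and (if ordered) SSN
	 *
	 * Any violation is a protocol error and aborts the association.
	 */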
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but that is doubtful. It is too bad I must worry about evil crackers; sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping array at its max size, toss it */
		return (0);
	}
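	/*
	 * With a 512-byte mapping array this caps the acceptable TSN
	 * spread at 512 * 8 = 4096 TSNs beyond the base (see the stream
	 * sequence wrap argument further down).
	 */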
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope, not in the valid range, dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_NOWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
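	/*
	 * Layout of the operational error queued above (a sketch): two
	 * back-to-back parameter headers,
	 *
	 *	[type = SCTP_CAUSE_INVALID_STREAM | length = 8]
	 *	[type = stream id                 | length = 0 (reserved)]
	 */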
1466 	/*
1467 	 * Before we continue lets validate that we are not being fooled by
1468 	 * an evil attacker. We can only have 4k chunks based on our TSN
1469 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1470 	 * way our stream sequence numbers could have wrapped. We of course
1471 	 * only validate the FIRST fragment so the bit must be set.
1472 	 */
1473 	strmseq = ntohs(ch->dp.stream_sequence);
1474 #ifdef SCTP_ASOCLOG_OF_TSNS
1475 	SCTP_TCB_LOCK_ASSERT(stcb);
1476 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1477 		asoc->tsn_in_at = 0;
1478 		asoc->tsn_in_wrapped = 1;
1479 	}
1480 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1481 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1482 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1483 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1484 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1485 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1486 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1487 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1488 	asoc->tsn_in_at++;
1489 #endif
1490 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1491 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1492 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1493 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1494 		/* The incoming sseq is behind where we last delivered? */
1495 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1496 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1497 
1498 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1499 		    asoc->strmin[strmno].last_sequence_delivered,
1500 		    tsn, strmno, strmseq);
1501 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1502 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1503 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1504 		*abort_flag = 1;
1505 		return (0);
1506 	}
1507 	/************************************
1508 	 * From here down we may find ch-> invalid
1509 	 * so it's a good idea NOT to use it.
1510 	 *************************************/
1511 
1512 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1513 	if (last_chunk == 0) {
1514 		dmbuf = SCTP_M_COPYM(*m,
1515 		    (offset + sizeof(struct sctp_data_chunk)),
1516 		    the_len, M_NOWAIT);
1517 #ifdef SCTP_MBUF_LOGGING
1518 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1519 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1520 		}
1521 #endif
1522 	} else {
1523 		/* We can steal the last chunk */
1524 		int l_len;
1525 
1526 		dmbuf = *m;
1527 		/* lop off the top part */
1528 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1529 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1530 			l_len = SCTP_BUF_LEN(dmbuf);
1531 		} else {
1532 			/*
1533 			 * need to count up the size; hopefully we do not
1534 			 * hit this too often :-0
1535 			 */
1536 			struct mbuf *lat;
1537 
1538 			l_len = 0;
1539 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1540 				l_len += SCTP_BUF_LEN(lat);
1541 			}
1542 		}
1543 		if (l_len > the_len) {
1544 			/* Trim the excess bytes off the end too */
1545 			m_adj(dmbuf, -(l_len - the_len));
1546 		}
1547 	}
1548 	if (dmbuf == NULL) {
1549 		SCTP_STAT_INCR(sctps_nomem);
1550 		return (0);
1551 	}
1552 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1553 	    asoc->fragmented_delivery_inprogress == 0 &&
1554 	    TAILQ_EMPTY(&asoc->resetHead) &&
1555 	    ((ordered == 0) ||
1556 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1557 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1558 		/* Candidate for express delivery */
1559 		/*
1560 		 * It's not fragmented, no PD-API is up, nothing is in the
1561 		 * delivery queue, it's un-ordered OR ordered and the next to
1562 		 * deliver AND nothing else is stuck on the stream queue,
1563 		 * and there is room for it in the socket buffer. Let's just
1564 		 * stuff it up the buffer....
1565 		 */
1566 
1567 		/* It would be nice to avoid this copy if we could :< */
1568 		sctp_alloc_a_readq(stcb, control);
1569 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1570 		    protocol_id,
1571 		    strmno, strmseq,
1572 		    chunk_flags,
1573 		    dmbuf);
1574 		if (control == NULL) {
1575 			goto failed_express_del;
1576 		}
1577 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1578 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1579 			asoc->highest_tsn_inside_nr_map = tsn;
1580 		}
1581 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1582 		    control, &stcb->sctp_socket->so_rcv,
1583 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1584 
1585 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1586 			/* for ordered, bump what we delivered */
1587 			asoc->strmin[strmno].last_sequence_delivered++;
1588 		}
1589 		SCTP_STAT_INCR(sctps_recvexpress);
1590 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1591 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1592 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1593 		}
1594 		control = NULL;
1595 
1596 		goto finish_express_del;
1597 	}
1598 failed_express_del:
1599 	/* If we reach here this is a new chunk */
1600 	chk = NULL;
1601 	control = NULL;
1602 	/* Express for fragmented delivery? */
1603 	if ((asoc->fragmented_delivery_inprogress) &&
1604 	    (stcb->asoc.control_pdapi) &&
1605 	    (asoc->str_of_pdapi == strmno) &&
1606 	    (asoc->ssn_of_pdapi == strmseq)
1607 	    ) {
1608 		control = stcb->asoc.control_pdapi;
1609 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1610 			/* Can't be another first? */
1611 			goto failed_pdapi_express_del;
1612 		}
1613 		if (tsn == (control->sinfo_tsn + 1)) {
1614 			/* Yep, we can add it on */
1615 			int end = 0;
1616 
1617 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1618 				end = 1;
1619 			}
1620 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1621 			    tsn,
1622 			    &stcb->sctp_socket->so_rcv)) {
1623 				SCTP_PRINTF("Append fails end:%d\n", end);
1624 				goto failed_pdapi_express_del;
1625 			}
1626 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1627 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1628 				asoc->highest_tsn_inside_nr_map = tsn;
1629 			}
1630 			SCTP_STAT_INCR(sctps_recvexpressm);
1631 			asoc->tsn_last_delivered = tsn;
1632 			asoc->fragment_flags = chunk_flags;
1633 			asoc->tsn_of_pdapi_last_delivered = tsn;
1634 			asoc->last_flags_delivered = chunk_flags;
1635 			asoc->last_strm_seq_delivered = strmseq;
1636 			asoc->last_strm_no_delivered = strmno;
1637 			if (end) {
1638 				/* clean up the flags and such */
1639 				asoc->fragmented_delivery_inprogress = 0;
1640 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1641 					asoc->strmin[strmno].last_sequence_delivered++;
1642 				}
1643 				stcb->asoc.control_pdapi = NULL;
1644 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1645 					/*
1646 					 * There could be another message
1647 					 * ready
1648 					 */
1649 					need_reasm_check = 1;
1650 				}
1651 			}
1652 			control = NULL;
1653 			goto finish_express_del;
1654 		}
1655 	}
1656 failed_pdapi_express_del:
1657 	control = NULL;
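	/*
	 * With sctp_do_drain disabled we will never renege, so the TSN can
	 * go straight into the non-renegable (nr) map; otherwise it is
	 * tracked in the regular mapping array, whose entries may later be
	 * revoked if we are forced to drain (renege on) queued data.
	 */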
1658 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1659 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1660 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1661 			asoc->highest_tsn_inside_nr_map = tsn;
1662 		}
1663 	} else {
1664 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1665 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1666 			asoc->highest_tsn_inside_map = tsn;
1667 		}
1668 	}
1669 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1670 		sctp_alloc_a_chunk(stcb, chk);
1671 		if (chk == NULL) {
1672 			/* No memory so we drop the chunk */
1673 			SCTP_STAT_INCR(sctps_nomem);
1674 			if (last_chunk == 0) {
1675 				/* we copied it, free the copy */
1676 				sctp_m_freem(dmbuf);
1677 			}
1678 			return (0);
1679 		}
1680 		chk->rec.data.TSN_seq = tsn;
1681 		chk->no_fr_allowed = 0;
1682 		chk->rec.data.stream_seq = strmseq;
1683 		chk->rec.data.stream_number = strmno;
1684 		chk->rec.data.payloadtype = protocol_id;
1685 		chk->rec.data.context = stcb->asoc.context;
1686 		chk->rec.data.doing_fast_retransmit = 0;
1687 		chk->rec.data.rcv_flags = chunk_flags;
1688 		chk->asoc = asoc;
1689 		chk->send_size = the_len;
1690 		chk->whoTo = net;
1691 		atomic_add_int(&net->ref_count, 1);
1692 		chk->data = dmbuf;
1693 	} else {
1694 		sctp_alloc_a_readq(stcb, control);
1695 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1696 		    protocol_id,
1697 		    strmno, strmseq,
1698 		    chunk_flags,
1699 		    dmbuf);
1700 		if (control == NULL) {
1701 			/* No memory so we drop the chunk */
1702 			SCTP_STAT_INCR(sctps_nomem);
1703 			if (last_chunk == 0) {
1704 				/* we copied it, free the copy */
1705 				sctp_m_freem(dmbuf);
1706 			}
1707 			return (0);
1708 		}
1709 		control->length = the_len;
1710 	}
1711 
1712 	/* Mark it as received */
1713 	/* Now queue it where it belongs */
1714 	if (control != NULL) {
1715 		/* First a sanity check */
1716 		if (asoc->fragmented_delivery_inprogress) {
1717 			/*
1718 			 * Ok, we have a fragmented delivery in progress. If
1719 			 * this chunk is next to deliver OR belongs in our
1720 			 * view to the reassembly queue, the peer is evil or
1721 			 * broken.
1722 			 */
1723 			uint32_t estimate_tsn;
1724 
1725 			estimate_tsn = asoc->tsn_last_delivered + 1;
1726 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1727 			    (estimate_tsn == control->sinfo_tsn)) {
1728 				/* Evil/Broken peer */
1729 				sctp_m_freem(control->data);
1730 				control->data = NULL;
1731 				if (control->whoFrom) {
1732 					sctp_free_remote_addr(control->whoFrom);
1733 					control->whoFrom = NULL;
1734 				}
1735 				sctp_free_a_readq(stcb, control);
1736 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1737 				    tsn, strmno, strmseq);
1738 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1739 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1740 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1741 				*abort_flag = 1;
1742 				if (last_chunk) {
1743 					*m = NULL;
1744 				}
1745 				return (0);
1746 			} else {
1747 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1748 					sctp_m_freem(control->data);
1749 					control->data = NULL;
1750 					if (control->whoFrom) {
1751 						sctp_free_remote_addr(control->whoFrom);
1752 						control->whoFrom = NULL;
1753 					}
1754 					sctp_free_a_readq(stcb, control);
1755 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1756 					    tsn, strmno, strmseq);
1757 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1758 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1759 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1760 					*abort_flag = 1;
1761 					if (last_chunk) {
1762 						*m = NULL;
1763 					}
1764 					return (0);
1765 				}
1766 			}
1767 		} else {
1768 			/* No PDAPI running */
1769 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1770 				/*
1771 				 * Reassembly queue is NOT empty; validate
1772 				 * that this tsn does not need to be in the
1773 				 * reassembly queue. If it does, then our peer
1774 				 * is broken or evil.
1775 				 */
1776 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1777 					sctp_m_freem(control->data);
1778 					control->data = NULL;
1779 					if (control->whoFrom) {
1780 						sctp_free_remote_addr(control->whoFrom);
1781 						control->whoFrom = NULL;
1782 					}
1783 					sctp_free_a_readq(stcb, control);
1784 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1785 					    tsn, strmno, strmseq);
1786 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1787 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1788 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1789 					*abort_flag = 1;
1790 					if (last_chunk) {
1791 						*m = NULL;
1792 					}
1793 					return (0);
1794 				}
1795 			}
1796 		}
1797 		/* ok, if we reach here we have passed the sanity checks */
1798 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1799 			/* queue directly into socket buffer */
1800 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1801 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1802 			    control,
1803 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1804 		} else {
1805 			/*
1806 			 * Special check for when streams are resetting. We
1807 			 * could be smarter about this and check the
1808 			 * actual stream to see if it is not being reset...
1809 			 * that way we would not create a HOLB when amongst
1810 			 * streams being reset and those not being reset.
1811 			 *
1812 			 * We take complete messages that have a stream reset
1813 			 * intervening (aka the TSN is after where our
1814 			 * cum-ack needs to be) off and put them on a
1815 			 * pending_reply_queue. The reassembly ones we do
1816 			 * not have to worry about since they are all sorted
1817 			 * and processed in TSN order. It is only the
1818 			 * singletons I must worry about.
1819 			 */
1820 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1821 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1822 				/*
1823 				 * yep, it's past where we need to reset... go
1824 				 * ahead and queue it.
1825 				 */
1826 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1827 					/* first one on */
1828 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1829 				} else {
1830 					struct sctp_queued_to_read *ctlOn,
1831 					                   *nctlOn;
1832 					unsigned char inserted = 0;
1833 
1834 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1835 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1836 							continue;
1837 						} else {
1838 							/* found it */
1839 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1840 							inserted = 1;
1841 							break;
1842 						}
1843 					}
1844 					if (inserted == 0) {
1845 						/*
1846 						 * not inserted by the loop
1847 						 * above, so it must be put
1848 						 * at the end of the queue.
1849 						 */
1850 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1851 					}
1852 				}
1853 			} else {
1854 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1855 				if (*abort_flag) {
1856 					if (last_chunk) {
1857 						*m = NULL;
1858 					}
1859 					return (0);
1860 				}
1861 			}
1862 		}
1863 	} else {
1864 		/* Into the re-assembly queue */
1865 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1866 		if (*abort_flag) {
1867 			/*
1868 			 * the assoc is now gone and chk was put onto the
1869 			 * reasm queue, which has all been freed.
1870 			 */
1871 			if (last_chunk) {
1872 				*m = NULL;
1873 			}
1874 			return (0);
1875 		}
1876 	}
1877 finish_express_del:
1878 	if (tsn == (asoc->cumulative_tsn + 1)) {
1879 		/* Update cum-ack */
1880 		asoc->cumulative_tsn = tsn;
1881 	}
1882 	if (last_chunk) {
1883 		*m = NULL;
1884 	}
1885 	if (ordered) {
1886 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1887 	} else {
1888 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1889 	}
1890 	SCTP_STAT_INCR(sctps_recvdata);
1891 	/* Set it present please */
1892 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1893 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1894 	}
1895 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1896 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1897 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1898 	}
1899 	/* check the special flag for stream resets */
1900 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1901 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1902 		/*
1903 		 * we have finished working through the backlogged TSNs; now
1904 		 * it is time to reset streams. 1: call reset function. 2: free
1905 		 * pending_reply space. 3: distribute any chunks in
1906 		 * pending_reply_queue.
1907 		 */
1908 		struct sctp_queued_to_read *ctl, *nctl;
1909 
1910 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1911 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1912 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
1913 		SCTP_FREE(liste, SCTP_M_STRESET);
1914 		/* sa_ignore FREED_MEMORY */
1915 		liste = TAILQ_FIRST(&asoc->resetHead);
1916 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1917 			/* All can be removed */
1918 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1919 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1920 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1921 				if (*abort_flag) {
1922 					return (0);
1923 				}
1924 			}
1925 		} else {
1926 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1927 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1928 					break;
1929 				}
1930 				/*
1931 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1932 				 * process it, which is the NOT of
1933 				 * ctl->sinfo_tsn > liste->tsn
1934 				 */
1935 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1936 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1937 				if (*abort_flag) {
1938 					return (0);
1939 				}
1940 			}
1941 		}
1942 		/*
1943 		 * Now service reassembly to pick up anything that has been
1944 		 * held on the reassembly queue.
1945 		 */
1946 		sctp_deliver_reasm_check(stcb, asoc);
1947 		need_reasm_check = 0;
1948 	}
1949 	if (need_reasm_check) {
1950 		/* Another one waits ? */
1951 		sctp_deliver_reasm_check(stcb, asoc);
1952 	}
1953 	return (1);
1954 }
1955 
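/*
 * Lookup table used when scanning the mapping arrays: for a byte formed
 * by OR'ing nr_mapping_array and mapping_array, the entry gives the
 * number of consecutive 1 bits starting at the least significant bit,
 * i.e. how many in-sequence TSNs that byte accounts for. For example,
 * sctp_map_lookup_tab[0x07] == 3 and sctp_map_lookup_tab[0xff] == 8.
 */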
1956 int8_t sctp_map_lookup_tab[256] = {
1957 	0, 1, 0, 2, 0, 1, 0, 3,
1958 	0, 1, 0, 2, 0, 1, 0, 4,
1959 	0, 1, 0, 2, 0, 1, 0, 3,
1960 	0, 1, 0, 2, 0, 1, 0, 5,
1961 	0, 1, 0, 2, 0, 1, 0, 3,
1962 	0, 1, 0, 2, 0, 1, 0, 4,
1963 	0, 1, 0, 2, 0, 1, 0, 3,
1964 	0, 1, 0, 2, 0, 1, 0, 6,
1965 	0, 1, 0, 2, 0, 1, 0, 3,
1966 	0, 1, 0, 2, 0, 1, 0, 4,
1967 	0, 1, 0, 2, 0, 1, 0, 3,
1968 	0, 1, 0, 2, 0, 1, 0, 5,
1969 	0, 1, 0, 2, 0, 1, 0, 3,
1970 	0, 1, 0, 2, 0, 1, 0, 4,
1971 	0, 1, 0, 2, 0, 1, 0, 3,
1972 	0, 1, 0, 2, 0, 1, 0, 7,
1973 	0, 1, 0, 2, 0, 1, 0, 3,
1974 	0, 1, 0, 2, 0, 1, 0, 4,
1975 	0, 1, 0, 2, 0, 1, 0, 3,
1976 	0, 1, 0, 2, 0, 1, 0, 5,
1977 	0, 1, 0, 2, 0, 1, 0, 3,
1978 	0, 1, 0, 2, 0, 1, 0, 4,
1979 	0, 1, 0, 2, 0, 1, 0, 3,
1980 	0, 1, 0, 2, 0, 1, 0, 6,
1981 	0, 1, 0, 2, 0, 1, 0, 3,
1982 	0, 1, 0, 2, 0, 1, 0, 4,
1983 	0, 1, 0, 2, 0, 1, 0, 3,
1984 	0, 1, 0, 2, 0, 1, 0, 5,
1985 	0, 1, 0, 2, 0, 1, 0, 3,
1986 	0, 1, 0, 2, 0, 1, 0, 4,
1987 	0, 1, 0, 2, 0, 1, 0, 3,
1988 	0, 1, 0, 2, 0, 1, 0, 8
1989 };
1990 
1991 
1992 void
1993 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1994 {
1995 	/*
1996 	 * Now we also need to check the mapping array in a couple of ways.
1997 	 * 1) Did we move the cum-ack point?
1998 	 *
1999 	 * When you first glance at this you might think that all entries that
2000 	 * make up the position of the cum-ack would be in the nr-mapping
2001 	 * array only... i.e. things up to the cum-ack are always
2002 	 * deliverable. That's true with one exception: when it's a fragmented
2003 	 * message we may not deliver the data until some threshold (or all
2004 	 * of it) is in place. So we must OR the nr_mapping_array and
2005 	 * mapping_array to get a true picture of the cum-ack.
2006 	 */
2007 	struct sctp_association *asoc;
2008 	int at;
2009 	uint8_t val;
2010 	int slide_from, slide_end, lgap, distance;
2011 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2012 
2013 	asoc = &stcb->asoc;
2014 
2015 	old_cumack = asoc->cumulative_tsn;
2016 	old_base = asoc->mapping_array_base_tsn;
2017 	old_highest = asoc->highest_tsn_inside_map;
2018 	/*
2019 	 * We could probably improve this a small bit by calculating the
2020 	 * offset of the current cum-ack as the starting point.
2021 	 */
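	/*
	 * Example: if the OR'd bytes are { 0xff, 0x07, ... }, the scan below
	 * yields at = 8 + 3 = 11, so the new cumulative_tsn becomes
	 * mapping_array_base_tsn + 10.
	 */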
2022 	at = 0;
2023 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2024 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2025 		if (val == 0xff) {
2026 			at += 8;
2027 		} else {
2028 			/* there is a 0 bit */
2029 			at += sctp_map_lookup_tab[val];
2030 			break;
2031 		}
2032 	}
2033 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2034 
2035 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2036 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2037 #ifdef INVARIANTS
2038 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2039 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2040 #else
2041 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2042 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2043 		sctp_print_mapping_array(asoc);
2044 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2045 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2046 		}
2047 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2048 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2049 #endif
2050 	}
2051 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2052 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2053 	} else {
2054 		highest_tsn = asoc->highest_tsn_inside_map;
2055 	}
2056 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2057 		/* The complete array was completed by a single FR */
2058 		/* highest becomes the cum-ack */
2059 		int clr;
2060 
2061 #ifdef INVARIANTS
2062 		unsigned int i;
2063 
2064 #endif
2065 
2066 		/* clear the array */
2067 		clr = ((at + 7) >> 3);
2068 		if (clr > asoc->mapping_array_size) {
2069 			clr = asoc->mapping_array_size;
2070 		}
2071 		memset(asoc->mapping_array, 0, clr);
2072 		memset(asoc->nr_mapping_array, 0, clr);
2073 #ifdef INVARIANTS
2074 		for (i = 0; i < asoc->mapping_array_size; i++) {
2075 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2076 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2077 				sctp_print_mapping_array(asoc);
2078 			}
2079 		}
2080 #endif
2081 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2082 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2083 	} else if (at >= 8) {
2084 		/* we can slide the mapping array down */
2085 		/* slide_from holds where we hit the first NON 0xff byte */
2086 
2087 		/*
2088 		 * now calculate the ceiling of the move using our highest
2089 		 * TSN value
2090 		 */
2091 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2092 		slide_end = (lgap >> 3);
2093 		if (slide_end < slide_from) {
2094 			sctp_print_mapping_array(asoc);
2095 #ifdef INVARIANTS
2096 			panic("impossible slide");
2097 #else
2098 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2099 			    lgap, slide_end, slide_from, at);
2100 			return;
2101 #endif
2102 		}
2103 		if (slide_end > asoc->mapping_array_size) {
2104 #ifdef INVARIANTS
2105 			panic("would overrun buffer");
2106 #else
2107 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2108 			    asoc->mapping_array_size, slide_end);
2109 			slide_end = asoc->mapping_array_size;
2110 #endif
2111 		}
2112 		distance = (slide_end - slide_from) + 1;
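		/*
		 * Example: with slide_from = 2 and lgap = 25, slide_end is 3
		 * and distance is 2; in the else branch below, bytes 2..3 of
		 * both arrays move down to bytes 0..1 and
		 * mapping_array_base_tsn advances by (slide_from << 3) = 16.
		 */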
2113 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2114 			sctp_log_map(old_base, old_cumack, old_highest,
2115 			    SCTP_MAP_PREPARE_SLIDE);
2116 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2117 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2118 		}
2119 		if (distance + slide_from > asoc->mapping_array_size ||
2120 		    distance < 0) {
2121 			/*
2122 			 * Here we do NOT slide forward the array so that
2123 			 * hopefully when more data comes in to fill it up
2124 			 * we will be able to slide it forward. Really I
2125 			 * don't think this should happen :-0
2126 			 */
2127 
2128 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2129 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2130 				    (uint32_t) asoc->mapping_array_size,
2131 				    SCTP_MAP_SLIDE_NONE);
2132 			}
2133 		} else {
2134 			int ii;
2135 
2136 			for (ii = 0; ii < distance; ii++) {
2137 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2138 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2139 
2140 			}
2141 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2142 				asoc->mapping_array[ii] = 0;
2143 				asoc->nr_mapping_array[ii] = 0;
2144 			}
2145 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2146 				asoc->highest_tsn_inside_map += (slide_from << 3);
2147 			}
2148 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2149 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2150 			}
2151 			asoc->mapping_array_base_tsn += (slide_from << 3);
2152 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2153 				sctp_log_map(asoc->mapping_array_base_tsn,
2154 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2155 				    SCTP_MAP_SLIDE_RESULT);
2156 			}
2157 		}
2158 	}
2159 }
2160 
2161 void
2162 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2163 {
2164 	struct sctp_association *asoc;
2165 	uint32_t highest_tsn;
2166 
2167 	asoc = &stcb->asoc;
2168 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2169 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2170 	} else {
2171 		highest_tsn = asoc->highest_tsn_inside_map;
2172 	}
2173 
2174 	/*
2175 	 * Now we need to see if we need to queue a sack or just start the
2176 	 * timer (if allowed).
2177 	 */
2178 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2179 		/*
2180 		 * Ok, special case: in the SHUTDOWN-SENT case, here we make
2181 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2182 		 * a SACK
2183 		 */
2184 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2185 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2186 			    stcb->sctp_ep, stcb, NULL,
2187 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2188 		}
2189 		sctp_send_shutdown(stcb,
2190 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2191 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2192 	} else {
2193 		int is_a_gap;
2194 
2195 		/* is there a gap now ? */
2196 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2197 
2198 		/*
2199 		 * CMT DAC algorithm: increase number of packets received
2200 		 * since last ack
2201 		 */
2202 		stcb->asoc.cmt_dac_pkts_rcvd++;
2203 
2204 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2205 							 * SACK */
2206 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2207 							 * longer is one */
2208 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2209 		    (is_a_gap) ||	/* is still a gap */
2210 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2211 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2212 		    ) {
2213 
2214 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2215 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2216 			    (stcb->asoc.send_sack == 0) &&
2217 			    (stcb->asoc.numduptsns == 0) &&
2218 			    (stcb->asoc.delayed_ack) &&
2219 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2220 
2221 				/*
2222 				 * CMT DAC algorithm: With CMT, delay acks
2223 				 * CMT DAC algorithm: With CMT, delay acks
2224 				 * even in the face of reordering.
2225 				 *
2226 				 * Therefore, acks that do not have to be
2227 				 * sent because of the above reasons will be
2228 				 * delayed. That is, acks that would have
2229 				 * been sent due to gap reports will be
2230 				 * delayed with DAC. Start the delayed ack
2231 				 * timer.
2232 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2233 				    stcb->sctp_ep, stcb, NULL);
2234 			} else {
2235 				/*
2236 				 * Ok we must build a SACK since the timer
2237 				 * is pending, we got our first packet OR
2238 				 * there are gaps or duplicates.
2239 				 */
2240 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2241 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2242 			}
2243 		} else {
2244 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2245 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2246 				    stcb->sctp_ep, stcb, NULL);
2247 			}
2248 		}
2249 	}
2250 }
2251 
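/*
 * Service the queues: finish any partial (PD-API) delivery already in
 * progress, then, if the first message on the reassembly queue is
 * deliverable (complete, or large enough to cross the partial delivery
 * point), start delivering it as well.
 */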
2252 void
2253 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2254 {
2255 	struct sctp_tmit_chunk *chk;
2256 	uint32_t tsize, pd_point;
2257 	uint16_t nxt_todel;
2258 
2259 	if (asoc->fragmented_delivery_inprogress) {
2260 		sctp_service_reassembly(stcb, asoc);
2261 	}
2262 	/* Can we proceed further, i.e. the PD-API is complete */
2263 	if (asoc->fragmented_delivery_inprogress) {
2264 		/* no */
2265 		return;
2266 	}
2267 	/*
2268 	 * Now, is there some other chunk I can deliver from the reassembly
2269 	 * queue?
2270 	 */
2271 doit_again:
2272 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2273 	if (chk == NULL) {
2274 		asoc->size_on_reasm_queue = 0;
2275 		asoc->cnt_on_reasm_queue = 0;
2276 		return;
2277 	}
2278 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2279 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2280 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2281 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2282 		/*
2283 		 * Yep, the first one is here. We set up to start reception
2284 		 * by backing down the TSN just in case we can't deliver.
2285 		 */
2286 
2287 		/*
2288 		 * Before we start, though, either all of the message should
2289 		 * be here, or at least enough of it to cross the partial
2290 		 * delivery point, so that something can be delivered.
2291 		 */
2292 		if (stcb->sctp_socket) {
2293 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2294 			    stcb->sctp_ep->partial_delivery_point);
2295 		} else {
2296 			pd_point = stcb->sctp_ep->partial_delivery_point;
2297 		}
2298 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2299 			asoc->fragmented_delivery_inprogress = 1;
2300 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2301 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2302 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2303 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2304 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2305 			sctp_service_reassembly(stcb, asoc);
2306 			if (asoc->fragmented_delivery_inprogress == 0) {
2307 				goto doit_again;
2308 			}
2309 		}
2310 	}
2311 }
2312 
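/*
 * Process all DATA chunks in a received packet. Returns 0 on normal
 * completion, 1 if the first chunk header could not be read, and 2 if
 * the association was aborted while processing.
 */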
2313 int
2314 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2315     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2316     struct sctp_nets *net, uint32_t * high_tsn)
2317 {
2318 	struct sctp_data_chunk *ch, chunk_buf;
2319 	struct sctp_association *asoc;
2320 	int num_chunks = 0;	/* number of control chunks processed */
2321 	int stop_proc = 0;
2322 	int chk_length, break_flag, last_chunk;
2323 	int abort_flag = 0, was_a_gap;
2324 	struct mbuf *m;
2325 	uint32_t highest_tsn;
2326 
2327 	/* set the rwnd */
2328 	sctp_set_rwnd(stcb, &stcb->asoc);
2329 
2330 	m = *mm;
2331 	SCTP_TCB_LOCK_ASSERT(stcb);
2332 	asoc = &stcb->asoc;
2333 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2334 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2335 	} else {
2336 		highest_tsn = asoc->highest_tsn_inside_map;
2337 	}
2338 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2339 	/*
2340 	 * setup where we got the last DATA packet from for any SACK that
2341 	 * may need to go out. Don't bump the net. This is done ONLY when a
2342 	 * chunk is assigned.
2343 	 */
2344 	asoc->last_data_chunk_from = net;
2345 
2346 	/*-
2347 	 * Now before we proceed we must figure out if this is a wasted
2348 	 * cluster... i.e. it is a small packet sent in and yet the driver
2349 	 * underneath allocated a full cluster for it. If so we must copy it
2350 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2351 	 * with cluster starvation. Note for __Panda__ we don't do this
2352 	 * since it has clusters all the way down to 64 bytes.
2353 	 */
2354 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2355 		/* we only handle mbufs that are singletons.. not chains */
2356 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2357 		if (m) {
2358 			/* ok, let's see if we can copy the data up */
2359 			caddr_t *from, *to;
2360 
2361 			/* get the pointers and copy */
2362 			to = mtod(m, caddr_t *);
2363 			from = mtod((*mm), caddr_t *);
2364 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2365 			/* copy the length and free up the old */
2366 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2367 			sctp_m_freem(*mm);
2368 			/* success, copy it back */
2369 			*mm = m;
2370 		} else {
2371 			/* We are in trouble in the mbuf world .. yikes */
2372 			m = *mm;
2373 		}
2374 	}
2375 	/* get pointer to the first chunk header */
2376 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2377 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2378 	if (ch == NULL) {
2379 		return (1);
2380 	}
2381 	/*
2382 	 * process all DATA chunks...
2383 	 */
2384 	*high_tsn = asoc->cumulative_tsn;
2385 	break_flag = 0;
2386 	asoc->data_pkts_seen++;
2387 	while (stop_proc == 0) {
2388 		/* validate chunk length */
2389 		chk_length = ntohs(ch->ch.chunk_length);
2390 		if (length - *offset < chk_length) {
2391 			/* all done, mutilated chunk */
2392 			stop_proc = 1;
2393 			continue;
2394 		}
2395 		if (ch->ch.chunk_type == SCTP_DATA) {
2396 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2397 				/*
2398 				 * Need to send an abort since we had an
2399 				 * invalid data chunk.
2400 				 */
2401 				struct mbuf *op_err;
2402 				char msg[SCTP_DIAG_INFO_LEN];
2403 
2404 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2405 				    chk_length);
2406 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2407 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2408 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2409 				return (2);
2410 			}
2411 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2412 				/*
2413 				 * Need to send an abort since we had an
2414 				 * empty data chunk.
2415 				 */
2416 				struct mbuf *op_err;
2417 
2418 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2419 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2420 				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2421 				return (2);
2422 			}
2423 #ifdef SCTP_AUDITING_ENABLED
2424 			sctp_audit_log(0xB1, 0);
2425 #endif
2426 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2427 				last_chunk = 1;
2428 			} else {
2429 				last_chunk = 0;
2430 			}
2431 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2432 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2433 			    last_chunk)) {
2434 				num_chunks++;
2435 			}
2436 			if (abort_flag)
2437 				return (2);
2438 
2439 			if (break_flag) {
2440 				/*
2441 				 * Set because of out of rwnd space and no
2442 				 * drop rep space left.
2443 				 */
2444 				stop_proc = 1;
2445 				continue;
2446 			}
2447 		} else {
2448 			/* not a data chunk in the data region */
2449 			switch (ch->ch.chunk_type) {
2450 			case SCTP_INITIATION:
2451 			case SCTP_INITIATION_ACK:
2452 			case SCTP_SELECTIVE_ACK:
2453 			case SCTP_NR_SELECTIVE_ACK:
2454 			case SCTP_HEARTBEAT_REQUEST:
2455 			case SCTP_HEARTBEAT_ACK:
2456 			case SCTP_ABORT_ASSOCIATION:
2457 			case SCTP_SHUTDOWN:
2458 			case SCTP_SHUTDOWN_ACK:
2459 			case SCTP_OPERATION_ERROR:
2460 			case SCTP_COOKIE_ECHO:
2461 			case SCTP_COOKIE_ACK:
2462 			case SCTP_ECN_ECHO:
2463 			case SCTP_ECN_CWR:
2464 			case SCTP_SHUTDOWN_COMPLETE:
2465 			case SCTP_AUTHENTICATION:
2466 			case SCTP_ASCONF_ACK:
2467 			case SCTP_PACKET_DROPPED:
2468 			case SCTP_STREAM_RESET:
2469 			case SCTP_FORWARD_CUM_TSN:
2470 			case SCTP_ASCONF:
2471 				/*
2472 				 * Now, what do we do with KNOWN chunks that
2473 				 * are NOT in the right place?
2474 				 *
2475 				 * For now, I do nothing but ignore them. We
2476 				 * may later want to add sysctl stuff to
2477 				 * switch out and do either an ABORT() or
2478 				 * possibly process them.
2479 				 */
2480 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2481 					struct mbuf *op_err;
2482 					char msg[SCTP_DIAG_INFO_LEN];
2483 
2484 					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2485 					    ch->ch.chunk_type);
2486 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2487 					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2488 					return (2);
2489 				}
2490 				break;
2491 			default:
2492 				/* unknown chunk type, use bit rules */
2493 				if (ch->ch.chunk_type & 0x40) {
2494 					/* Add a error report to the queue */
2495 					struct mbuf *merr;
2496 					struct sctp_paramhdr *phd;
2497 
2498 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2499 					if (merr) {
2500 						phd = mtod(merr, struct sctp_paramhdr *);
2501 						/*
2502 						 * We cheat and use param
2503 						 * bother to define an error
2504 						 * bother to define a error
2505 						 * cause struct. They are
2506 						 * the same basic format
2507 						 * with different names.
2508 						 */
2509 						phd->param_type =
2510 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2511 						phd->param_length =
2512 						    htons(chk_length + sizeof(*phd));
2513 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2514 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2515 						if (SCTP_BUF_NEXT(merr)) {
2516 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
2517 								sctp_m_freem(merr);
2518 							} else {
2519 								sctp_queue_op_err(stcb, merr);
2520 							}
2521 						} else {
2522 							sctp_m_freem(merr);
2523 						}
2524 					}
2525 				}
2526 				if ((ch->ch.chunk_type & 0x80) == 0) {
2527 					/* discard the rest of this packet */
2528 					stop_proc = 1;
2529 				}	/* else skip this bad chunk and
2530 					 * continue... */
2531 				break;
2532 			}	/* switch of chunk type */
2533 		}
2534 		*offset += SCTP_SIZE32(chk_length);
2535 		if ((*offset >= length) || stop_proc) {
2536 			/* no more data left in the mbuf chain */
2537 			stop_proc = 1;
2538 			continue;
2539 		}
2540 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2541 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2542 		if (ch == NULL) {
2543 			*offset = length;
2544 			stop_proc = 1;
2545 			continue;
2546 		}
2547 	}
2548 	if (break_flag) {
2549 		/*
2550 		 * we need to report rwnd overrun drops.
2551 		 */
2552 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2553 	}
2554 	if (num_chunks) {
2555 		/*
2556 		 * Did we get data? If so, update the time for auto-close and
2557 		 * give the peer credit for being alive.
2558 		 */
2559 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2560 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2561 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2562 			    stcb->asoc.overall_error_count,
2563 			    0,
2564 			    SCTP_FROM_SCTP_INDATA,
2565 			    __LINE__);
2566 		}
2567 		stcb->asoc.overall_error_count = 0;
2568 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2569 	}
2570 	/* now service all of the reassm queue if needed */
2571 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2572 		sctp_service_queues(stcb, asoc);
2573 
2574 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2575 		/* Assure that we ack right away */
2576 		stcb->asoc.send_sack = 1;
2577 	}
2578 	/* Start a sack timer or QUEUE a SACK for sending */
2579 	sctp_sack_check(stcb, was_a_gap);
2580 	return (0);
2581 }
2582 
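/*
 * Walk the sent queue marking every TSN covered by one gap ack block,
 * i.e. last_tsn + frag_strt .. last_tsn + frag_end. *p_tp1 carries the
 * scan position between calls so consecutive in-order blocks resume
 * where the previous one stopped. The return value is only used for
 * nr-sacks: non-zero means a chunk was newly NR-acked and its data
 * freed, so the sender may need to be woken.
 */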
2583 static int
2584 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2585     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2586     int *num_frs,
2587     uint32_t * biggest_newly_acked_tsn,
2588     uint32_t * this_sack_lowest_newack,
2589     int *rto_ok)
2590 {
2591 	struct sctp_tmit_chunk *tp1;
2592 	unsigned int theTSN;
2593 	int j, wake_him = 0, circled = 0;
2594 
2595 	/* Recover the tp1 we last saw */
2596 	tp1 = *p_tp1;
2597 	if (tp1 == NULL) {
2598 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2599 	}
2600 	for (j = frag_strt; j <= frag_end; j++) {
2601 		theTSN = j + last_tsn;
2602 		while (tp1) {
2603 			if (tp1->rec.data.doing_fast_retransmit)
2604 				(*num_frs) += 1;
2605 
2606 			/*-
2607 			 * CMT: CUCv2 algorithm. For each TSN being
2608 			 * processed from the sent queue, track the
2609 			 * next expected pseudo-cumack, or
2610 			 * rtx_pseudo_cumack, if required. Separate
2611 			 * cumack trackers for first transmissions,
2612 			 * and retransmissions.
2613 			 */
2614 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2615 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2616 			    (tp1->snd_count == 1)) {
2617 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2618 				tp1->whoTo->find_pseudo_cumack = 0;
2619 			}
2620 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2621 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2622 			    (tp1->snd_count > 1)) {
2623 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2624 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2625 			}
2626 			if (tp1->rec.data.TSN_seq == theTSN) {
2627 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2628 					/*-
2629 					 * must be held until
2630 					 * cum-ack passes
2631 					 */
2632 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2633 						/*-
2634 						 * If it is less than RESEND, it is
2635 						 * now no-longer in flight.
2636 						 * Higher values may already be set
2637 						 * via previous Gap Ack Blocks...
2638 						 * i.e. ACKED or RESEND.
2639 						 */
2640 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2641 						    *biggest_newly_acked_tsn)) {
2642 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2643 						}
2644 						/*-
2645 						 * CMT: SFR algo (and HTNA) - set
2646 						 * saw_newack to 1 for dest being
2647 						 * newly acked. update
2648 						 * this_sack_highest_newack if
2649 						 * appropriate.
2650 						 */
2651 						if (tp1->rec.data.chunk_was_revoked == 0)
2652 							tp1->whoTo->saw_newack = 1;
2653 
2654 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2655 						    tp1->whoTo->this_sack_highest_newack)) {
2656 							tp1->whoTo->this_sack_highest_newack =
2657 							    tp1->rec.data.TSN_seq;
2658 						}
2659 						/*-
2660 						 * CMT DAC algo: also update
2661 						 * this_sack_lowest_newack
2662 						 */
2663 						if (*this_sack_lowest_newack == 0) {
2664 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2665 								sctp_log_sack(*this_sack_lowest_newack,
2666 								    last_tsn,
2667 								    tp1->rec.data.TSN_seq,
2668 								    0,
2669 								    0,
2670 								    SCTP_LOG_TSN_ACKED);
2671 							}
2672 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2673 						}
2674 						/*-
2675 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2676 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2677 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2678 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2679 						 * Separate pseudo_cumack trackers for first transmissions and
2680 						 * retransmissions.
2681 						 */
2682 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2683 							if (tp1->rec.data.chunk_was_revoked == 0) {
2684 								tp1->whoTo->new_pseudo_cumack = 1;
2685 							}
2686 							tp1->whoTo->find_pseudo_cumack = 1;
2687 						}
2688 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2689 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2690 						}
2691 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2692 							if (tp1->rec.data.chunk_was_revoked == 0) {
2693 								tp1->whoTo->new_pseudo_cumack = 1;
2694 							}
2695 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2696 						}
2697 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2698 							sctp_log_sack(*biggest_newly_acked_tsn,
2699 							    last_tsn,
2700 							    tp1->rec.data.TSN_seq,
2701 							    frag_strt,
2702 							    frag_end,
2703 							    SCTP_LOG_TSN_ACKED);
2704 						}
2705 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2706 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2707 							    tp1->whoTo->flight_size,
2708 							    tp1->book_size,
2709 							    (uintptr_t) tp1->whoTo,
2710 							    tp1->rec.data.TSN_seq);
2711 						}
2712 						sctp_flight_size_decrease(tp1);
2713 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2714 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2715 							    tp1);
2716 						}
2717 						sctp_total_flight_decrease(stcb, tp1);
2718 
2719 						tp1->whoTo->net_ack += tp1->send_size;
2720 						if (tp1->snd_count < 2) {
2721 							/*-
2722 							 * True non-retransmitted chunk
2723 							 */
2724 							tp1->whoTo->net_ack2 += tp1->send_size;
2725 
2726 							/*-
2727 							 * update RTO too?
2728 							 */
2729 							if (tp1->do_rtt) {
2730 								if (*rto_ok) {
2731 									tp1->whoTo->RTO =
2732 									    sctp_calculate_rto(stcb,
2733 									    &stcb->asoc,
2734 									    tp1->whoTo,
2735 									    &tp1->sent_rcv_time,
2736 									    sctp_align_safe_nocopy,
2737 									    SCTP_RTT_FROM_DATA);
2738 									*rto_ok = 0;
2739 								}
2740 								if (tp1->whoTo->rto_needed == 0) {
2741 									tp1->whoTo->rto_needed = 1;
2742 								}
2743 								tp1->do_rtt = 0;
2744 							}
2745 						}
2746 					}
2747 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2748 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2749 						    stcb->asoc.this_sack_highest_gap)) {
2750 							stcb->asoc.this_sack_highest_gap =
2751 							    tp1->rec.data.TSN_seq;
2752 						}
2753 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2754 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2755 #ifdef SCTP_AUDITING_ENABLED
2756 							sctp_audit_log(0xB2,
2757 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2758 #endif
2759 						}
2760 					}
2761 					/*-
2762 					 * All chunks NOT UNSENT fall through here and are marked
2763 					 * (leave PR-SCTP ones that are to skip alone though)
2764 					 */
2765 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2766 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2767 						tp1->sent = SCTP_DATAGRAM_MARKED;
2768 					}
2769 					if (tp1->rec.data.chunk_was_revoked) {
2770 						/* deflate the cwnd */
2771 						tp1->whoTo->cwnd -= tp1->book_size;
2772 						tp1->rec.data.chunk_was_revoked = 0;
2773 					}
2774 					/* NR Sack code here */
2775 					if (nr_sacking &&
2776 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2777 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2778 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2779 #ifdef INVARIANTS
2780 						} else {
2781 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2782 #endif
2783 						}
2784 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2785 						if (tp1->data) {
2786 							/*
2787 							 * sa_ignore
2788 							 * NO_NULL_CHK
2789 							 */
2790 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2791 							sctp_m_freem(tp1->data);
2792 							tp1->data = NULL;
2793 						}
2794 						wake_him++;
2795 					}
2796 				}
2797 				break;
2798 			}	/* if (tp1->TSN_seq == theTSN) */
2799 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2800 				break;
2801 			}
2802 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2803 			if ((tp1 == NULL) && (circled == 0)) {
2804 				circled++;
2805 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2806 			}
2807 		}		/* end while (tp1) */
2808 		if (tp1 == NULL) {
2809 			circled = 0;
2810 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2811 		}
2812 		/* In case the fragments were not in order we must reset */
2813 	}			/* end for (j = fragStart */
2814 	*p_tp1 = tp1;
2815 	return (wake_him);	/* Return value only used for nr-sack */
2816 }
2817 
2818 
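/*
 * Process the num_seg revocable and num_nr_seg non-revocable gap ack
 * blocks reported in a SACK or NR-SACK. Blocks are expected in
 * ascending order; a malformed block is skipped and an out-of-order
 * block restarts the sent-queue scan from the head. Returns non-zero
 * if any chunk's data was freed while processing NR gap acks.
 */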
2819 static int
2820 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2821     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2822     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2823     int num_seg, int num_nr_seg, int *rto_ok)
2824 {
2825 	struct sctp_gap_ack_block *frag, block;
2826 	struct sctp_tmit_chunk *tp1;
2827 	int i;
2828 	int num_frs = 0;
2829 	int chunk_freed;
2830 	int non_revocable;
2831 	uint16_t frag_strt, frag_end, prev_frag_end;
2832 
2833 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2834 	prev_frag_end = 0;
2835 	chunk_freed = 0;
2836 
2837 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2838 		if (i == num_seg) {
2839 			prev_frag_end = 0;
2840 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2841 		}
2842 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2843 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2844 		*offset += sizeof(block);
2845 		if (frag == NULL) {
2846 			return (chunk_freed);
2847 		}
2848 		frag_strt = ntohs(frag->start);
2849 		frag_end = ntohs(frag->end);
2850 
2851 		if (frag_strt > frag_end) {
2852 			/* This gap report is malformed, skip it. */
2853 			continue;
2854 		}
2855 		if (frag_strt <= prev_frag_end) {
2856 			/* This gap report is not in order, so restart. */
2857 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2858 		}
2859 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2860 			*biggest_tsn_acked = last_tsn + frag_end;
2861 		}
2862 		if (i < num_seg) {
2863 			non_revocable = 0;
2864 		} else {
2865 			non_revocable = 1;
2866 		}
2867 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2868 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2869 		    this_sack_lowest_newack, rto_ok)) {
2870 			chunk_freed = 1;
2871 		}
2872 		prev_frag_end = frag_end;
2873 	}
2874 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2875 		if (num_frs)
2876 			sctp_log_fr(*biggest_tsn_acked,
2877 			    *biggest_newly_acked_tsn,
2878 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2879 	}
2880 	return (chunk_freed);
2881 }
2882 
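/*
 * Any chunk above the cum-ack that was previously ACKED but is not
 * covered by this SACK has been revoked by the peer: put it back to
 * SENT, re-inflate the flight size, and bump cwnd to compensate so
 * that timers and retransmission logic treat it as in flight again.
 */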
2883 static void
2884 sctp_check_for_revoked(struct sctp_tcb *stcb,
2885     struct sctp_association *asoc, uint32_t cumack,
2886     uint32_t biggest_tsn_acked)
2887 {
2888 	struct sctp_tmit_chunk *tp1;
2889 
2890 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2891 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2892 			/*
2893 			 * ok this guy is either ACK or MARKED. If it is
2894 			 * ACKED it has been previously acked but not this
2895 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
2896 			 * again.
2897 			 */
2898 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2899 				break;
2900 			}
2901 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2902 				/* it has been revoked */
2903 				tp1->sent = SCTP_DATAGRAM_SENT;
2904 				tp1->rec.data.chunk_was_revoked = 1;
2905 				/*
2906 				 * We must add this stuff back in to assure
2907 				 * timers and such get started.
2908 				 */
2909 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2910 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2911 					    tp1->whoTo->flight_size,
2912 					    tp1->book_size,
2913 					    (uintptr_t) tp1->whoTo,
2914 					    tp1->rec.data.TSN_seq);
2915 				}
2916 				sctp_flight_size_increase(tp1);
2917 				sctp_total_flight_increase(stcb, tp1);
2918 				/*
2919 				 * We inflate the cwnd to compensate for our
2920 				 * artificial inflation of the flight_size.
2921 				 */
2922 				tp1->whoTo->cwnd += tp1->book_size;
2923 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2924 					sctp_log_sack(asoc->last_acked_seq,
2925 					    cumack,
2926 					    tp1->rec.data.TSN_seq,
2927 					    0,
2928 					    0,
2929 					    SCTP_LOG_TSN_REVOKED);
2930 				}
2931 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2932 				/* it has been re-acked in this SACK */
2933 				tp1->sent = SCTP_DATAGRAM_ACKED;
2934 			}
2935 		}
2936 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2937 			break;
2938 	}
2939 }
2940 
2941 
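/*
 * Apply "strikes" (miss indications) to chunks on the sent queue that
 * this SACK did not cover, honoring PR-SCTP lifetimes and the CMT
 * SFR/DAC rules; a chunk struck often enough (tp1->sent reaching
 * SCTP_DATAGRAM_RESEND) becomes eligible for fast retransmission.
 */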
2942 static void
2943 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2944     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2945 {
2946 	struct sctp_tmit_chunk *tp1;
2947 	int strike_flag = 0;
2948 	struct timeval now;
2949 	int tot_retrans = 0;
2950 	uint32_t sending_seq;
2951 	struct sctp_nets *net;
2952 	int num_dests_sacked = 0;
2953 
2954 	/*
2955 	 * select the sending_seq, this is either the next thing ready to be
2956 	 * sent but not transmitted, OR, the next seq we assign.
2957 	 */
2958 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2959 	if (tp1 == NULL) {
2960 		sending_seq = asoc->sending_seq;
2961 	} else {
2962 		sending_seq = tp1->rec.data.TSN_seq;
2963 	}
2964 
2965 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2966 	if ((asoc->sctp_cmt_on_off > 0) &&
2967 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2968 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2969 			if (net->saw_newack)
2970 				num_dests_sacked++;
2971 		}
2972 	}
2973 	if (stcb->asoc.prsctp_supported) {
2974 		(void)SCTP_GETTIME_TIMEVAL(&now);
2975 	}
2976 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2977 		strike_flag = 0;
2978 		if (tp1->no_fr_allowed) {
2979 			/* this one had a timeout or something */
2980 			continue;
2981 		}
2982 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2983 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2984 				sctp_log_fr(biggest_tsn_newly_acked,
2985 				    tp1->rec.data.TSN_seq,
2986 				    tp1->sent,
2987 				    SCTP_FR_LOG_CHECK_STRIKE);
2988 		}
2989 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2990 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2991 			/* done */
2992 			break;
2993 		}
2994 		if (stcb->asoc.prsctp_supported) {
2995 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2996 				/* Is it expired? */
2997 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2998 					/* Yes so drop it */
2999 					if (tp1->data != NULL) {
3000 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3001 						    SCTP_SO_NOT_LOCKED);
3002 					}
3003 					continue;
3004 				}
3005 			}
3006 		}
3007 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3008 			/* we are beyond the tsn in the sack  */
3009 			break;
3010 		}
3011 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3012 			/* either a RESEND, ACKED, or MARKED */
3013 			/* skip */
3014 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3015 				/* Continue striking FWD-TSN chunks */
3016 				tp1->rec.data.fwd_tsn_cnt++;
3017 			}
3018 			continue;
3019 		}
3020 		/*
3021 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3022 		 */
3023 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3024 			/*
3025 			 * No new acks were received for data sent to this
3026 			 * dest. Therefore, according to the SFR algo for
3027 			 * CMT, no data sent to this dest can be marked for
3028 			 * FR using this SACK.
3029 			 */
3030 			continue;
3031 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3032 		    tp1->whoTo->this_sack_highest_newack)) {
3033 			/*
3034 			 * CMT: New acks were received for data sent to
3035 			 * this dest. But no new acks were seen for data
3036 			 * sent after tp1. Therefore, according to the SFR
3037 			 * algo for CMT, tp1 cannot be marked for FR using
3038 			 * this SACK. This step covers part of the DAC algo
3039 			 * and the HTNA algo as well.
3040 			 */
3041 			continue;
3042 		}
3043 		/*
3044 		 * Here we check to see if we have already done a FR
3045 		 * and if so we see if the biggest TSN we saw in the sack is
3046 		 * smaller than the recovery point. If so we don't strike
3047 		 * the tsn... otherwise we CAN strike the TSN.
3048 		 */
3049 		/*
3050 		 * @@@ JRI: Check for CMT if (accum_moved &&
3051 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3052 		 * 0)) {
3053 		 */
3054 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3055 			/*
3056 			 * Strike the TSN if in fast-recovery and cum-ack
3057 			 * moved.
3058 			 */
3059 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3060 				sctp_log_fr(biggest_tsn_newly_acked,
3061 				    tp1->rec.data.TSN_seq,
3062 				    tp1->sent,
3063 				    SCTP_FR_LOG_STRIKE_CHUNK);
3064 			}
3065 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3066 				tp1->sent++;
3067 			}
3068 			if ((asoc->sctp_cmt_on_off > 0) &&
3069 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3070 				/*
3071 				 * CMT DAC algorithm: If SACK flag is set to
3072 				 * 0, then lowest_newack test will not pass
3073 				 * because it would have been set to the
3074 				 * cumack earlier. If not already to be
3075 				 * rtx'd, if not a mixed sack, and if tp1 is
3076 				 * not between two sacked TSNs, then mark it
3077 				 * once more. NOTE that we are marking one
3078 				 * additional time since the SACK DAC flag
3079 				 * indicates that two packets have been
3080 				 * received after this missing TSN.
3081 				 */
3082 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3083 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3084 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3085 						sctp_log_fr(16 + num_dests_sacked,
3086 						    tp1->rec.data.TSN_seq,
3087 						    tp1->sent,
3088 						    SCTP_FR_LOG_STRIKE_CHUNK);
3089 					}
3090 					tp1->sent++;
3091 				}
3092 			}
3093 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3094 		    (asoc->sctp_cmt_on_off == 0)) {
3095 			/*
3096 			 * For those that have done a FR we must take
3097 			 * special consideration if we strike. I.e., the
3098 			 * biggest_newly_acked must be higher than the
3099 			 * sending_seq at the time we did the FR.
3100 			 */
3101 			if (
3102 #ifdef SCTP_FR_TO_ALTERNATE
3103 			/*
3104 			 * If FR's go to new networks, then we must only do
3105 			 * this for singly homed asoc's. However if the FR's
3106 			 * go to the same network (Armando's work) then it's
3107 			 * ok to FR multiple times.
3108 			 */
3109 			    (asoc->numnets < 2)
3110 #else
3111 			    (1)
3112 #endif
3113 			    ) {
3114 
3115 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3116 				    tp1->rec.data.fast_retran_tsn)) {
3117 					/*
3118 					 * Strike the TSN, since this ack is
3119 					 * beyond where things were when we
3120 					 * did a FR.
3121 					 */
3122 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3123 						sctp_log_fr(biggest_tsn_newly_acked,
3124 						    tp1->rec.data.TSN_seq,
3125 						    tp1->sent,
3126 						    SCTP_FR_LOG_STRIKE_CHUNK);
3127 					}
3128 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3129 						tp1->sent++;
3130 					}
3131 					strike_flag = 1;
3132 					if ((asoc->sctp_cmt_on_off > 0) &&
3133 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3134 						/*
3135 						 * CMT DAC algorithm: If
3136 						 * SACK flag is set to 0,
3137 						 * then lowest_newack test
3138 						 * will not pass because it
3139 						 * would have been set to
3140 						 * the cumack earlier. If
3141 						 * not already to be rtx'd,
3142 						 * If not a mixed sack and
3143 						 * if not a mixed sack, and
3144 						 * if tp1 is not between two
3145 						 * sacked TSNs, then mark it
3146 						 * once more. NOTE that we
3147 						 * are marking one
3148 						 * SACK DAC flag indicates
3149 						 * that two packets have
3150 						 * been received after this
3151 						 * missing TSN.
3152 						 */
3153 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3154 						    (num_dests_sacked == 1) &&
3155 						    SCTP_TSN_GT(this_sack_lowest_newack,
3156 						    tp1->rec.data.TSN_seq)) {
3157 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3158 								sctp_log_fr(32 + num_dests_sacked,
3159 								    tp1->rec.data.TSN_seq,
3160 								    tp1->sent,
3161 								    SCTP_FR_LOG_STRIKE_CHUNK);
3162 							}
3163 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3164 								tp1->sent++;
3165 							}
3166 						}
3167 					}
3168 				}
3169 			}
3170 			/*
3171 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3172 			 * algo covers HTNA.
3173 			 */
3174 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3175 		    biggest_tsn_newly_acked)) {
3176 			/*
3177 			 * We don't strike these: this is the HTNA
3178 			 * algorithm, i.e. we don't strike if our TSN is
3179 			 * larger than the Highest TSN Newly Acked.
3180 			 */
3181 			;
3182 		} else {
3183 			/* Strike the TSN */
3184 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3185 				sctp_log_fr(biggest_tsn_newly_acked,
3186 				    tp1->rec.data.TSN_seq,
3187 				    tp1->sent,
3188 				    SCTP_FR_LOG_STRIKE_CHUNK);
3189 			}
3190 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3191 				tp1->sent++;
3192 			}
3193 			if ((asoc->sctp_cmt_on_off > 0) &&
3194 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3195 				/*
3196 				 * CMT DAC algorithm: If SACK flag is set to
3197 				 * 0, then lowest_newack test will not pass
3198 				 * because it would have been set to the
3199 				 * cumack earlier. If not already to be
3200 				 * rtx'd, if not a mixed sack, and if tp1 is
3201 				 * not between two sacked TSNs, then mark it
3202 				 * once more. NOTE that we are marking one
3203 				 * additional time since the SACK DAC flag
3204 				 * indicates that two packets have been
3205 				 * received after this missing TSN.
3206 				 */
3207 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3208 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3209 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3210 						sctp_log_fr(48 + num_dests_sacked,
3211 						    tp1->rec.data.TSN_seq,
3212 						    tp1->sent,
3213 						    SCTP_FR_LOG_STRIKE_CHUNK);
3214 					}
3215 					tp1->sent++;
3216 				}
3217 			}
3218 		}
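		/*
		 * If the strikes above pushed tp1->sent up to
		 * SCTP_DATAGRAM_RESEND, the chunk is now marked for fast
		 * retransmit: fix the flight/rwnd accounting, pick a
		 * (possibly alternate) destination, and remember where the
		 * FR happened for later strike decisions.
		 */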
3219 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3220 			struct sctp_nets *alt;
3221 
3222 			/* fix counts and things */
3223 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3224 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3225 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3226 				    tp1->book_size,
3227 				    (uintptr_t) tp1->whoTo,
3228 				    tp1->rec.data.TSN_seq);
3229 			}
3230 			if (tp1->whoTo) {
3231 				tp1->whoTo->net_ack++;
3232 				sctp_flight_size_decrease(tp1);
3233 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3234 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3235 					    tp1);
3236 				}
3237 			}
3238 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3239 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3240 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3241 			}
3242 			/* add back to the rwnd */
3243 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3244 
3245 			/* remove from the total flight */
3246 			sctp_total_flight_decrease(stcb, tp1);
3247 
3248 			if ((stcb->asoc.prsctp_supported) &&
3249 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3250 				/*
3251 				 * Has it been retransmitted tv_sec times? -
3252 				 * we store the retran count there.
3253 				 */
3254 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3255 					/* Yes, so drop it */
3256 					if (tp1->data != NULL) {
3257 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3258 						    SCTP_SO_NOT_LOCKED);
3259 					}
3260 					/* Make sure to flag we had a FR */
3261 					tp1->whoTo->net_ack++;
3262 					continue;
3263 				}
3264 			}
3265 			/*
3266 			 * SCTP_PRINTF("OK, we are now ready to FR this
3267 			 * guy\n");
3268 			 */
3269 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3270 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3271 				    0, SCTP_FR_MARKED);
3272 			}
3273 			if (strike_flag) {
3274 				/* This is a subsequent FR */
3275 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3276 			}
3277 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3278 			if (asoc->sctp_cmt_on_off > 0) {
3279 				/*
3280 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3281 				 * If CMT is being used, then pick dest with
3282 				 * largest ssthresh for any retransmission.
3283 				 */
3284 				tp1->no_fr_allowed = 1;
3285 				alt = tp1->whoTo;
3286 				/* sa_ignore NO_NULL_CHK */
3287 				if (asoc->sctp_cmt_pf > 0) {
3288 					/*
3289 					 * JRS 5/18/07 - If CMT PF is on,
3290 					 * use the PF version of
3291 					 * find_alt_net()
3292 					 */
3293 					alt = sctp_find_alternate_net(stcb, alt, 2);
3294 				} else {
3295 					/*
3296 					 * JRS 5/18/07 - If only CMT is on,
3297 					 * use the CMT version of
3298 					 * find_alt_net()
3299 					 */
3300 					/* sa_ignore NO_NULL_CHK */
3301 					alt = sctp_find_alternate_net(stcb, alt, 1);
3302 				}
3303 				if (alt == NULL) {
3304 					alt = tp1->whoTo;
3305 				}
3306 				/*
3307 				 * CUCv2: If a different dest is picked for
3308 				 * the retransmission, then new
3309 				 * (rtx-)pseudo_cumack needs to be tracked
3310 				 * for orig dest. Let CUCv2 track new (rtx-)
3311 				 * pseudo-cumack always.
3312 				 */
3313 				if (tp1->whoTo) {
3314 					tp1->whoTo->find_pseudo_cumack = 1;
3315 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3316 				}
3317 			} else {/* CMT is OFF */
3318 
3319 #ifdef SCTP_FR_TO_ALTERNATE
3320 				/* Can we find an alternate? */
3321 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3322 #else
3323 				/*
3324 				 * default behavior is to NOT retransmit
3325 				 * FR's to an alternate. Armando Caro's
3326 				 * paper details why.
3327 				 */
3328 				alt = tp1->whoTo;
3329 #endif
3330 			}
3331 
3332 			tp1->rec.data.doing_fast_retransmit = 1;
3333 			tot_retrans++;
3334 			/* mark the sending seq for possible subsequent FR's */
3335 			/*
3336 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3337 			 * (uint32_t)tp1->rec.data.TSN_seq);
3338 			 */
3339 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3340 				/*
3341 				 * If the send queue is empty then this is
3342 				 * the next sequence number that will be
3343 				 * assigned, so we subtract one from it to
3344 				 * get the one we last sent.
3345 				 */
3346 				tp1->rec.data.fast_retran_tsn = sending_seq;
3347 			} else {
3348 				/*
3349 				 * If there are chunks on the send queue
3350 				 * (unsent data that has made it from the
3351 				 * stream queues but not out the door), we
3352 				 * take the first one (which will have the
3353 				 * lowest TSN) and subtract one to get the
3354 				 * one we last sent.
3355 				 */
3356 				struct sctp_tmit_chunk *ttt;
3357 
3358 				ttt = TAILQ_FIRST(&asoc->send_queue);
3359 				tp1->rec.data.fast_retran_tsn =
3360 				    ttt->rec.data.TSN_seq;
3361 			}
3362 
3363 			if (tp1->do_rtt) {
3364 				/*
3365 				 * this guy had an RTO calculation pending on
3366 				 * it, cancel it
3367 				 */
3368 				if ((tp1->whoTo != NULL) &&
3369 				    (tp1->whoTo->rto_needed == 0)) {
3370 					tp1->whoTo->rto_needed = 1;
3371 				}
3372 				tp1->do_rtt = 0;
3373 			}
3374 			if (alt != tp1->whoTo) {
3375 				/* yes, there is an alternate. */
3376 				sctp_free_remote_addr(tp1->whoTo);
3377 				/* sa_ignore FREED_MEMORY */
3378 				tp1->whoTo = alt;
3379 				atomic_add_int(&alt->ref_count, 1);
3380 			}
3381 		}
3382 	}
3383 }
3384 
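/*
 * Walk the sent queue and, for PR-SCTP chunks that were dropped or
 * NR-acked, move the advanced_peer_ack_point forward. Returns the chunk
 * sitting at the (possibly updated) ack point, if any, so the caller can
 * build a FWD-TSN from it.
 */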
3385 struct sctp_tmit_chunk *
3386 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3387     struct sctp_association *asoc)
3388 {
3389 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3390 	struct timeval now;
3391 	int now_filled = 0;
3392 
3393 	if (asoc->prsctp_supported == 0) {
3394 		return (NULL);
3395 	}
3396 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3397 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3398 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3399 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3400 			/* no chance to advance, out of here */
3401 			break;
3402 		}
3403 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3404 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3405 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3406 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3407 				    asoc->advanced_peer_ack_point,
3408 				    tp1->rec.data.TSN_seq, 0, 0);
3409 			}
3410 		}
3411 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3412 			/*
3413 			 * We can't fwd-tsn past any that are reliable, i.e.
3414 			 * retransmitted until the asoc fails.
3415 			 */
3416 			break;
3417 		}
3418 		if (!now_filled) {
3419 			(void)SCTP_GETTIME_TIMEVAL(&now);
3420 			now_filled = 1;
3421 		}
3422 		/*
3423 		 * Now we have a chunk which is marked for another
3424 		 * retransmission to a PR-stream but has run out of its chances
3425 		 * already, OR has been marked to skip now. Can we skip
3426 		 * it if it's a resend?
3427 		 */
3428 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3429 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3430 			/*
3431 			 * Now is this one marked for resend and its time is
3432 			 * now up?
3433 			 */
3434 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3435 				/* Yes so drop it */
3436 				if (tp1->data) {
3437 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3438 					    1, SCTP_SO_NOT_LOCKED);
3439 				}
3440 			} else {
3441 				/*
3442 				 * No, we are done when we hit one for resend
3443 				 * whose time has not expired.
3444 				 */
3445 				break;
3446 			}
3447 		}
3448 		/*
3449 		 * Ok now if this chunk is marked to drop it we can clean up
3450 		 * the chunk, advance our peer ack point and we can check
3451 		 * the next chunk.
3452 		 */
3453 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3454 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3455 			/* the advanced peer ack point goes forward */
3456 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3457 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3458 				a_adv = tp1;
3459 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3460 				/* No update but we do save the chk */
3461 				a_adv = tp1;
3462 			}
3463 		} else {
3464 			/*
3465 			 * If it is still in RESEND we can advance no
3466 			 * further
3467 			 */
3468 			break;
3469 		}
3470 	}
3471 	return (a_adv);
3472 }
3473 
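/*
 * Audit the flight size bookkeeping: count the sent-queue chunks in each
 * state and report an inconsistency (panic under INVARIANTS) when chunks
 * are still accounted as being in flight. Returns non-zero when the
 * caller should rebuild its flight counts.
 */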
3474 static int
3475 sctp_fs_audit(struct sctp_association *asoc)
3476 {
3477 	struct sctp_tmit_chunk *chk;
3478 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3479 	int ret;
3480 
3481 #ifndef INVARIANTS
3482 	int entry_flight, entry_cnt;
3483 
3484 #endif
3485 
3486 	ret = 0;
3487 #ifndef INVARIANTS
3488 	entry_flight = asoc->total_flight;
3489 	entry_cnt = asoc->total_flight_count;
3490 #endif
3491 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3492 		return (0);
3493 
3494 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3495 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3496 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3497 			    chk->rec.data.TSN_seq,
3498 			    chk->send_size,
3499 			    chk->snd_count);
3500 			inflight++;
3501 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3502 			resend++;
3503 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3504 			inbetween++;
3505 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3506 			above++;
3507 		} else {
3508 			acked++;
3509 		}
3510 	}
3511 
3512 	if ((inflight > 0) || (inbetween > 0)) {
3513 #ifdef INVARIANTS
3514 		panic("Flight size-express incorrect? \n");
3515 #else
3516 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3517 		    entry_flight, entry_cnt);
3518 
3519 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3520 		    inflight, inbetween, resend, above, acked);
3521 		ret = 1;
3522 #endif
3523 	}
3524 	return (ret);
3525 }
3526 
3527 
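/*
 * A window probe has been answered: take the probe chunk back out of
 * flight and mark it for a normal retransmission, unless it was already
 * acked or its data has been dropped.
 */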
3528 static void
3529 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3530     struct sctp_association *asoc,
3531     struct sctp_tmit_chunk *tp1)
3532 {
3533 	tp1->window_probe = 0;
3534 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3535 		/* TSNs skipped; we do NOT move back. */
3536 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3537 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3538 		    tp1->book_size,
3539 		    (uintptr_t) tp1->whoTo,
3540 		    tp1->rec.data.TSN_seq);
3541 		return;
3542 	}
3543 	/* First setup this by shrinking flight */
3544 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3545 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3546 		    tp1);
3547 	}
3548 	sctp_flight_size_decrease(tp1);
3549 	sctp_total_flight_decrease(stcb, tp1);
3550 	/* Now mark for resend */
3551 	tp1->sent = SCTP_DATAGRAM_RESEND;
3552 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3553 
3554 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3555 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3556 		    tp1->whoTo->flight_size,
3557 		    tp1->book_size,
3558 		    (uintptr_t) tp1->whoTo,
3559 		    tp1->rec.data.TSN_seq);
3560 	}
3561 }
3562 
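/*
 * Express (fast path) SACK processing: used for a SACK that carries only
 * a cumulative ack and no gap-ack blocks, so everything up to 'cumack'
 * can be freed from the sent queue without any gap or strike bookkeeping.
 */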
3563 void
3564 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3565     uint32_t rwnd, int *abort_now, int ecne_seen)
3566 {
3567 	struct sctp_nets *net;
3568 	struct sctp_association *asoc;
3569 	struct sctp_tmit_chunk *tp1, *tp2;
3570 	uint32_t old_rwnd;
3571 	int win_probe_recovery = 0;
3572 	int win_probe_recovered = 0;
3573 	int j, done_once = 0;
3574 	int rto_ok = 1;
3575 
3576 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3577 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3578 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3579 	}
3580 	SCTP_TCB_LOCK_ASSERT(stcb);
3581 #ifdef SCTP_ASOCLOG_OF_TSNS
3582 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3583 	stcb->asoc.cumack_log_at++;
3584 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3585 		stcb->asoc.cumack_log_at = 0;
3586 	}
3587 #endif
3588 	asoc = &stcb->asoc;
3589 	old_rwnd = asoc->peers_rwnd;
3590 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3591 		/* old ack */
3592 		return;
3593 	} else if (asoc->last_acked_seq == cumack) {
3594 		/* Window update sack */
3595 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3596 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
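		/*
		 * Sender-side silly window syndrome (SWS) avoidance: a peer
		 * rwnd below the configured threshold is treated as zero so
		 * we do not dribble small chunks into a nearly-closed
		 * window.
		 */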
3597 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3598 			/* SWS sender side engages */
3599 			asoc->peers_rwnd = 0;
3600 		}
3601 		if (asoc->peers_rwnd > old_rwnd) {
3602 			goto again;
3603 		}
3604 		return;
3605 	}
3606 	/* First setup for CC stuff */
3607 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3608 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3609 			/* Drag along the window_tsn for cwr's */
3610 			net->cwr_window_tsn = cumack;
3611 		}
3612 		net->prev_cwnd = net->cwnd;
3613 		net->net_ack = 0;
3614 		net->net_ack2 = 0;
3615 
3616 		/*
3617 		 * CMT: Reset CUC and Fast recovery algo variables before
3618 		 * SACK processing
3619 		 */
3620 		net->new_pseudo_cumack = 0;
3621 		net->will_exit_fast_recovery = 0;
3622 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3623 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3624 		}
3625 	}
3626 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3627 		uint32_t send_s;
3628 
3629 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3630 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3631 			    sctpchunk_listhead);
3632 			send_s = tp1->rec.data.TSN_seq + 1;
3633 		} else {
3634 			send_s = asoc->sending_seq;
3635 		}
3636 		if (SCTP_TSN_GE(cumack, send_s)) {
3637 			struct mbuf *op_err;
3638 			char msg[SCTP_DIAG_INFO_LEN];
3639 
3640 			*abort_now = 1;
3641 			/* XXX */
3642 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3643 			    cumack, send_s);
3644 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3645 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3646 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3647 			return;
3648 		}
3649 	}
3650 	asoc->this_sack_highest_gap = cumack;
3651 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3652 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3653 		    stcb->asoc.overall_error_count,
3654 		    0,
3655 		    SCTP_FROM_SCTP_INDATA,
3656 		    __LINE__);
3657 	}
3658 	stcb->asoc.overall_error_count = 0;
3659 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3660 		/* process the new consecutive TSN first */
3661 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3662 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3663 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3664 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3665 				}
3666 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3667 					/*
3668 					 * If it is less than ACKED, it is
3669 					 * now no-longer in flight. Higher
3670 					 * values may occur during marking
3671 					 */
3672 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3673 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3674 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3675 							    tp1->whoTo->flight_size,
3676 							    tp1->book_size,
3677 							    (uintptr_t) tp1->whoTo,
3678 							    tp1->rec.data.TSN_seq);
3679 						}
3680 						sctp_flight_size_decrease(tp1);
3681 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3682 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3683 							    tp1);
3684 						}
3685 						/* sa_ignore NO_NULL_CHK */
3686 						sctp_total_flight_decrease(stcb, tp1);
3687 					}
3688 					tp1->whoTo->net_ack += tp1->send_size;
3689 					if (tp1->snd_count < 2) {
3690 						/*
3691 						 * True non-retransmitted
3692 						 * chunk
3693 						 */
3694 						tp1->whoTo->net_ack2 +=
3695 						    tp1->send_size;
3696 
3697 						/* update RTO too? */
3698 						if (tp1->do_rtt) {
3699 							if (rto_ok) {
3700 								tp1->whoTo->RTO =
3701 								/* sa_ignore NO_NULL_CHK */
3706 								    sctp_calculate_rto(stcb,
3707 								    asoc, tp1->whoTo,
3708 								    &tp1->sent_rcv_time,
3709 								    sctp_align_safe_nocopy,
3710 								    SCTP_RTT_FROM_DATA);
3711 								rto_ok = 0;
3712 							}
3713 							if (tp1->whoTo->rto_needed == 0) {
3714 								tp1->whoTo->rto_needed = 1;
3715 							}
3716 							tp1->do_rtt = 0;
3717 						}
3718 					}
3719 					/*
3720 					 * CMT: CUCv2 algorithm. From the
3721 					 * cumack'd TSNs, for each TSN being
3722 					 * acked for the first time, set the
3723 					 * following variables for the
3724 					 * corresp destination.
3725 					 * new_pseudo_cumack will trigger a
3726 					 * cwnd update.
3727 					 * find_(rtx_)pseudo_cumack will
3728 					 * trigger search for the next
3729 					 * expected (rtx-)pseudo-cumack.
3730 					 */
3731 					tp1->whoTo->new_pseudo_cumack = 1;
3732 					tp1->whoTo->find_pseudo_cumack = 1;
3733 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3734 
3735 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3736 						/* sa_ignore NO_NULL_CHK */
3737 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3738 					}
3739 				}
3740 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3741 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3742 				}
3743 				if (tp1->rec.data.chunk_was_revoked) {
3744 					/* deflate the cwnd */
3745 					tp1->whoTo->cwnd -= tp1->book_size;
3746 					tp1->rec.data.chunk_was_revoked = 0;
3747 				}
3748 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3749 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3750 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3751 #ifdef INVARIANTS
3752 					} else {
3753 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3754 #endif
3755 					}
3756 				}
3757 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3758 				if (tp1->data) {
3759 					/* sa_ignore NO_NULL_CHK */
3760 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3761 					sctp_m_freem(tp1->data);
3762 					tp1->data = NULL;
3763 				}
3764 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3765 					sctp_log_sack(asoc->last_acked_seq,
3766 					    cumack,
3767 					    tp1->rec.data.TSN_seq,
3768 					    0,
3769 					    0,
3770 					    SCTP_LOG_FREE_SENT);
3771 				}
3772 				asoc->sent_queue_cnt--;
3773 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3774 			} else {
3775 				break;
3776 			}
3777 		}
3778 
3779 	}
3780 	/* sa_ignore NO_NULL_CHK */
3781 	if (stcb->sctp_socket) {
3782 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3783 		struct socket *so;
3784 
3785 #endif
3786 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3787 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3788 			/* sa_ignore NO_NULL_CHK */
3789 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3790 		}
3791 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3792 		so = SCTP_INP_SO(stcb->sctp_ep);
3793 		atomic_add_int(&stcb->asoc.refcnt, 1);
3794 		SCTP_TCB_UNLOCK(stcb);
3795 		SCTP_SOCKET_LOCK(so, 1);
3796 		SCTP_TCB_LOCK(stcb);
3797 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3798 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3799 			/* assoc was freed while we were unlocked */
3800 			SCTP_SOCKET_UNLOCK(so, 1);
3801 			return;
3802 		}
3803 #endif
3804 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3805 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3806 		SCTP_SOCKET_UNLOCK(so, 1);
3807 #endif
3808 	} else {
3809 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3810 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3811 		}
3812 	}
3813 
3814 	/* JRS - Use the congestion control given in the CC module */
3815 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3816 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3817 			if (net->net_ack2 > 0) {
3818 				/*
3819 				 * Karn's rule applies to clearing error
3820 				 * count, this is optional.
3821 				 */
3822 				net->error_count = 0;
3823 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3824 					/* addr came good */
3825 					net->dest_state |= SCTP_ADDR_REACHABLE;
3826 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3827 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3828 				}
3829 				if (net == stcb->asoc.primary_destination) {
3830 					if (stcb->asoc.alternate) {
3831 						/*
3832 						 * release the alternate,
3833 						 * primary is good
3834 						 */
3835 						sctp_free_remote_addr(stcb->asoc.alternate);
3836 						stcb->asoc.alternate = NULL;
3837 					}
3838 				}
3839 				if (net->dest_state & SCTP_ADDR_PF) {
3840 					net->dest_state &= ~SCTP_ADDR_PF;
3841 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3842 					    stcb->sctp_ep, stcb, net,
3843 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3844 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3845 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3846 					/* Done with this net */
3847 					net->net_ack = 0;
3848 				}
3849 				/* restore any doubled timers */
3850 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3851 				if (net->RTO < stcb->asoc.minrto) {
3852 					net->RTO = stcb->asoc.minrto;
3853 				}
3854 				if (net->RTO > stcb->asoc.maxrto) {
3855 					net->RTO = stcb->asoc.maxrto;
3856 				}
3857 			}
3858 		}
3859 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3860 	}
3861 	asoc->last_acked_seq = cumack;
3862 
3863 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3864 		/* nothing left in-flight */
3865 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3866 			net->flight_size = 0;
3867 			net->partial_bytes_acked = 0;
3868 		}
3869 		asoc->total_flight = 0;
3870 		asoc->total_flight_count = 0;
3871 	}
3872 	/* RWND update */
3873 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3874 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3875 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3876 		/* SWS sender side engages */
3877 		asoc->peers_rwnd = 0;
3878 	}
3879 	if (asoc->peers_rwnd > old_rwnd) {
3880 		win_probe_recovery = 1;
3881 	}
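	/*
	 * The peer's window grew compared to the previous SACK, so any
	 * chunk that went out as a window probe can be moved back for a
	 * normal retransmission in the timer loop below.
	 */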
3882 	/* Now assure a timer where data is queued at */
3883 again:
3884 	j = 0;
3885 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3886 		int to_ticks;
3887 
3888 		if (win_probe_recovery && (net->window_probe)) {
3889 			win_probe_recovered = 1;
3890 			/*
3891 			 * Find the first chunk that was used with a window
3892 			 * probe and clear its sent state
3893 			 */
3894 			/* sa_ignore FREED_MEMORY */
3895 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3896 				if (tp1->window_probe) {
3897 					/* move back to data send queue */
3898 					sctp_window_probe_recovery(stcb, asoc, tp1);
3899 					break;
3900 				}
3901 			}
3902 		}
3903 		if (net->RTO == 0) {
3904 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3905 		} else {
3906 			to_ticks = MSEC_TO_TICKS(net->RTO);
3907 		}
3908 		if (net->flight_size) {
3909 			j++;
3910 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3911 			    sctp_timeout_handler, &net->rxt_timer);
3912 			if (net->window_probe) {
3913 				net->window_probe = 0;
3914 			}
3915 		} else {
3916 			if (net->window_probe) {
3917 				/*
3918 				 * In window probes we must assure a timer
3919 				 * is still running there
3920 				 */
3921 				net->window_probe = 0;
3922 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3923 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3924 					    sctp_timeout_handler, &net->rxt_timer);
3925 				}
3926 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3927 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3928 				    stcb, net,
3929 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3930 			}
3931 		}
3932 	}
3933 	if ((j == 0) &&
3934 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3935 	    (asoc->sent_queue_retran_cnt == 0) &&
3936 	    (win_probe_recovered == 0) &&
3937 	    (done_once == 0)) {
3938 		/*
3939 		 * huh, this should not happen unless all packets are
3940 		 * PR-SCTP and marked to skip of course.
3941 		 */
3942 		if (sctp_fs_audit(asoc)) {
3943 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3944 				net->flight_size = 0;
3945 			}
3946 			asoc->total_flight = 0;
3947 			asoc->total_flight_count = 0;
3948 			asoc->sent_queue_retran_cnt = 0;
3949 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3950 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3951 					sctp_flight_size_increase(tp1);
3952 					sctp_total_flight_increase(stcb, tp1);
3953 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3954 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3955 				}
3956 			}
3957 		}
3958 		done_once = 1;
3959 		goto again;
3960 	}
3961 	/**********************************/
3962 	/* Now what about shutdown issues */
3963 	/**********************************/
3964 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3965 		/* nothing left on sendqueue.. consider done */
3966 		/* clean up */
3967 		if ((asoc->stream_queue_cnt == 1) &&
3968 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3969 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3970 		    (asoc->locked_on_sending)
3971 		    ) {
3972 			struct sctp_stream_queue_pending *sp;
3973 
3974 			/*
3975 			 * I may be in a state where we got all across.. but
3976 			 * cannot write more due to a shutdown... we abort
3977 			 * since the user did not indicate EOR in this case.
3978 			 * The sp will be cleaned during free of the asoc.
3979 			 */
3980 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3981 			    sctp_streamhead);
3982 			if ((sp) && (sp->length == 0)) {
3983 				/* Let cleanup code purge it */
3984 				if (sp->msg_is_complete) {
3985 					asoc->stream_queue_cnt--;
3986 				} else {
3987 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3988 					asoc->locked_on_sending = NULL;
3989 					asoc->stream_queue_cnt--;
3990 				}
3991 			}
3992 		}
3993 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3994 		    (asoc->stream_queue_cnt == 0)) {
3995 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3996 				/* Need to abort here */
3997 				struct mbuf *op_err;
3998 
3999 		abort_out_now:
4000 				*abort_now = 1;
4001 				/* XXX */
4002 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4003 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
4004 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4005 			} else {
4006 				struct sctp_nets *netp;
4007 
4008 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4009 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4010 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4011 				}
4012 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4013 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4014 				sctp_stop_timers_for_shutdown(stcb);
4015 				if (asoc->alternate) {
4016 					netp = asoc->alternate;
4017 				} else {
4018 					netp = asoc->primary_destination;
4019 				}
4020 				sctp_send_shutdown(stcb, netp);
4021 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4022 				    stcb->sctp_ep, stcb, netp);
4023 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4024 				    stcb->sctp_ep, stcb, netp);
4025 			}
4026 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4027 		    (asoc->stream_queue_cnt == 0)) {
4028 			struct sctp_nets *netp;
4029 
4030 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4031 				goto abort_out_now;
4032 			}
4033 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4034 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4035 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4036 			sctp_stop_timers_for_shutdown(stcb);
4037 			if (asoc->alternate) {
4038 				netp = asoc->alternate;
4039 			} else {
4040 				netp = asoc->primary_destination;
4041 			}
4042 			sctp_send_shutdown_ack(stcb, netp);
4043 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4044 			    stcb->sctp_ep, stcb, netp);
4045 		}
4046 	}
4047 	/*********************************************/
4048 	/* Here we perform PR-SCTP procedures        */
4049 	/* (section 4.2)                             */
4050 	/*********************************************/
4051 	/* C1. update advancedPeerAckPoint */
4052 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4053 		asoc->advanced_peer_ack_point = cumack;
4054 	}
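	/*
	 * C1/C3 are the sender-side FORWARD-TSN rules of PR-SCTP (RFC
	 * 3758): C1 keeps the Advanced.Peer.Ack.Point at least at the
	 * cumulative ack, and C3 sends a FORWARD TSN whenever the ack
	 * point sits ahead of the cumulative ack.
	 */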
4055 	/* PR-Sctp issues need to be addressed too */
4056 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4057 		struct sctp_tmit_chunk *lchk;
4058 		uint32_t old_adv_peer_ack_point;
4059 
4060 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4061 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4062 		/* C3. See if we need to send a Fwd-TSN */
4063 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4064 			/*
4065 			 * ISSUE with ECN, see FWD-TSN processing.
4066 			 */
4067 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4068 				send_forward_tsn(stcb, asoc);
4069 			} else if (lchk) {
4070 				/* try to FR fwd-tsn's that get lost too */
4071 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4072 					send_forward_tsn(stcb, asoc);
4073 				}
4074 			}
4075 		}
4076 		if (lchk) {
4077 			/* Assure a timer is up */
4078 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4079 			    stcb->sctp_ep, stcb, lchk->whoTo);
4080 		}
4081 	}
4082 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4083 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4084 		    rwnd,
4085 		    stcb->asoc.peers_rwnd,
4086 		    stcb->asoc.total_flight,
4087 		    stcb->asoc.total_output_queue_size);
4088 	}
4089 }
4090 
4091 void
4092 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4093     struct sctp_tcb *stcb,
4094     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4095     int *abort_now, uint8_t flags,
4096     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4097 {
4098 	struct sctp_association *asoc;
4099 	struct sctp_tmit_chunk *tp1, *tp2;
4100 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4101 	uint16_t wake_him = 0;
4102 	uint32_t send_s = 0;
4103 	long j;
4104 	int accum_moved = 0;
4105 	int will_exit_fast_recovery = 0;
4106 	uint32_t a_rwnd, old_rwnd;
4107 	int win_probe_recovery = 0;
4108 	int win_probe_recovered = 0;
4109 	struct sctp_nets *net = NULL;
4110 	int done_once;
4111 	int rto_ok = 1;
4112 	uint8_t reneged_all = 0;
4113 	uint8_t cmt_dac_flag;
4114 
4115 	/*
4116 	 * we take any chance we can to service our queues since we cannot
4117 	 * get awoken when the socket is read from :<
4118 	 */
4119 	/*
4120 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4121 	 * old sack, if so discard. 2) If there is nothing left in the send
4122 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4123 	 * too, update any rwnd change and verify no timers are running.
4124 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4125 	 * moved process these first and note that it moved. 4) Process any
4126 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4127 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4128 	 * sync up flightsizes and things, stop all timers and also check
4129 	 * for shutdown_pending state. If so then go ahead and send off the
4130 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4131 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4132 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4133 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4134 	 * if in shutdown_recv state.
4135 	 */
4136 	SCTP_TCB_LOCK_ASSERT(stcb);
4137 	/* CMT DAC algo */
4138 	this_sack_lowest_newack = 0;
4139 	SCTP_STAT_INCR(sctps_slowpath_sack);
4140 	last_tsn = cum_ack;
4141 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4142 #ifdef SCTP_ASOCLOG_OF_TSNS
4143 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4144 	stcb->asoc.cumack_log_at++;
4145 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4146 		stcb->asoc.cumack_log_at = 0;
4147 	}
4148 #endif
4149 	a_rwnd = rwnd;
4150 
4151 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4152 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4153 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4154 	}
4155 	old_rwnd = stcb->asoc.peers_rwnd;
4156 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4157 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4158 		    stcb->asoc.overall_error_count,
4159 		    0,
4160 		    SCTP_FROM_SCTP_INDATA,
4161 		    __LINE__);
4162 	}
4163 	stcb->asoc.overall_error_count = 0;
4164 	asoc = &stcb->asoc;
4165 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4166 		sctp_log_sack(asoc->last_acked_seq,
4167 		    cum_ack,
4168 		    0,
4169 		    num_seg,
4170 		    num_dup,
4171 		    SCTP_LOG_NEW_SACK);
4172 	}
4173 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4174 		uint16_t i;
4175 		uint32_t *dupdata, dblock;
4176 
4177 		for (i = 0; i < num_dup; i++) {
4178 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4179 			    sizeof(uint32_t), (uint8_t *) & dblock);
4180 			if (dupdata == NULL) {
4181 				break;
4182 			}
4183 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4184 		}
4185 	}
4186 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4187 		/* reality check */
4188 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4189 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4190 			    sctpchunk_listhead);
4191 			send_s = tp1->rec.data.TSN_seq + 1;
4192 		} else {
4193 			tp1 = NULL;
4194 			send_s = asoc->sending_seq;
4195 		}
4196 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4197 			struct mbuf *op_err;
4198 			char msg[SCTP_DIAG_INFO_LEN];
4199 
4200 			/*
4201 			 * no way, we have not even sent this TSN out yet.
4202 			 * Peer is hopelessly messed up with us.
4203 			 */
4204 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4205 			    cum_ack, send_s);
4206 			if (tp1) {
4207 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4208 				    tp1->rec.data.TSN_seq, (void *)tp1);
4209 			}
4210 	hopeless_peer:
4211 			*abort_now = 1;
4212 			/* XXX */
4213 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4214 			    cum_ack, send_s);
4215 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4216 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4217 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4218 			return;
4219 		}
4220 	}
4221 	/**********************/
4222 	/* 1) check the range */
4223 	/**********************/
4224 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4225 		/* acking something behind */
4226 		return;
4227 	}
4228 	/* update the Rwnd of the peer */
4229 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4230 	    TAILQ_EMPTY(&asoc->send_queue) &&
4231 	    (asoc->stream_queue_cnt == 0)) {
4232 		/* nothing left on send/sent and strmq */
4233 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4234 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4235 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4236 		}
4237 		asoc->peers_rwnd = a_rwnd;
4238 		if (asoc->sent_queue_retran_cnt) {
4239 			asoc->sent_queue_retran_cnt = 0;
4240 		}
4241 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4242 			/* SWS sender side engages */
4243 			asoc->peers_rwnd = 0;
4244 		}
4245 		/* stop any timers */
4246 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4248 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4249 			net->partial_bytes_acked = 0;
4250 			net->flight_size = 0;
4251 		}
4252 		asoc->total_flight = 0;
4253 		asoc->total_flight_count = 0;
4254 		return;
4255 	}
4256 	/*
4257 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4258 	 * things. The total byte count acked is tracked in net_ack AND
4259 	 * net_ack2 is used to track the total bytes acked that are un-
4260 	 * ambiguous and were never retransmitted. We track these on a per
4261 	 * destination address basis.
4262 	 */
4263 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4264 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4265 			/* Drag along the window_tsn for cwr's */
4266 			net->cwr_window_tsn = cum_ack;
4267 		}
4268 		net->prev_cwnd = net->cwnd;
4269 		net->net_ack = 0;
4270 		net->net_ack2 = 0;
4271 
4272 		/*
4273 		 * CMT: Reset CUC and Fast recovery algo variables before
4274 		 * SACK processing
4275 		 */
4276 		net->new_pseudo_cumack = 0;
4277 		net->will_exit_fast_recovery = 0;
4278 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4279 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4280 		}
4281 	}
4282 	/* process the new consecutive TSN first */
4283 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4284 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4285 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4286 				accum_moved = 1;
4287 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4288 					/*
4289 					 * If it is less than ACKED, it is
4290 					 * now no-longer in flight. Higher
4291 					 * values may occur during marking
4292 					 */
4293 					if ((tp1->whoTo->dest_state &
4294 					    SCTP_ADDR_UNCONFIRMED) &&
4295 					    (tp1->snd_count < 2)) {
4296 						/*
4297 						 * If there was no retran
4298 						 * and the address is
4299 						 * un-confirmed and we sent
4300 						 * there and are now
4301 						 * sacked... it's confirmed,
4302 						 * mark it so.
4303 						 */
4304 						tp1->whoTo->dest_state &=
4305 						    ~SCTP_ADDR_UNCONFIRMED;
4306 					}
4307 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4308 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4309 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4310 							    tp1->whoTo->flight_size,
4311 							    tp1->book_size,
4312 							    (uintptr_t) tp1->whoTo,
4313 							    tp1->rec.data.TSN_seq);
4314 						}
4315 						sctp_flight_size_decrease(tp1);
4316 						sctp_total_flight_decrease(stcb, tp1);
4317 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4318 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4319 							    tp1);
4320 						}
4321 					}
4322 					tp1->whoTo->net_ack += tp1->send_size;
4323 
4324 					/* CMT SFR and DAC algos */
4325 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4326 					tp1->whoTo->saw_newack = 1;
4327 
4328 					if (tp1->snd_count < 2) {
4329 						/*
4330 						 * True non-retransmitted
4331 						 * chunk
4332 						 */
4333 						tp1->whoTo->net_ack2 +=
4334 						    tp1->send_size;
4335 
4336 						/* update RTO too? */
4337 						if (tp1->do_rtt) {
4338 							if (rto_ok) {
4339 								tp1->whoTo->RTO =
4340 								    sctp_calculate_rto(stcb,
4341 								    asoc, tp1->whoTo,
4342 								    &tp1->sent_rcv_time,
4343 								    sctp_align_safe_nocopy,
4344 								    SCTP_RTT_FROM_DATA);
4345 								rto_ok = 0;
4346 							}
4347 							if (tp1->whoTo->rto_needed == 0) {
4348 								tp1->whoTo->rto_needed = 1;
4349 							}
4350 							tp1->do_rtt = 0;
4351 						}
4352 					}
4353 					/*
4354 					 * CMT: CUCv2 algorithm. From the
4355 					 * cumack'd TSNs, for each TSN being
4356 					 * acked for the first time, set the
4357 					 * following variables for the
4358 					 * corresp destination.
4359 					 * new_pseudo_cumack will trigger a
4360 					 * cwnd update.
4361 					 * find_(rtx_)pseudo_cumack will
4362 					 * trigger search for the next
4363 					 * expected (rtx-)pseudo-cumack.
4364 					 */
4365 					tp1->whoTo->new_pseudo_cumack = 1;
4366 					tp1->whoTo->find_pseudo_cumack = 1;
4367 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4368 
4369 
4370 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4371 						sctp_log_sack(asoc->last_acked_seq,
4372 						    cum_ack,
4373 						    tp1->rec.data.TSN_seq,
4374 						    0,
4375 						    0,
4376 						    SCTP_LOG_TSN_ACKED);
4377 					}
4378 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4379 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4380 					}
4381 				}
4382 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4383 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4384 #ifdef SCTP_AUDITING_ENABLED
4385 					sctp_audit_log(0xB3,
4386 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4387 #endif
4388 				}
4389 				if (tp1->rec.data.chunk_was_revoked) {
4390 					/* deflate the cwnd */
4391 					tp1->whoTo->cwnd -= tp1->book_size;
4392 					tp1->rec.data.chunk_was_revoked = 0;
4393 				}
4394 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4395 					tp1->sent = SCTP_DATAGRAM_ACKED;
4396 				}
4397 			}
4398 		} else {
4399 			break;
4400 		}
4401 	}
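	/*
	 * Everything at or below the cumulative ack has now been marked
	 * acked and taken out of flight; gap-ack blocks, if present, are
	 * handled next.
	 */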
4402 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4403 	/* always set this up to cum-ack */
4404 	asoc->this_sack_highest_gap = last_tsn;
4405 
4406 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4407 
4408 		/*
4409 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4410 		 * to be greater than the cumack. Also reset saw_newack to 0
4411 		 * for all dests.
4412 		 */
4413 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4414 			net->saw_newack = 0;
4415 			net->this_sack_highest_newack = last_tsn;
4416 		}
4417 
4418 		/*
4419 		 * this_sack_highest_gap will increase while handling NEW
4420 		 * segments; this_sack_highest_newack will increase while
4421 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4422 		 * used for CMT DAC algo. saw_newack will also change.
4423 		 */
4424 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4425 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4426 		    num_seg, num_nr_seg, &rto_ok)) {
4427 			wake_him++;
4428 		}
4429 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4430 			/*
4431 			 * validate the biggest_tsn_acked in the gap acks if
4432 			 * strict adherence is wanted.
4433 			 */
4434 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4435 				/*
4436 				 * peer is either confused or we are under
4437 				 * attack. We must abort.
4438 				 */
4439 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4440 				    biggest_tsn_acked, send_s);
4441 				goto hopeless_peer;
4442 			}
4443 		}
4444 	}
4445 	/*******************************************/
4446 	/* cancel ALL T3-send timer if accum moved */
4447 	/*******************************************/
4448 	if (asoc->sctp_cmt_on_off > 0) {
4449 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4450 			if (net->new_pseudo_cumack)
4451 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4452 				    stcb, net,
4453 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4454 
4455 		}
4456 	} else {
4457 		if (accum_moved) {
4458 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4459 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4460 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4461 			}
4462 		}
4463 	}
4464 	/********************************************/
4465 	/* drop the acked chunks from the sentqueue */
4466 	/********************************************/
4467 	asoc->last_acked_seq = cum_ack;
4468 
4469 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4470 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4471 			break;
4472 		}
4473 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4474 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4475 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4476 #ifdef INVARIANTS
4477 			} else {
4478 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4479 #endif
4480 			}
4481 		}
4482 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4483 		if (PR_SCTP_ENABLED(tp1->flags)) {
4484 			if (asoc->pr_sctp_cnt != 0)
4485 				asoc->pr_sctp_cnt--;
4486 		}
4487 		asoc->sent_queue_cnt--;
4488 		if (tp1->data) {
4489 			/* sa_ignore NO_NULL_CHK */
4490 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4491 			sctp_m_freem(tp1->data);
4492 			tp1->data = NULL;
4493 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4494 				asoc->sent_queue_cnt_removeable--;
4495 			}
4496 		}
4497 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4498 			sctp_log_sack(asoc->last_acked_seq,
4499 			    cum_ack,
4500 			    tp1->rec.data.TSN_seq,
4501 			    0,
4502 			    0,
4503 			    SCTP_LOG_FREE_SENT);
4504 		}
4505 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4506 		wake_him++;
4507 	}
4508 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4509 #ifdef INVARIANTS
4510 		panic("Warning flight size is positive and should be 0");
4511 #else
4512 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4513 		    asoc->total_flight);
4514 #endif
4515 		asoc->total_flight = 0;
4516 	}
4517 	/* sa_ignore NO_NULL_CHK */
4518 	if ((wake_him) && (stcb->sctp_socket)) {
4519 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4520 		struct socket *so;
4521 
4522 #endif
4523 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4524 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4525 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4526 		}
4527 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4528 		so = SCTP_INP_SO(stcb->sctp_ep);
4529 		atomic_add_int(&stcb->asoc.refcnt, 1);
4530 		SCTP_TCB_UNLOCK(stcb);
4531 		SCTP_SOCKET_LOCK(so, 1);
4532 		SCTP_TCB_LOCK(stcb);
4533 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4534 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4535 			/* assoc was freed while we were unlocked */
4536 			SCTP_SOCKET_UNLOCK(so, 1);
4537 			return;
4538 		}
4539 #endif
4540 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4541 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4542 		SCTP_SOCKET_UNLOCK(so, 1);
4543 #endif
4544 	} else {
4545 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4546 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4547 		}
4548 	}
4549 
4550 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4551 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4552 			/* Setup so we will exit RFC2582 fast recovery */
4553 			will_exit_fast_recovery = 1;
4554 		}
4555 	}
4556 	/*
4557 	 * Check for revoked fragments:
4558 	 *
4559 	 * if the previous SACK had no frags then we can't have any revoked.
4560 	 * if the previous SACK had frags then: if we now have frags (aka
4561 	 * num_seg > 0) call sctp_check_for_revoked() to tell if the peer
4562 	 * revoked some of them; else the peer revoked all ACKED fragments,
4563 	 * since we had some before and now we have NONE.
4564 	 */
4565 
4566 	if (num_seg) {
4567 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4568 		asoc->saw_sack_with_frags = 1;
4569 	} else if (asoc->saw_sack_with_frags) {
4570 		int cnt_revoked = 0;
4571 
4572 		/* Peer revoked all dg's marked or acked */
4573 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4574 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4575 				tp1->sent = SCTP_DATAGRAM_SENT;
4576 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4577 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4578 					    tp1->whoTo->flight_size,
4579 					    tp1->book_size,
4580 					    (uintptr_t) tp1->whoTo,
4581 					    tp1->rec.data.TSN_seq);
4582 				}
4583 				sctp_flight_size_increase(tp1);
4584 				sctp_total_flight_increase(stcb, tp1);
4585 				tp1->rec.data.chunk_was_revoked = 1;
4586 				/*
4587 				 * To ensure that this increase in
4588 				 * flightsize, which is artificial, does not
4589 				 * throttle the sender, we also increase the
4590 				 * cwnd artificially.
4591 				 */
4592 				tp1->whoTo->cwnd += tp1->book_size;
4593 				cnt_revoked++;
4594 			}
4595 		}
4596 		if (cnt_revoked) {
4597 			reneged_all = 1;
4598 		}
4599 		asoc->saw_sack_with_frags = 0;
4600 	}
4601 	if (num_nr_seg > 0)
4602 		asoc->saw_sack_with_nr_frags = 1;
4603 	else
4604 		asoc->saw_sack_with_nr_frags = 0;
4605 
4606 	/* JRS - Use the congestion control given in the CC module */
4607 	if (ecne_seen == 0) {
4608 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4609 			if (net->net_ack2 > 0) {
4610 				/*
4611 				 * Karn's rule applies to clearing error
4612 				 * count, this is optional.
4613 				 */
4614 				net->error_count = 0;
4615 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4616 					/* addr came good */
4617 					net->dest_state |= SCTP_ADDR_REACHABLE;
4618 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4619 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4620 				}
4621 				if (net == stcb->asoc.primary_destination) {
4622 					if (stcb->asoc.alternate) {
4623 						/*
4624 						 * release the alternate,
4625 						 * primary is good
4626 						 */
4627 						sctp_free_remote_addr(stcb->asoc.alternate);
4628 						stcb->asoc.alternate = NULL;
4629 					}
4630 				}
4631 				if (net->dest_state & SCTP_ADDR_PF) {
4632 					net->dest_state &= ~SCTP_ADDR_PF;
4633 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4634 					    stcb->sctp_ep, stcb, net,
4635 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4636 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4637 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4638 					/* Done with this net */
4639 					net->net_ack = 0;
4640 				}
4641 				/* restore any doubled timers */
4642 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4643 				if (net->RTO < stcb->asoc.minrto) {
4644 					net->RTO = stcb->asoc.minrto;
4645 				}
4646 				if (net->RTO > stcb->asoc.maxrto) {
4647 					net->RTO = stcb->asoc.maxrto;
4648 				}
4649 			}
4650 		}
4651 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4652 	}
4653 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4654 		/* nothing left in-flight */
4655 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4656 			/* stop all timers */
4657 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4658 			    stcb, net,
4659 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4660 			net->flight_size = 0;
4661 			net->partial_bytes_acked = 0;
4662 		}
4663 		asoc->total_flight = 0;
4664 		asoc->total_flight_count = 0;
4665 	}
4666 	/**********************************/
4667 	/* Now what about shutdown issues */
4668 	/**********************************/
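	/*
	 * Per RFC 4960, section 9.2: once the send and sent queues drain,
	 * an endpoint in SHUTDOWN-PENDING sends a SHUTDOWN, while one in
	 * SHUTDOWN-RECEIVED answers with a SHUTDOWN-ACK.
	 */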
4669 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4670 		/* nothing left on the send queue, consider done */
4671 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4672 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4673 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4674 		}
4675 		asoc->peers_rwnd = a_rwnd;
4676 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4677 			/* SWS sender side engages */
4678 			asoc->peers_rwnd = 0;
4679 		}
4680 		/* clean up */
4681 		if ((asoc->stream_queue_cnt == 1) &&
4682 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4683 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4684 		    (asoc->locked_on_sending)
4685 		    ) {
4686 			struct sctp_stream_queue_pending *sp;
4687 
4688 			/*
4689 			 * We may have gotten everything across but cannot
4690 			 * write more due to a shutdown in progress; abort,
4691 			 * since the user did not indicate EOR in this case.
4692 			 */
4693 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4694 			    sctp_streamhead);
4695 			if ((sp) && (sp->length == 0)) {
4696 				asoc->locked_on_sending = NULL;
4697 				if (sp->msg_is_complete) {
4698 					asoc->stream_queue_cnt--;
4699 				} else {
4700 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4701 					asoc->stream_queue_cnt--;
4702 				}
4703 			}
4704 		}
4705 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4706 		    (asoc->stream_queue_cnt == 0)) {
4707 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4708 				/* Need to abort here */
4709 				struct mbuf *op_err;
4710 
4711 		abort_out_now:
4712 				*abort_now = 1;
4713 				/* XXX */
4714 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4715 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4716 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4717 				return;
4718 			} else {
4719 				struct sctp_nets *netp;
4720 
4721 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4722 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4723 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4724 				}
4725 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4726 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4727 				sctp_stop_timers_for_shutdown(stcb);
4728 				if (asoc->alternate) {
4729 					netp = asoc->alternate;
4730 				} else {
4731 					netp = asoc->primary_destination;
4732 				}
4733 				sctp_send_shutdown(stcb, netp);
4734 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4735 				    stcb->sctp_ep, stcb, netp);
4736 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4737 				    stcb->sctp_ep, stcb, netp);
4738 			}
4739 			return;
4740 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4741 		    (asoc->stream_queue_cnt == 0)) {
4742 			struct sctp_nets *netp;
4743 
4744 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4745 				goto abort_out_now;
4746 			}
4747 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4748 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4749 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4750 			sctp_stop_timers_for_shutdown(stcb);
4751 			if (asoc->alternate) {
4752 				netp = asoc->alternate;
4753 			} else {
4754 				netp = asoc->primary_destination;
4755 			}
4756 			sctp_send_shutdown_ack(stcb, netp);
4757 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4758 			    stcb->sctp_ep, stcb, netp);
4759 			return;
4760 		}
4761 	}
4762 	/*
4763 	 * Now here we are going to recycle net_ack for a different use...
4764 	 * HEADS UP.
4765 	 */
4766 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4767 		net->net_ack = 0;
4768 	}
4769 
4770 	/*
4771 	 * CMT DAC algorithm: if the SACK's DAC flag was 0, then no extra
4772 	 * marking is to be done. Setting this_sack_lowest_newack to the
4773 	 * cum_ack will automatically ensure that.
4774 	 */
4775 	if ((asoc->sctp_cmt_on_off > 0) &&
4776 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4777 	    (cmt_dac_flag == 0)) {
4778 		this_sack_lowest_newack = cum_ack;
4779 	}
4780 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4781 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4782 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4783 	}
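	/*
	 * Striking increments the miss count on chunks that the gap reports
	 * show as missing; chunks reaching the retransmit threshold are
	 * marked for fast retransmission.
	 */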
4784 	/* JRS - Use the congestion control given in the CC module */
4785 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4786 
4787 	/* Now are we exiting loss recovery ? */
4788 	if (will_exit_fast_recovery) {
4789 		/* Ok, we must exit fast recovery */
4790 		asoc->fast_retran_loss_recovery = 0;
4791 	}
4792 	if ((asoc->sat_t3_loss_recovery) &&
4793 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4794 		/* end satellite t3 loss recovery */
4795 		asoc->sat_t3_loss_recovery = 0;
4796 	}
4797 	/*
4798 	 * CMT Fast recovery
4799 	 */
4800 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4801 		if (net->will_exit_fast_recovery) {
4802 			/* Ok, we must exit fast recovery */
4803 			net->fast_retran_loss_recovery = 0;
4804 		}
4805 	}
4806 
4807 	/* Adjust and set the new rwnd value */
4808 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4809 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4810 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4811 	}
4812 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4813 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4814 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4815 		/* SWS sender side engages */
4816 		asoc->peers_rwnd = 0;
4817 	}
4818 	if (asoc->peers_rwnd > old_rwnd) {
4819 		win_probe_recovery = 1;
4820 	}
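	/*
	 * If the peer's window grew, a chunk that had been sent as a window
	 * probe can be recovered (handed back to the normal send logic) in
	 * the pass below.
	 */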
4821 	/*
4822 	 * Now we must set things up so that a timer is running for anyone
4823 	 * with outstanding data.
4824 	 */
4825 	done_once = 0;
4826 again:
4827 	j = 0;
4828 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4829 		if (win_probe_recovery && (net->window_probe)) {
4830 			win_probe_recovered = 1;
4831 			/*-
4832 			 * Find the first chunk that was used for a
4833 			 * window probe and clear the event. Put it
4834 			 * back into the send queue as if it had
4835 			 * not been sent.
4836 			 */
4837 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4838 				if (tp1->window_probe) {
4839 					sctp_window_probe_recovery(stcb, asoc, tp1);
4840 					break;
4841 				}
4842 			}
4843 		}
4844 		if (net->flight_size) {
4845 			j++;
4846 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4847 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4848 				    stcb->sctp_ep, stcb, net);
4849 			}
4850 			if (net->window_probe) {
4851 				net->window_probe = 0;
4852 			}
4853 		} else {
4854 			if (net->window_probe) {
4855 				/*
4856 				 * While window probing we must ensure that
4857 				 * a timer is still running here.
4858 				 */
4859 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4860 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4861 					    stcb->sctp_ep, stcb, net);
4862 
4863 				}
4864 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4865 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4866 				    stcb, net,
4867 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4868 			}
4869 		}
4870 	}
4871 	if ((j == 0) &&
4872 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4873 	    (asoc->sent_queue_retran_cnt == 0) &&
4874 	    (win_probe_recovered == 0) &&
4875 	    (done_once == 0)) {
4876 		/*
4877 		 * This should not happen unless all packets are
4878 		 * PR-SCTP and marked to be skipped, of course.
4879 		 */
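		/*
		 * sctp_fs_audit() appears to return non-zero when the
		 * book-kept flight size disagrees with the sent queue; in
		 * that case zero the counters and rebuild them by walking
		 * sent_queue.
		 */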
4880 		if (sctp_fs_audit(asoc)) {
4881 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4882 				net->flight_size = 0;
4883 			}
4884 			asoc->total_flight = 0;
4885 			asoc->total_flight_count = 0;
4886 			asoc->sent_queue_retran_cnt = 0;
4887 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4888 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4889 					sctp_flight_size_increase(tp1);
4890 					sctp_total_flight_increase(stcb, tp1);
4891 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4892 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4893 				}
4894 			}
4895 		}
4896 		done_once = 1;
4897 		goto again;
4898 	}
4899 	/*********************************************/
4900 	/* Here we perform PR-SCTP procedures        */
4901 	/* (section 4.2)                             */
4902 	/*********************************************/
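	/*
	 * These correspond to the C1-C3 sender side rules of RFC 3758:
	 * advance the Advanced.Peer.Ack.Point past abandoned TSNs and, when
	 * it moves beyond the cumulative ack, emit a FORWARD-TSN chunk.
	 */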
4903 	/* C1. update advancedPeerAckPoint */
4904 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4905 		asoc->advanced_peer_ack_point = cum_ack;
4906 	}
4907 	/* C2. try to further move advancedPeerAckPoint ahead */
4908 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4909 		struct sctp_tmit_chunk *lchk;
4910 		uint32_t old_adv_peer_ack_point;
4911 
4912 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4913 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4914 		/* C3. See if we need to send a Fwd-TSN */
4915 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4916 			/*
4917 			 * ISSUE with ECN, see FWD-TSN processing.
4918 			 */
4919 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4920 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4921 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4922 				    old_adv_peer_ack_point);
4923 			}
4924 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4925 				send_forward_tsn(stcb, asoc);
4926 			} else if (lchk) {
4927 				/* also fast retransmit FWD-TSNs that get lost */
4928 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4929 					send_forward_tsn(stcb, asoc);
4930 				}
4931 			}
4932 		}
4933 		if (lchk) {
4934 			/* Ensure a timer is up */
4935 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4936 			    stcb->sctp_ep, stcb, lchk->whoTo);
4937 		}
4938 	}
4939 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4940 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4941 		    a_rwnd,
4942 		    stcb->asoc.peers_rwnd,
4943 		    stcb->asoc.total_flight,
4944 		    stcb->asoc.total_output_queue_size);
4945 	}
4946 }
4947 
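/*
 * A SHUTDOWN chunk carries a cumulative TSN ack but no gap reports or
 * window update, so it is run through the express SACK path with a
 * synthetic a_rwnd chosen to leave the peer's window estimate unchanged.
 */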
4948 void
4949 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4950 {
4951 	/* Copy cum-ack */
4952 	uint32_t cum_ack, a_rwnd;
4953 
4954 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4955 	/* Arrange so a_rwnd does NOT change */
4956 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4957 
4958 	/* Now call the express sack handling */
4959 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4960 }
4961 
4962 static void
4963 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4964     struct sctp_stream_in *strmin)
4965 {
4966 	struct sctp_queued_to_read *ctl, *nctl;
4967 	struct sctp_association *asoc;
4968 	uint16_t tt;
4969 
4970 	asoc = &stcb->asoc;
4971 	tt = strmin->last_sequence_delivered;
4972 	/*
4973 	 * First deliver anything prior to and including the stream
4974 	 * sequence number that came in.
4975 	 */
4976 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4977 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4978 			/* this is deliverable now */
4979 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4980 			/* subtract pending on streams */
4981 			asoc->size_on_all_streams -= ctl->length;
4982 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4983 			/* deliver it to at least the delivery-q */
4984 			if (stcb->sctp_socket) {
4985 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4986 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4987 				    ctl,
4988 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4989 			}
4990 		} else {
4991 			/* no more delivery now. */
4992 			break;
4993 		}
4994 	}
4995 	/*
4996 	 * Now we must deliver things in the queue the normal way, if any
4997 	 * are now ready.
4998 	 */
4999 	tt = strmin->last_sequence_delivered + 1;
5000 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5001 		if (tt == ctl->sinfo_ssn) {
5002 			/* this is deliverable now */
5003 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5004 			/* subtract pending on streams */
5005 			asoc->size_on_all_streams -= ctl->length;
5006 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5007 			/* deliver it to at least the delivery-q */
5008 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5009 			if (stcb->sctp_socket) {
5010 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5011 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5012 				    ctl,
5013 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5014 
5015 			}
5016 			tt = strmin->last_sequence_delivered + 1;
5017 		} else {
5018 			break;
5019 		}
5020 	}
5021 }
5022 
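/*
 * Toss every fragment queued for reassembly that belongs to the given
 * stream/sequence pair being skipped over by a FORWARD-TSN.
 */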
5023 static void
5024 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5025     struct sctp_association *asoc,
5026     uint16_t stream, uint16_t seq)
5027 {
5028 	struct sctp_tmit_chunk *chk, *nchk;
5029 
5030 	/* For each entry on here, see if we need to toss it */
5031 	/*
5032 	 * For now, large messages held on the reasmqueue that are complete
5033 	 * are tossed as well. We could in theory do more work, spinning
5034 	 * through and stopping after dumping one message (i.e., on seeing
5035 	 * the start of a new message at the head) and calling the delivery
5036 	 * function to see if it can be delivered. But for now we just dump
5037 	 * everything on the queue.
5038 	 */
5039 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5040 		/*
5041 		 * Do not toss it if it is on a different stream or marked
5042 		 * for unordered delivery, in which case the stream sequence
5043 		 * number has no meaning.
5044 		 */
5045 		if ((chk->rec.data.stream_number != stream) ||
5046 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5047 			continue;
5048 		}
5049 		if (chk->rec.data.stream_seq == seq) {
5050 			/* It needs to be tossed */
5051 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5052 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5053 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5054 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5055 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5056 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5057 			}
5058 			asoc->size_on_reasm_queue -= chk->send_size;
5059 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5060 
5061 			/* Clear up any stream problem */
5062 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5063 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5064 				/*
5065 				 * We must move this stream's sequence
5066 				 * number forward if the chunk being
5067 				 * skipped is not unordered. There is a
5068 				 * chance that if the peer does not
5069 				 * include the last fragment in its FWD-TSN
5070 				 * we WILL have a problem here, since a
5071 				 * partial chunk left in the queue may not
5072 				 * be deliverable. Also, if a partial
5073 				 * delivery API has started, the user may
5074 				 * get a partial chunk, with the next read
5075 				 * returning a new one. Really ugly, but I
5076 				 * see no way around it! Maybe a notify??
5077 				 */
5078 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5079 			}
5080 			if (chk->data) {
5081 				sctp_m_freem(chk->data);
5082 				chk->data = NULL;
5083 			}
5084 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5085 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5086 			/*
5087 			 * If the stream_seq is greater than the one being
5088 			 * purged, we are done.
5089 			 */
5090 			break;
5091 		}
5092 	}
5093 }
5094 
5095 
5096 void
5097 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5098     struct sctp_forward_tsn_chunk *fwd,
5099     int *abort_flag, struct mbuf *m, int offset)
5100 {
5101 	/* The pr-sctp fwd tsn */
5102 	/*
5103 	 * Here we perform all the data receiver side steps for processing
5104 	 * FwdTSN, as required by the PR-SCTP draft.
5105 	 *
5106 	 * Assume we get FwdTSN(x):
5107 	 * 1) update local cumTSN to x
5108 	 * 2) try to further advance cumTSN to x + others we have
5109 	 * 3) examine and update the re-ordering queue on pr-in-streams
5110 	 * 4) clean up the re-assembly queue
5111 	 * 5) send a SACK to report where we are
5112 	 */
5113 	struct sctp_association *asoc;
5114 	uint32_t new_cum_tsn, gap;
5115 	unsigned int i, fwd_sz, m_size;
5116 	uint32_t str_seq;
5117 	struct sctp_stream_in *strm;
5118 	struct sctp_tmit_chunk *chk, *nchk;
5119 	struct sctp_queued_to_read *ctl, *sv;
5120 
5121 	asoc = &stcb->asoc;
5122 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5123 		SCTPDBG(SCTP_DEBUG_INDATA1,
5124 		    "Bad size too small/big fwd-tsn\n");
5125 		return;
5126 	}
5127 	m_size = (stcb->asoc.mapping_array_size << 3);
5128 	/*************************************************************/
5129 	/* 1. Here we update local cumTSN and shift the bitmap array */
5130 	/*************************************************************/
5131 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5132 
5133 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5134 		/* Already got there ... */
5135 		return;
5136 	}
5137 	/*
5138 	 * Now we know the new TSN is more advanced; let's find the actual
5139 	 * gap.
5140 	 */
5141 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5142 	asoc->cumulative_tsn = new_cum_tsn;
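	/*
	 * If the gap runs off the end of the mapping array, everything we
	 * are tracking is being skipped: sanity-check the jump against the
	 * rwnd we advertise, then reset both maps to start at the new
	 * cumulative TSN.
	 */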
5143 	if (gap >= m_size) {
5144 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5145 			struct mbuf *op_err;
5146 			char msg[SCTP_DIAG_INFO_LEN];
5147 
5148 			/*
5149 			 * Out of range (beyond the rwnd we give out, even
5150 			 * counted in single-byte chunks). This must be an
5151 			 * attacker.
5152 			 */
5152 			*abort_flag = 1;
5153 			snprintf(msg, sizeof(msg),
5154 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5155 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5156 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5157 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5158 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5159 			return;
5160 		}
5161 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5162 
5163 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5164 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5165 		asoc->highest_tsn_inside_map = new_cum_tsn;
5166 
5167 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5168 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5169 
5170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5171 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5172 		}
5173 	} else {
5174 		SCTP_TCB_LOCK_ASSERT(stcb);
5175 		for (i = 0; i <= gap; i++) {
5176 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5177 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5178 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5179 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5180 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5181 				}
5182 			}
5183 		}
5184 	}
5185 	/*************************************************************/
5186 	/* 2. Clear up re-assembly queue                             */
5187 	/*************************************************************/
5188 	/*
5189 	 * First service it if the partial delivery API is up, just in case
5190 	 * we can progress it forward.
5191 	 */
5192 	if (asoc->fragmented_delivery_inprogress) {
5193 		sctp_service_reassembly(stcb, asoc);
5194 	}
5195 	/* For each entry on here, see if we need to toss it */
5196 	/*
5197 	 * For now, large messages held on the reasmqueue that are complete
5198 	 * are tossed as well. We could in theory do more work, spinning
5199 	 * through and stopping after dumping one message (i.e., on seeing
5200 	 * the start of a new message at the head) and calling the delivery
5201 	 * function to see if it can be delivered. But for now we just dump
5202 	 * everything on the queue.
5203 	 */
5204 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5205 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5206 			/* It needs to be tossed */
5207 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5208 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5209 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5210 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5211 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5212 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5213 			}
5214 			asoc->size_on_reasm_queue -= chk->send_size;
5215 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5216 
5217 			/* Clear up any stream problem */
5218 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5219 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5220 				/*
5221 				 * We must move this stream's sequence
5222 				 * number forward if the chunk being
5223 				 * skipped is not unordered. There is a
5224 				 * chance that if the peer does not
5225 				 * include the last fragment in its FWD-TSN
5226 				 * we WILL have a problem here, since a
5227 				 * partial chunk left in the queue may not
5228 				 * be deliverable. Also, if a partial
5229 				 * delivery API has started, the user may
5230 				 * get a partial chunk, with the next read
5231 				 * returning a new one. Really ugly, but I
5232 				 * see no way around it! Maybe a notify??
5233 				 */
5234 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5235 			}
5236 			if (chk->data) {
5237 				sctp_m_freem(chk->data);
5238 				chk->data = NULL;
5239 			}
5240 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5241 		} else {
5242 			/*
5243 			 * Ok we have gone beyond the end of the fwd-tsn's
5244 			 * mark.
5245 			 */
5246 			break;
5247 		}
5248 	}
5249 	/*******************************************************/
5250 	/* 3. Update the PR-stream re-ordering queues and fix  */
5251 	/*    delivery issues as needed.                        */
5252 	/*******************************************************/
5253 	fwd_sz -= sizeof(*fwd);
5254 	if (m && fwd_sz) {
5255 		/* New method. */
5256 		unsigned int num_str;
5257 		struct sctp_strseq *stseq, strseqbuf;
5258 
5259 		offset += sizeof(*fwd);
5260 
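		/*
		 * The chunk body is a list of sctp_strseq entries, each
		 * naming a (stream, sequence) pair the peer has skipped;
		 * walk them under the read lock so read-queue entries can be
		 * flagged safely.
		 */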
5261 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5262 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5263 		for (i = 0; i < num_str; i++) {
5264 			uint16_t st;
5265 
5266 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5267 			    sizeof(struct sctp_strseq),
5268 			    (uint8_t *) & strseqbuf);
5269 			offset += sizeof(struct sctp_strseq);
5270 			if (stseq == NULL) {
5271 				break;
5272 			}
5273 			/* Convert */
5274 			st = ntohs(stseq->stream);
5275 			stseq->stream = st;
5276 			st = ntohs(stseq->sequence);
5277 			stseq->sequence = st;
5278 
5279 			/* now process */
5280 
5281 			/*
5282 			 * Ok, we now look for the stream/seq on the read
5283 			 * queue where it is not fully delivered. If we find
5284 			 * it, we transmute the read entry into a PDI_ABORTED.
5285 			 */
5286 			if (stseq->stream >= asoc->streamincnt) {
5287 				/* screwed up streams, stop!  */
5288 				break;
5289 			}
5290 			if ((asoc->str_of_pdapi == stseq->stream) &&
5291 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5292 				/*
5293 				 * If this is the one we were partially
5294 				 * delivering, then we no longer are. Note
5295 				 * that this will change with the reassembly
5296 				 * re-write.
5297 				 */
5298 				asoc->fragmented_delivery_inprogress = 0;
5299 			}
5300 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5301 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5302 				if ((ctl->sinfo_stream == stseq->stream) &&
5303 				    (ctl->sinfo_ssn == stseq->sequence)) {
5304 					str_seq = (stseq->stream << 16) | stseq->sequence;
5305 					ctl->end_added = 1;
5306 					ctl->pdapi_aborted = 1;
5307 					sv = stcb->asoc.control_pdapi;
5308 					stcb->asoc.control_pdapi = ctl;
5309 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5310 					    stcb,
5311 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5312 					    (void *)&str_seq,
5313 					    SCTP_SO_NOT_LOCKED);
5314 					stcb->asoc.control_pdapi = sv;
5315 					break;
5316 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5317 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5318 					/* We are past our victim SSN */
5319 					break;
5320 				}
5321 			}
5322 			strm = &asoc->strmin[stseq->stream];
5323 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5324 				/* Update the sequence number */
5325 				strm->last_sequence_delivered = stseq->sequence;
5326 			}
5327 			/* now kick the stream the new way */
5328 			/* sa_ignore NO_NULL_CHK */
5329 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5330 		}
5331 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5332 	}
5333 	/*
5334 	 * Now slide the mapping arrays forward.
5335 	 */
5336 	sctp_slide_mapping_arrays(stcb);
5337 
5338 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5339 		/* now let's kick out and check for more fragmented delivery */
5340 		/* sa_ignore NO_NULL_CHK */
5341 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5342 	}
5343 }
5344