xref: /freebsd/sys/netinet/sctp_indata.c (revision 3823d5e198425b4f5e5a80267d195769d1063773)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
	/*
	 * Take out what has NOT yet been put on the socket queue but which
	 * we still hold for putting up.
	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. This keeps silly window syndrome (SWS)
	 * avoidance engaged.
	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
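
/*
 * Illustrative example (hypothetical numbers): with a 64000-byte receive
 * buffer and nothing pending (sb_cc, reassembly and stream queues all
 * empty), the full window is granted: max(64000, SCTP_MINIMAL_RWND). If
 * instead 8000 bytes sit on the reassembly queue in 4 chunks, the space
 * is first charged 8000 + 4 * MSIZE of mbuf overhead, and then the
 * my_rwnd_control_len overhead is subtracted as well.
 */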
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
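
/*
 * Note: the entry above takes its own reference on 'net' (ref_count is
 * bumped), so whoFrom stays valid until the readq entry is released; the
 * companion sctp_build_readq_entry_chk() below does the same for
 * chk->whoTo.
 */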
159 
160 
161 /*
162  * Build out our readq entry based on the incoming packet.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	/*
254 	 * Make sure that there is no un-initialized padding between the
255 	 * cmsg header and cmsg data and after the cmsg data.
256 	 */
257 	memset(cmh, 0, len);
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 		cmh->cmsg_type = SCTP_RCVINFO;
262 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 		rcvinfo->rcv_context = sinfo->sinfo_context;
270 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
273 	}
274 	if (provide_nxt) {
275 		cmh->cmsg_level = IPPROTO_SCTP;
276 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 		cmh->cmsg_type = SCTP_NXTINFO;
278 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 		nxtinfo->nxt_flags = 0;
281 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
283 		}
284 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
286 		}
287 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
289 		}
290 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
295 	}
296 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 		cmh->cmsg_level = IPPROTO_SCTP;
298 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299 		if (use_extended) {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 			cmh->cmsg_type = SCTP_EXTRCV;
302 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
304 		} else {
305 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 			cmh->cmsg_type = SCTP_SNDRCV;
307 			*outinfo = *sinfo;
308 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
309 		}
310 	}
311 	return (ret);
312 }
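
/*
 * Rough sketch of the resulting control mbuf when all three features are
 * enabled: three cmsgs laid end to end, each padded to CMSG_SPACE() of
 * its payload, matching the 'len' computed above:
 *
 *   [cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo
 *                                                or sctp_extrcvinfo]
 */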
313 
314 
315 static void
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
317 {
318 	uint32_t gap, i, cumackp1;
319 	int fnd = 0;
320 
321 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
322 		return;
323 	}
324 	cumackp1 = asoc->cumulative_tsn + 1;
325 	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
330 		return;
331 	}
332 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
333 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 		sctp_print_mapping_array(asoc);
336 #ifdef INVARIANTS
337 		panic("Things are really messed up now!!");
338 #endif
339 	}
340 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 		asoc->highest_tsn_inside_nr_map = tsn;
344 	}
345 	if (tsn == asoc->highest_tsn_inside_map) {
346 		/* We must back down to see what the new highest is */
347 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 				asoc->highest_tsn_inside_map = i;
351 				fnd = 1;
352 				break;
353 			}
354 		}
355 		if (!fnd) {
356 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
357 		}
358 	}
359 }
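
/*
 * In short: once a TSN has been handed to the socket buffer it is moved
 * from mapping_array (renegable) to nr_mapping_array (non-renegable),
 * and highest_tsn_inside_map is walked back if the moved TSN was the
 * current maximum of the renegable map.
 */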
360 
361 
/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
367 static void
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
369 {
370 	struct sctp_tmit_chunk *chk, *nchk;
371 	uint16_t nxt_todel;
372 	uint16_t stream_no;
373 	int end = 0;
374 	int cntDel;
375 	struct sctp_queued_to_read *control, *ctl, *nctl;
376 
377 	if (stcb == NULL)
378 		return;
379 
380 	cntDel = stream_no = 0;
381 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 		/* socket above is long gone or going.. */
385 abandon:
386 		asoc->fragmented_delivery_inprogress = 0;
387 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 			asoc->size_on_reasm_queue -= chk->send_size;
390 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
395 			if (chk->data) {
396 				sctp_m_freem(chk->data);
397 				chk->data = NULL;
398 			}
399 			/* Now free the address and data */
400 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 			/* sa_ignore FREED_MEMORY */
402 		}
403 		return;
404 	}
405 	SCTP_TCB_LOCK_ASSERT(stcb);
406 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 			/* Can't deliver more :< */
409 			return;
410 		}
411 		stream_no = chk->rec.data.stream_number;
412 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 		if (nxt_todel != chk->rec.data.stream_seq &&
414 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and the chunk is ordered; we can't deliver it yet.
			 */
419 			return;
420 		}
421 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
422 
423 			control = sctp_build_readq_entry_chk(stcb, chk);
424 			if (control == NULL) {
425 				/* out of memory? */
426 				return;
427 			}
428 			/* save it off for our future deliveries */
429 			stcb->asoc.control_pdapi = control;
430 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
431 				end = 1;
432 			else
433 				end = 0;
434 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 			sctp_add_to_readq(stcb->sctp_ep,
436 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
437 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
438 			cntDel++;
439 		} else {
440 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
441 				end = 1;
442 			else
443 				end = 0;
444 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 			    stcb->asoc.control_pdapi,
447 			    chk->data, end, chk->rec.data.TSN_seq,
448 			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
455 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
456 					goto abandon;
457 				} else {
458 #ifdef INVARIANTS
459 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 						panic("This should not happen control_pdapi NULL?");
461 					}
					/* if we did not panic, it was an EOM */
463 					panic("Bad chunking ??");
464 #else
465 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
467 					}
468 					SCTP_PRINTF("Bad chunking ??\n");
469 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
470 
471 #endif
472 					goto abandon;
473 				}
474 			}
475 			cntDel++;
476 		}
		/* pull it off the queue; we delivered it */
478 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
479 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 			asoc->fragmented_delivery_inprogress = 0;
481 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 				asoc->strmin[stream_no].last_sequence_delivered++;
483 			}
484 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
486 			}
487 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
492 			asoc->fragmented_delivery_inprogress = 1;
493 		}
494 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
498 
499 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 		asoc->size_on_reasm_queue -= chk->send_size;
501 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 		/* free up the chk */
503 		chk->data = NULL;
504 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
505 
506 		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
511 			struct sctp_stream_in *strm;
512 
513 			strm = &asoc->strmin[stream_no];
514 			nxt_todel = strm->last_sequence_delivered + 1;
515 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 				/* Deliver more if we can. */
517 				if (nxt_todel == ctl->sinfo_ssn) {
518 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 					asoc->size_on_all_streams -= ctl->length;
520 					sctp_ucount_decr(asoc->cnt_on_all_streams);
521 					strm->last_sequence_delivered++;
522 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 					sctp_add_to_readq(stcb->sctp_ep, stcb,
524 					    ctl,
525 					    &stcb->sctp_socket->so_rcv, 1,
526 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
527 				} else {
528 					break;
529 				}
530 				nxt_todel = strm->last_sequence_delivered + 1;
531 			}
532 			break;
533 		}
534 	}
535 }
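
/*
 * Delivery above stops as soon as the reassembly queue yields a TSN gap,
 * an ordered chunk whose SSN is not next for its stream, or a chunk
 * carrying SCTP_DATA_LAST_FRAG; after a LAST fragment completes the
 * message, the trailing loop drains any now-deliverable ordered messages
 * queued behind it.
 */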
536 
/*
 * Queue the chunk either right into the socket buffer if it is the next
 * one to go OR put it in the correct place in the delivery queue. If we
 * do append to the so_buf, keep doing so until we run out of in-order
 * chunks. One big question still remains: what do we do when the socket
 * buffer is FULL?
 */
543 static void
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545     struct sctp_queued_to_read *control, int *abort_flag)
546 {
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
565 	struct sctp_stream_in *strm;
566 	struct sctp_queued_to_read *at;
567 	int queue_needed;
568 	uint16_t nxt_todel;
569 	struct mbuf *op_err;
570 	char msg[SCTP_DIAG_INFO_LEN];
571 
572 	queue_needed = 1;
573 	asoc->size_on_all_streams += control->length;
574 	sctp_ucount_incr(asoc->cnt_on_all_streams);
575 	strm = &asoc->strmin[control->sinfo_stream];
576 	nxt_todel = strm->last_sequence_delivered + 1;
577 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 	}
580 	SCTPDBG(SCTP_DEBUG_INDATA1,
581 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
582 	    (uint32_t) control->sinfo_stream,
583 	    (uint32_t) strm->last_sequence_delivered,
584 	    (uint32_t) nxt_todel);
585 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586 		/* The incoming sseq is behind where we last delivered? */
587 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588 		    control->sinfo_ssn, strm->last_sequence_delivered);
589 protocol_error:
590 		/*
591 		 * throw it in the stream so it gets cleaned up in
592 		 * association destruction
593 		 */
594 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596 		    strm->last_sequence_delivered, control->sinfo_tsn,
597 		    control->sinfo_stream, control->sinfo_ssn);
598 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
601 		*abort_flag = 1;
602 		return;
603 
604 	}
605 	if (nxt_todel == control->sinfo_ssn) {
606 		/* can be delivered right away? */
607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
608 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
609 		}
		/* EY it won't be queued if it can be delivered directly */
611 		queue_needed = 0;
612 		asoc->size_on_all_streams -= control->length;
613 		sctp_ucount_decr(asoc->cnt_on_all_streams);
614 		strm->last_sequence_delivered++;
615 
616 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
617 		sctp_add_to_readq(stcb->sctp_ep, stcb,
618 		    control,
619 		    &stcb->sctp_socket->so_rcv, 1,
620 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
621 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
622 			/* all delivered */
623 			nxt_todel = strm->last_sequence_delivered + 1;
624 			if (nxt_todel == control->sinfo_ssn) {
625 				TAILQ_REMOVE(&strm->inqueue, control, next);
626 				asoc->size_on_all_streams -= control->length;
627 				sctp_ucount_decr(asoc->cnt_on_all_streams);
628 				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we can always hold the chunk on the
				 * d-queue, and we have a finite number that
				 * can be delivered from the strq.
				 */
635 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
636 					sctp_log_strm_del(control, NULL,
637 					    SCTP_STR_LOG_FROM_IMMED_DEL);
638 				}
639 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
640 				sctp_add_to_readq(stcb->sctp_ep, stcb,
641 				    control,
642 				    &stcb->sctp_socket->so_rcv, 1,
643 				    SCTP_READ_LOCK_NOT_HELD,
644 				    SCTP_SO_NOT_LOCKED);
645 				continue;
646 			}
647 			break;
648 		}
649 	}
650 	if (queue_needed) {
651 		/*
652 		 * Ok, we did not deliver this guy, find the correct place
653 		 * to put it on the queue.
654 		 */
655 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
656 			goto protocol_error;
657 		}
658 		if (TAILQ_EMPTY(&strm->inqueue)) {
659 			/* Empty queue */
660 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
661 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
662 			}
663 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
664 		} else {
665 			TAILQ_FOREACH(at, &strm->inqueue, next) {
666 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
667 					/*
668 					 * one in queue is bigger than the
669 					 * new one, insert before this one
670 					 */
671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
672 						sctp_log_strm_del(control, at,
673 						    SCTP_STR_LOG_FROM_INSERT_MD);
674 					}
675 					TAILQ_INSERT_BEFORE(at, control, next);
676 					break;
677 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number
					 */
					/*
					 * I guess I will just free this new
					 * guy; should we abort too? FIX ME
					 * MAYBE? Or it COULD be that the
					 * SSNs have wrapped. Maybe I should
					 * compare to TSN somehow... sigh,
					 * for now just blow away the chunk!
					 */
691 
692 					if (control->data)
693 						sctp_m_freem(control->data);
694 					control->data = NULL;
695 					asoc->size_on_all_streams -= control->length;
696 					sctp_ucount_decr(asoc->cnt_on_all_streams);
697 					if (control->whoFrom) {
698 						sctp_free_remote_addr(control->whoFrom);
699 						control->whoFrom = NULL;
700 					}
701 					sctp_free_a_readq(stcb, control);
702 					return;
703 				} else {
704 					if (TAILQ_NEXT(at, next) == NULL) {
705 						/*
706 						 * We are at the end, insert
707 						 * it after this one
708 						 */
709 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
710 							sctp_log_strm_del(control, at,
711 							    SCTP_STR_LOG_FROM_INSERT_TL);
712 						}
713 						TAILQ_INSERT_AFTER(&strm->inqueue,
714 						    at, control, next);
715 						break;
716 					}
717 				}
718 			}
719 		}
720 	}
721 }
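
/*
 * The insertion path above keeps strm->inqueue sorted by SSN; a duplicate
 * SSN is simply dropped, and an SSN at or behind the last delivered SSN
 * (or a TSN at or behind the cumulative TSN) is treated as a protocol
 * violation and aborts the association.
 */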
722 
/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and a 1 if all of the
 * message is ready or a 0 if the message is still incomplete.
 */
728 static int
729 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
730 {
731 	struct sctp_tmit_chunk *chk;
732 	uint32_t tsn;
733 
734 	*t_size = 0;
735 	chk = TAILQ_FIRST(&asoc->reasmqueue);
736 	if (chk == NULL) {
737 		/* nothing on the queue */
738 		return (0);
739 	}
740 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
741 		/* Not a first on the queue */
742 		return (0);
743 	}
744 	tsn = chk->rec.data.TSN_seq;
745 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
746 		if (tsn != chk->rec.data.TSN_seq) {
747 			return (0);
748 		}
749 		*t_size += chk->send_size;
750 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
751 			return (1);
752 		}
753 		tsn++;
754 	}
755 	return (0);
756 }
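
/*
 * Example: a message fragmented as TSNs 10 (FIRST), 11 and 12 (LAST),
 * all queued, yields *t_size = the sum of the three send_sizes and a
 * return of 1. If TSN 11 were missing, the walk would see 12 where it
 * expected 11 and return 0, with *t_size covering only TSN 10.
 */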
757 
758 static void
759 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
760 {
761 	struct sctp_tmit_chunk *chk;
762 	uint16_t nxt_todel;
763 	uint32_t tsize, pd_point;
764 
765 doit_again:
766 	chk = TAILQ_FIRST(&asoc->reasmqueue);
767 	if (chk == NULL) {
768 		/* Huh? */
769 		asoc->size_on_reasm_queue = 0;
770 		asoc->cnt_on_reasm_queue = 0;
771 		return;
772 	}
773 	if (asoc->fragmented_delivery_inprogress == 0) {
774 		nxt_todel =
775 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
776 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
777 		    (nxt_todel == chk->rec.data.stream_seq ||
778 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
783 			if (stcb->sctp_socket) {
784 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
785 				    stcb->sctp_ep->partial_delivery_point);
786 			} else {
787 				pd_point = stcb->sctp_ep->partial_delivery_point;
788 			}
789 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
795 				asoc->fragmented_delivery_inprogress = 1;
796 				asoc->tsn_last_delivered =
797 				    chk->rec.data.TSN_seq - 1;
798 				asoc->str_of_pdapi =
799 				    chk->rec.data.stream_number;
800 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
801 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
802 				asoc->fragment_flags = chk->rec.data.rcv_flags;
803 				sctp_service_reassembly(stcb, asoc);
804 			}
805 		}
806 	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
813 		sctp_service_reassembly(stcb, asoc);
814 		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could more be
			 * waiting?
			 */
819 			goto doit_again;
820 		}
821 	}
822 }
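
/*
 * pd_point is the partial-delivery threshold: the smaller of the socket
 * buffer limit shifted down by SCTP_PARTIAL_DELIVERY_SHIFT and the
 * endpoint's configured partial_delivery_point. A fragmented message is
 * started up the PD-API once it is complete or at least pd_point bytes
 * of it are queued in sequence.
 */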
823 
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
830 static void
831 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
832     struct sctp_tmit_chunk *chk, int *abort_flag)
833 {
834 	struct mbuf *op_err;
835 	char msg[SCTP_DIAG_INFO_LEN];
836 	uint32_t cum_ackp1, prev_tsn, post_tsn;
837 	struct sctp_tmit_chunk *at, *prev, *next;
838 
839 	prev = next = NULL;
840 	cum_ackp1 = asoc->tsn_last_delivered + 1;
841 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
842 		/* This is the first one on the queue */
843 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
844 		/*
845 		 * we do not check for delivery of anything when only one
846 		 * fragment is here
847 		 */
848 		asoc->size_on_reasm_queue = chk->send_size;
849 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
850 		if (chk->rec.data.TSN_seq == cum_ackp1) {
851 			if (asoc->fragmented_delivery_inprogress == 0 &&
852 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
853 			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
859 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
860 				snprintf(msg, sizeof(msg),
861 				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
862 				    chk->rec.data.TSN_seq,
863 				    chk->rec.data.stream_number,
864 				    chk->rec.data.stream_seq);
865 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
866 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
867 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
868 				*abort_flag = 1;
869 			} else if (asoc->fragmented_delivery_inprogress &&
870 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
871 				/*
872 				 * We are doing a partial delivery and the
873 				 * NEXT chunk MUST be either the LAST or
874 				 * MIDDLE fragment NOT a FIRST
875 				 */
876 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
877 				snprintf(msg, sizeof(msg),
878 				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
879 				    chk->rec.data.TSN_seq,
880 				    chk->rec.data.stream_number,
881 				    chk->rec.data.stream_seq);
882 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
883 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
884 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
885 				*abort_flag = 1;
886 			} else if (asoc->fragmented_delivery_inprogress) {
887 				/*
888 				 * Here we are ok with a MIDDLE or LAST
889 				 * piece
890 				 */
891 				if (chk->rec.data.stream_number !=
892 				    asoc->str_of_pdapi) {
893 					/* Got to be the right STR No */
894 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
895 					    chk->rec.data.stream_number,
896 					    asoc->str_of_pdapi);
897 					snprintf(msg, sizeof(msg),
898 					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
899 					    asoc->str_of_pdapi,
900 					    chk->rec.data.TSN_seq,
901 					    chk->rec.data.stream_number,
902 					    chk->rec.data.stream_seq);
903 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
904 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
905 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
906 					*abort_flag = 1;
907 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
908 					    SCTP_DATA_UNORDERED &&
909 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
910 					/* Got to be the right STR Seq */
911 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
912 					    chk->rec.data.stream_seq,
913 					    asoc->ssn_of_pdapi);
914 					snprintf(msg, sizeof(msg),
915 					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
916 					    asoc->ssn_of_pdapi,
917 					    chk->rec.data.TSN_seq,
918 					    chk->rec.data.stream_number,
919 					    chk->rec.data.stream_seq);
920 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
921 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
922 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
923 					*abort_flag = 1;
924 				}
925 			}
926 		}
927 		return;
928 	}
929 	/* Find its place */
930 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
931 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
932 			/*
933 			 * one in queue is bigger than the new one, insert
934 			 * before this one
935 			 */
936 			/* A check */
937 			asoc->size_on_reasm_queue += chk->send_size;
938 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
939 			next = at;
940 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
941 			break;
942 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * I guess I will just free this new guy; should we
			 * abort too? FIX ME MAYBE? For now just blow away
			 * the chunk!
			 */
951 			if (chk->data) {
952 				sctp_m_freem(chk->data);
953 				chk->data = NULL;
954 			}
955 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
956 			return;
957 		} else {
958 			prev = at;
959 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
960 				/*
961 				 * We are at the end, insert it after this
962 				 * one
963 				 */
964 				/* check it first */
965 				asoc->size_on_reasm_queue += chk->send_size;
966 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
967 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
968 				break;
969 			}
970 		}
971 	}
972 	/* Now the audits */
973 	if (prev) {
974 		prev_tsn = chk->rec.data.TSN_seq - 1;
975 		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
980 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
981 			    SCTP_DATA_FIRST_FRAG ||
982 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
983 			    SCTP_DATA_MIDDLE_FRAG) {
984 				/*
985 				 * Insert chk MUST be a MIDDLE or LAST
986 				 * fragment
987 				 */
988 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
989 				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
991 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
992 					snprintf(msg, sizeof(msg),
993 					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
994 					    chk->rec.data.TSN_seq,
995 					    chk->rec.data.stream_number,
996 					    chk->rec.data.stream_seq);
997 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
998 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
999 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1000 					*abort_flag = 1;
1001 					return;
1002 				}
1003 				if (chk->rec.data.stream_number !=
1004 				    prev->rec.data.stream_number) {
1005 					/*
1006 					 * Huh, need the correct STR here,
1007 					 * they must be the same.
1008 					 */
1009 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1010 					    chk->rec.data.stream_number,
1011 					    prev->rec.data.stream_number);
1012 					snprintf(msg, sizeof(msg),
1013 					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1014 					    prev->rec.data.stream_number,
1015 					    chk->rec.data.TSN_seq,
1016 					    chk->rec.data.stream_number,
1017 					    chk->rec.data.stream_seq);
1018 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1019 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1020 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1021 					*abort_flag = 1;
1022 					return;
1023 				}
1024 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1025 				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1026 					/*
1027 					 * Huh, need the same ordering here,
1028 					 * they must be the same.
1029 					 */
1030 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1031 					snprintf(msg, sizeof(msg),
1032 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1033 					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1034 					    chk->rec.data.TSN_seq,
1035 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1036 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1037 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1038 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1039 					*abort_flag = 1;
1040 					return;
1041 				}
1042 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1043 				    chk->rec.data.stream_seq !=
1044 				    prev->rec.data.stream_seq) {
1045 					/*
1046 					 * Huh, need the correct STR here,
1047 					 * they must be the same.
1048 					 */
1049 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1050 					    chk->rec.data.stream_seq,
1051 					    prev->rec.data.stream_seq);
1052 					snprintf(msg, sizeof(msg),
1053 					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1054 					    prev->rec.data.stream_seq,
1055 					    chk->rec.data.TSN_seq,
1056 					    chk->rec.data.stream_number,
1057 					    chk->rec.data.stream_seq);
1058 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1059 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1060 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1061 					*abort_flag = 1;
1062 					return;
1063 				}
1064 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1065 			    SCTP_DATA_LAST_FRAG) {
1066 				/* Insert chk MUST be a FIRST */
1067 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1068 				    SCTP_DATA_FIRST_FRAG) {
1069 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1070 					snprintf(msg, sizeof(msg),
1071 					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1072 					    chk->rec.data.TSN_seq,
1073 					    chk->rec.data.stream_number,
1074 					    chk->rec.data.stream_seq);
1075 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1076 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1077 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1078 					*abort_flag = 1;
1079 					return;
1080 				}
1081 			}
1082 		}
1083 	}
1084 	if (next) {
1085 		post_tsn = chk->rec.data.TSN_seq + 1;
1086 		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
1091 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1092 				/* Insert chk MUST be a last fragment */
1093 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1094 				    != SCTP_DATA_LAST_FRAG) {
1095 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1096 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1097 					snprintf(msg, sizeof(msg),
1098 					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1099 					    chk->rec.data.TSN_seq,
1100 					    chk->rec.data.stream_number,
1101 					    chk->rec.data.stream_seq);
1102 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1103 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1104 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1105 					*abort_flag = 1;
1106 					return;
1107 				}
1108 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1109 				    SCTP_DATA_MIDDLE_FRAG ||
1110 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1111 			    SCTP_DATA_LAST_FRAG) {
1112 				/*
1113 				 * Insert chk CAN be MIDDLE or FIRST NOT
1114 				 * LAST
1115 				 */
1116 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1117 				    SCTP_DATA_LAST_FRAG) {
1118 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1119 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1120 					snprintf(msg, sizeof(msg),
1121 					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122 					    chk->rec.data.TSN_seq,
1123 					    chk->rec.data.stream_number,
1124 					    chk->rec.data.stream_seq);
1125 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1127 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1128 					*abort_flag = 1;
1129 					return;
1130 				}
1131 				if (chk->rec.data.stream_number !=
1132 				    next->rec.data.stream_number) {
1133 					/*
1134 					 * Huh, need the correct STR here,
1135 					 * they must be the same.
1136 					 */
1137 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1138 					    chk->rec.data.stream_number,
1139 					    next->rec.data.stream_number);
1140 					snprintf(msg, sizeof(msg),
1141 					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1142 					    next->rec.data.stream_number,
1143 					    chk->rec.data.TSN_seq,
1144 					    chk->rec.data.stream_number,
1145 					    chk->rec.data.stream_seq);
1146 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1147 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1148 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1149 					*abort_flag = 1;
1150 					return;
1151 				}
1152 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1153 				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1154 					/*
1155 					 * Huh, need the same ordering here,
1156 					 * they must be the same.
1157 					 */
1158 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1159 					snprintf(msg, sizeof(msg),
1160 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1161 					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1162 					    chk->rec.data.TSN_seq,
1163 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1164 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1165 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1166 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1167 					*abort_flag = 1;
1168 					return;
1169 				}
1170 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1171 				    chk->rec.data.stream_seq !=
1172 				    next->rec.data.stream_seq) {
1173 					/*
1174 					 * Huh, need the correct STR here,
1175 					 * they must be the same.
1176 					 */
1177 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1178 					    chk->rec.data.stream_seq,
1179 					    next->rec.data.stream_seq);
1180 					snprintf(msg, sizeof(msg),
1181 					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1182 					    next->rec.data.stream_seq,
1183 					    chk->rec.data.TSN_seq,
1184 					    chk->rec.data.stream_number,
1185 					    chk->rec.data.stream_seq);
1186 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1187 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1188 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1189 					*abort_flag = 1;
1190 					return;
1191 				}
1192 			}
1193 		}
1194 	}
1195 	/* Do we need to do some delivery? check */
1196 	sctp_deliver_reasm_check(stcb, asoc);
1197 }
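
/*
 * The prev/next audits above enforce B/E-bit sanity around the insertion
 * point: a fragment directly after a FIRST or MIDDLE must be a MIDDLE or
 * LAST with the same stream, SSN and ordering; a fragment directly after
 * a LAST must be a FIRST. Any violation aborts the association with a
 * protocol-violation cause.
 */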
1198 
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this but this is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
1205 static int
1206 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1207     uint32_t TSN_seq)
1208 {
1209 	struct sctp_tmit_chunk *at;
1210 	uint32_t tsn_est;
1211 
1212 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1213 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1214 			/* is it one bigger? */
1215 			tsn_est = at->rec.data.TSN_seq + 1;
1216 			if (tsn_est == TSN_seq) {
1217 				/* yep. It better be a last then */
1218 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1219 				    SCTP_DATA_LAST_FRAG) {
1220 					/*
1221 					 * Ok this guy belongs next to a guy
1222 					 * that is NOT last, it should be a
1223 					 * middle/last, not a complete
1224 					 * chunk.
1225 					 */
1226 					return (1);
1227 				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
1233 					return (0);
1234 				}
1235 			}
1236 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1237 			/* Software error since I have a dup? */
1238 			return (1);
1239 		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
1244 			tsn_est = TSN_seq + 1;
1245 			if (tsn_est == at->rec.data.TSN_seq) {
1246 				/* Yep, It better be a first */
1247 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1248 				    SCTP_DATA_FIRST_FRAG) {
1249 					return (1);
1250 				} else {
1251 					return (0);
1252 				}
1253 			}
1254 		}
1255 	}
1256 	return (0);
1257 }
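
/*
 * In other words: this returns 1 when the TSN would sit immediately after
 * a fragment that is not a LAST, or immediately before a fragment that is
 * not a FIRST, since a complete (non-fragmented) chunk cannot legally
 * occupy such a slot; it returns 0 when the TSN looks plausible.
 */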
1258 
1259 static int
1260 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1261     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1262     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1263     int *break_flag, int last_chunk)
1264 {
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
1268 	uint32_t tsn, gap;
1269 	struct mbuf *dmbuf;
1270 	int the_len;
1271 	int need_reasm_check = 0;
1272 	uint16_t strmno, strmseq;
1273 	struct mbuf *op_err;
1274 	char msg[SCTP_DIAG_INFO_LEN];
1275 	struct sctp_queued_to_read *control;
1276 	int ordered;
1277 	uint32_t protocol_id;
1278 	uint8_t chunk_flags;
1279 	struct sctp_stream_reset_list *liste;
1280 
1281 	chk = NULL;
1282 	tsn = ntohl(ch->dp.tsn);
1283 	chunk_flags = ch->ch.chunk_flags;
1284 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1285 		asoc->send_sack = 1;
1286 	}
1287 	protocol_id = ch->dp.protocol_id;
1288 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1289 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1290 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1291 	}
1292 	if (stcb == NULL) {
1293 		return (0);
1294 	}
1295 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1296 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1297 		/* It is a duplicate */
1298 		SCTP_STAT_INCR(sctps_recvdupdata);
1299 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1300 			/* Record a dup for the next outbound sack */
1301 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1302 			asoc->numduptsns++;
1303 		}
1304 		asoc->send_sack = 1;
1305 		return (0);
1306 	}
1307 	/* Calculate the number of TSN's between the base and this TSN */
1308 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1309 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1310 		/* Can't hold the bit in the mapping at max array, toss it */
1311 		return (0);
1312 	}
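	/*
	 * Serial-number arithmetic sketch (hypothetical values): with
	 * mapping_array_base_tsn = 0xfffffffe and tsn = 0x00000003,
	 * SCTP_CALC_TSN_TO_GAP() accounts for the 32-bit TSN wrap and
	 * yields gap = 5. Gaps too large for the mapping array
	 * (SCTP_MAPPING_ARRAY * 8 bits) were dropped just above.
	 */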
1313 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1314 		SCTP_TCB_LOCK_ASSERT(stcb);
1315 		if (sctp_expand_mapping_array(asoc, gap)) {
1316 			/* Can't expand, drop it */
1317 			return (0);
1318 		}
1319 	}
1320 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1321 		*high_tsn = tsn;
1322 	}
1323 	/* See if we have received this one already */
1324 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1325 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1326 		SCTP_STAT_INCR(sctps_recvdupdata);
1327 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1328 			/* Record a dup for the next outbound sack */
1329 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1330 			asoc->numduptsns++;
1331 		}
1332 		asoc->send_sack = 1;
1333 		return (0);
1334 	}
1335 	/*
1336 	 * Check to see about the GONE flag, duplicates would cause a sack
1337 	 * to be sent up above
1338 	 */
1339 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1340 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1341 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1342 		/*
1343 		 * wait a minute, this guy is gone, there is no longer a
1344 		 * receiver. Send peer an ABORT!
1345 		 */
1346 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1347 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1348 		*abort_flag = 1;
1349 		return (0);
1350 	}
1351 	/*
1352 	 * Now before going further we see if there is room. If NOT then we
1353 	 * MAY let one through only IF this TSN is the one we are waiting
1354 	 * for on a partial delivery API.
1355 	 */
1356 
1357 	/* now do the tests */
1358 	if (((asoc->cnt_on_all_streams +
1359 	    asoc->cnt_on_reasm_queue +
1360 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1361 	    (((int)asoc->my_rwnd) <= 0)) {
1362 		/*
1363 		 * When we have NO room in the rwnd we check to make sure
1364 		 * the reader is doing its job...
1365 		 */
1366 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1367 			/* some to read, wake-up */
1368 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1369 			struct socket *so;
1370 
1371 			so = SCTP_INP_SO(stcb->sctp_ep);
1372 			atomic_add_int(&stcb->asoc.refcnt, 1);
1373 			SCTP_TCB_UNLOCK(stcb);
1374 			SCTP_SOCKET_LOCK(so, 1);
1375 			SCTP_TCB_LOCK(stcb);
1376 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1377 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1378 				/* assoc was freed while we were unlocked */
1379 				SCTP_SOCKET_UNLOCK(so, 1);
1380 				return (0);
1381 			}
1382 #endif
1383 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1384 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1385 			SCTP_SOCKET_UNLOCK(so, 1);
1386 #endif
1387 		}
1388 		/* now is it in the mapping array of what we have accepted? */
1389 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1390 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope, not in the valid range; dump it */
1392 			sctp_set_rwnd(stcb, asoc);
1393 			if ((asoc->cnt_on_all_streams +
1394 			    asoc->cnt_on_reasm_queue +
1395 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1396 				SCTP_STAT_INCR(sctps_datadropchklmt);
1397 			} else {
1398 				SCTP_STAT_INCR(sctps_datadroprwnd);
1399 			}
1400 			*break_flag = 1;
1401 			return (0);
1402 		}
1403 	}
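	/*
	 * Setting *break_flag above tells the caller to stop processing
	 * any further DATA chunks from this packet: the receiver is out of
	 * buffer space, so later TSNs would be dropped anyway.
	 */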
1404 	strmno = ntohs(ch->dp.stream_id);
1405 	if (strmno >= asoc->streamincnt) {
1406 		struct sctp_paramhdr *phdr;
1407 		struct mbuf *mb;
1408 
1409 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1410 		    0, M_NOWAIT, 1, MT_DATA);
1411 		if (mb != NULL) {
			/* add some space up front so prepending will work well */
1413 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1414 			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just parameters and this one has
			 * two back-to-back paramhdrs: one with the error
			 * type and size, the other with the stream id and a
			 * reserved field.
			 */
1420 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1421 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1422 			phdr->param_length =
1423 			    htons(sizeof(struct sctp_paramhdr) * 2);
1424 			phdr++;
1425 			/* We insert the stream in the type field */
1426 			phdr->param_type = ch->dp.stream_id;
1427 			/* And set the length to 0 for the rsvd field */
1428 			phdr->param_length = 0;
1429 			sctp_queue_op_err(stcb, mb);
1430 		}
1431 		SCTP_STAT_INCR(sctps_badsid);
1432 		SCTP_TCB_LOCK_ASSERT(stcb);
1433 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1434 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1435 			asoc->highest_tsn_inside_nr_map = tsn;
1436 		}
1437 		if (tsn == (asoc->cumulative_tsn + 1)) {
1438 			/* Update cum-ack */
1439 			asoc->cumulative_tsn = tsn;
1440 		}
1441 		return (0);
1442 	}
	/*
	 * Before we continue let's validate that we are not being fooled
	 * by an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array (512 * 8 bits), so there is
	 * no way our stream sequence numbers could have wrapped. We of
	 * course only validate the FIRST fragment, so the bit must be set.
	 */
1450 	strmseq = ntohs(ch->dp.stream_sequence);
1451 #ifdef SCTP_ASOCLOG_OF_TSNS
1452 	SCTP_TCB_LOCK_ASSERT(stcb);
1453 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1454 		asoc->tsn_in_at = 0;
1455 		asoc->tsn_in_wrapped = 1;
1456 	}
1457 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1458 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1459 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1460 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1461 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1462 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1463 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1464 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1465 	asoc->tsn_in_at++;
1466 #endif
1467 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1468 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1469 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1470 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1471 		/* The incoming sseq is behind where we last delivered? */
1472 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1473 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1474 
1475 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1476 		    asoc->strmin[strmno].last_sequence_delivered,
1477 		    tsn, strmno, strmseq);
1478 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1479 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1480 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1481 		*abort_flag = 1;
1482 		return (0);
1483 	}
1484 	/************************************
1485 	 * From here down we may find ch-> invalid,
1486 	 * so it's a good idea NOT to use it.
1487 	 *************************************/
1488 
1489 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1490 	if (last_chunk == 0) {
1491 		dmbuf = SCTP_M_COPYM(*m,
1492 		    (offset + sizeof(struct sctp_data_chunk)),
1493 		    the_len, M_NOWAIT);
1494 #ifdef SCTP_MBUF_LOGGING
1495 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1496 			struct mbuf *mat;
1497 
1498 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1499 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1500 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1501 				}
1502 			}
1503 		}
1504 #endif
1505 	} else {
1506 		/* We can steal the last chunk */
1507 		int l_len;
1508 
1509 		dmbuf = *m;
1510 		/* lop off the top part */
1511 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1512 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1513 			l_len = SCTP_BUF_LEN(dmbuf);
1514 		} else {
1515 			/*
1516 			 * need to count up the size; hopefully we do not
1517 			 * hit this too often :-0
1518 			 */
1519 			struct mbuf *lat;
1520 
1521 			l_len = 0;
1522 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1523 				l_len += SCTP_BUF_LEN(lat);
1524 			}
1525 		}
1526 		if (l_len > the_len) {
1527 			/* Trim the excess bytes off the end too */
1528 			m_adj(dmbuf, -(l_len - the_len));
1529 		}
1530 	}
1531 	if (dmbuf == NULL) {
1532 		SCTP_STAT_INCR(sctps_nomem);
1533 		return (0);
1534 	}
1535 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1536 	    asoc->fragmented_delivery_inprogress == 0 &&
1537 	    TAILQ_EMPTY(&asoc->resetHead) &&
1538 	    ((ordered == 0) ||
1539 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1540 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1541 		/* Candidate for express delivery */
1542 		/*
1543 		 * It's not fragmented, no PD-API is up, nothing is in the
1544 		 * delivery queue, it's un-ordered OR ordered and the next to
1545 		 * deliver AND nothing else is stuck on the stream queue,
1546 		 * and there is room for it in the socket buffer. Let's just
1547 		 * stuff it up the buffer....
1548 		 */
1549 
1550 		/* It would be nice to avoid this copy if we could :< */
1551 		sctp_alloc_a_readq(stcb, control);
1552 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1553 		    protocol_id,
1554 		    strmno, strmseq,
1555 		    chunk_flags,
1556 		    dmbuf);
1557 		if (control == NULL) {
1558 			goto failed_express_del;
1559 		}
1560 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1561 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1562 			asoc->highest_tsn_inside_nr_map = tsn;
1563 		}
1564 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1565 		    control, &stcb->sctp_socket->so_rcv,
1566 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1567 
1568 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1569 			/* for ordered, bump what we delivered */
1570 			asoc->strmin[strmno].last_sequence_delivered++;
1571 		}
1572 		SCTP_STAT_INCR(sctps_recvexpress);
1573 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1574 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1575 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1576 		}
1577 		control = NULL;
1578 
1579 		goto finish_express_del;
1580 	}
1581 failed_express_del:
1582 	/* If we reach here this is a new chunk */
1583 	chk = NULL;
1584 	control = NULL;
1585 	/* Express for fragmented delivery? */
1586 	if ((asoc->fragmented_delivery_inprogress) &&
1587 	    (stcb->asoc.control_pdapi) &&
1588 	    (asoc->str_of_pdapi == strmno) &&
1589 	    (asoc->ssn_of_pdapi == strmseq)
1590 	    ) {
1591 		control = stcb->asoc.control_pdapi;
1592 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1593 			/* Can't be another first? */
1594 			goto failed_pdapi_express_del;
1595 		}
1596 		if (tsn == (control->sinfo_tsn + 1)) {
1597 			/* Yep, we can add it on */
1598 			int end = 0;
1599 
1600 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1601 				end = 1;
1602 			}
1603 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1604 			    tsn,
1605 			    &stcb->sctp_socket->so_rcv)) {
1606 				SCTP_PRINTF("Append fails end:%d\n", end);
1607 				goto failed_pdapi_express_del;
1608 			}
1609 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1610 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1611 				asoc->highest_tsn_inside_nr_map = tsn;
1612 			}
1613 			SCTP_STAT_INCR(sctps_recvexpressm);
1614 			asoc->tsn_last_delivered = tsn;
1615 			asoc->fragment_flags = chunk_flags;
1616 			asoc->tsn_of_pdapi_last_delivered = tsn;
1617 			asoc->last_flags_delivered = chunk_flags;
1618 			asoc->last_strm_seq_delivered = strmseq;
1619 			asoc->last_strm_no_delivered = strmno;
1620 			if (end) {
1621 				/* clean up the flags and such */
1622 				asoc->fragmented_delivery_inprogress = 0;
1623 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1624 					asoc->strmin[strmno].last_sequence_delivered++;
1625 				}
1626 				stcb->asoc.control_pdapi = NULL;
1627 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1628 					/*
1629 					 * There could be another message
1630 					 * ready
1631 					 */
1632 					need_reasm_check = 1;
1633 				}
1634 			}
1635 			control = NULL;
1636 			goto finish_express_del;
1637 		}
1638 	}
1639 failed_pdapi_express_del:
1640 	control = NULL;
1641 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1642 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1643 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1644 			asoc->highest_tsn_inside_nr_map = tsn;
1645 		}
1646 	} else {
1647 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1648 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1649 			asoc->highest_tsn_inside_map = tsn;
1650 		}
1651 	}
1652 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1653 		sctp_alloc_a_chunk(stcb, chk);
1654 		if (chk == NULL) {
1655 			/* No memory so we drop the chunk */
1656 			SCTP_STAT_INCR(sctps_nomem);
1657 			if (last_chunk == 0) {
1658 				/* we copied it, free the copy */
1659 				sctp_m_freem(dmbuf);
1660 			}
1661 			return (0);
1662 		}
1663 		chk->rec.data.TSN_seq = tsn;
1664 		chk->no_fr_allowed = 0;
1665 		chk->rec.data.stream_seq = strmseq;
1666 		chk->rec.data.stream_number = strmno;
1667 		chk->rec.data.payloadtype = protocol_id;
1668 		chk->rec.data.context = stcb->asoc.context;
1669 		chk->rec.data.doing_fast_retransmit = 0;
1670 		chk->rec.data.rcv_flags = chunk_flags;
1671 		chk->asoc = asoc;
1672 		chk->send_size = the_len;
1673 		chk->whoTo = net;
1674 		atomic_add_int(&net->ref_count, 1);
1675 		chk->data = dmbuf;
1676 	} else {
1677 		sctp_alloc_a_readq(stcb, control);
1678 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1679 		    protocol_id,
1680 		    strmno, strmseq,
1681 		    chunk_flags,
1682 		    dmbuf);
1683 		if (control == NULL) {
1684 			/* No memory so we drop the chunk */
1685 			SCTP_STAT_INCR(sctps_nomem);
1686 			if (last_chunk == 0) {
1687 				/* we copied it, free the copy */
1688 				sctp_m_freem(dmbuf);
1689 			}
1690 			return (0);
1691 		}
1692 		control->length = the_len;
1693 	}
1694 
1695 	/* Mark it as received */
1696 	/* Now queue it where it belongs */
1697 	if (control != NULL) {
1698 		/* First a sanity check */
1699 		if (asoc->fragmented_delivery_inprogress) {
1700 			/*
1701 			 * Ok, we have a fragmented delivery in progress; if
1702 			 * this complete chunk is next to deliver OR belongs,
1703 			 * in our view, to the reassembly, the peer is evil
1704 			 * or broken.
1705 			 */
1706 			uint32_t estimate_tsn;
1707 
1708 			estimate_tsn = asoc->tsn_last_delivered + 1;
1709 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1710 			    (estimate_tsn == control->sinfo_tsn)) {
1711 				/* Evil/Broken peer */
1712 				sctp_m_freem(control->data);
1713 				control->data = NULL;
1714 				if (control->whoFrom) {
1715 					sctp_free_remote_addr(control->whoFrom);
1716 					control->whoFrom = NULL;
1717 				}
1718 				sctp_free_a_readq(stcb, control);
1719 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1720 				    tsn, strmno, strmseq);
1721 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1722 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1723 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1724 				*abort_flag = 1;
1725 				if (last_chunk) {
1726 					*m = NULL;
1727 				}
1728 				return (0);
1729 			} else {
1730 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1731 					sctp_m_freem(control->data);
1732 					control->data = NULL;
1733 					if (control->whoFrom) {
1734 						sctp_free_remote_addr(control->whoFrom);
1735 						control->whoFrom = NULL;
1736 					}
1737 					sctp_free_a_readq(stcb, control);
1738 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1739 					    tsn, strmno, strmseq);
1740 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1741 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1742 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1743 					*abort_flag = 1;
1744 					if (last_chunk) {
1745 						*m = NULL;
1746 					}
1747 					return (0);
1748 				}
1749 			}
1750 		} else {
1751 			/* No PDAPI running */
1752 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1753 				/*
1754 				 * Reassembly queue is NOT empty; validate
1755 				 * that this tsn does not need to be on the
1756 				 * reassembly queue. If it does, then our
1757 				 * peer is broken or evil.
1758 				 */
1759 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1760 					sctp_m_freem(control->data);
1761 					control->data = NULL;
1762 					if (control->whoFrom) {
1763 						sctp_free_remote_addr(control->whoFrom);
1764 						control->whoFrom = NULL;
1765 					}
1766 					sctp_free_a_readq(stcb, control);
1767 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1768 					    tsn, strmno, strmseq);
1769 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1770 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1771 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1772 					*abort_flag = 1;
1773 					if (last_chunk) {
1774 						*m = NULL;
1775 					}
1776 					return (0);
1777 				}
1778 			}
1779 		}
1780 		/* ok, if we reach here we have passed the sanity checks */
1781 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1782 			/* queue directly into socket buffer */
1783 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1784 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1785 			    control,
1786 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1787 		} else {
1788 			/*
1789 			 * Special check for when streams are resetting. We
1790 			 * could be smarter about this and check the actual
1791 			 * stream to see if it is not being reset.. that way
1792 			 * we would not create HOLB (head-of-line blocking)
1793 			 * between streams being reset and those not.
1794 			 *
1795 			 * We take complete messages that have a stream reset
1796 			 * intervening (aka the TSN is after where our
1797 			 * cum-ack needs to be) off and put them on the
1798 			 * pending_reply_queue. The reassembly ones we do
1799 			 * not have to worry about since they are all sorted
1800 			 * and processed by TSN order. It is only the
1801 			 * singletons I must worry about.
1802 			 */
1803 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1804 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1805 				/*
1806 				 * yep, it's past where we need to reset...
1807 				 * go ahead and queue it.
1808 				 */
1809 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1810 					/* first one on */
1811 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1812 				} else {
1813 					struct sctp_queued_to_read *ctlOn,
1814 					                   *nctlOn;
1815 					unsigned char inserted = 0;
1816 
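					/*
					 * Keep pending_reply_queue sorted
					 * by ascending sinfo_tsn: insert
					 * before the first entry whose TSN
					 * is not smaller than ours.
					 */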
1817 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1818 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1819 							continue;
1820 						} else {
1821 							/* found it */
1822 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1823 							inserted = 1;
1824 							break;
1825 						}
1826 					}
1827 					if (inserted == 0) {
1828 						/*
1829 						 * every queued entry has a
1830 						 * smaller TSN, so this one
1831 						 * must go at the end.
1832 						 */
1833 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1834 					}
1835 				}
1836 			} else {
1837 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1838 				if (*abort_flag) {
1839 					if (last_chunk) {
1840 						*m = NULL;
1841 					}
1842 					return (0);
1843 				}
1844 			}
1845 		}
1846 	} else {
1847 		/* Into the re-assembly queue */
1848 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1849 		if (*abort_flag) {
1850 			/*
1851 			 * the assoc is now gone and chk was put onto the
1852 			 * reasm queue, which has all been freed.
1853 			 */
1854 			if (last_chunk) {
1855 				*m = NULL;
1856 			}
1857 			return (0);
1858 		}
1859 	}
1860 finish_express_del:
1861 	if (tsn == (asoc->cumulative_tsn + 1)) {
1862 		/* Update cum-ack */
1863 		asoc->cumulative_tsn = tsn;
1864 	}
1865 	if (last_chunk) {
1866 		*m = NULL;
1867 	}
1868 	if (ordered) {
1869 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1870 	} else {
1871 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1872 	}
1873 	SCTP_STAT_INCR(sctps_recvdata);
1874 	/* Set it present please */
1875 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1876 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1877 	}
1878 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1879 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1880 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1881 	}
1882 	/* check the special flag for stream resets */
1883 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1884 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1885 		/*
1886 		 * We have finished working through the backlogged TSNs; now
1887 		 * it is time to reset streams. 1: call the reset function.
1888 		 * 2: free the pending_reply space. 3: distribute any chunks
1889 		 * in the pending_reply_queue.
1890 		 */
1891 		struct sctp_queued_to_read *ctl, *nctl;
1892 
1893 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1894 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1895 		SCTP_FREE(liste, SCTP_M_STRESET);
1896 		/* sa_ignore FREED_MEMORY */
1897 		liste = TAILQ_FIRST(&asoc->resetHead);
1898 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1899 			/* All can be removed */
1900 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1901 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1902 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1903 				if (*abort_flag) {
1904 					return (0);
1905 				}
1906 			}
1907 		} else {
1908 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1909 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1910 					break;
1911 				}
1912 				/*
1913 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1914 				 * process it, which is the negation of
1915 				 * ctl->sinfo_tsn > liste->tsn
1916 				 */
1917 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1918 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1919 				if (*abort_flag) {
1920 					return (0);
1921 				}
1922 			}
1923 		}
1924 		/*
1925 		 * Now service reassembly to pick up anything that has been
1926 		 * held on the reassembly queue.
1927 		 */
1928 		sctp_deliver_reasm_check(stcb, asoc);
1929 		need_reasm_check = 0;
1930 	}
1931 	if (need_reasm_check) {
1932 		/* Another one waits ? */
1933 		sctp_deliver_reasm_check(stcb, asoc);
1934 	}
1935 	return (1);
1936 }
1937 
1938 int8_t sctp_map_lookup_tab[256] = {
1939 	0, 1, 0, 2, 0, 1, 0, 3,
1940 	0, 1, 0, 2, 0, 1, 0, 4,
1941 	0, 1, 0, 2, 0, 1, 0, 3,
1942 	0, 1, 0, 2, 0, 1, 0, 5,
1943 	0, 1, 0, 2, 0, 1, 0, 3,
1944 	0, 1, 0, 2, 0, 1, 0, 4,
1945 	0, 1, 0, 2, 0, 1, 0, 3,
1946 	0, 1, 0, 2, 0, 1, 0, 6,
1947 	0, 1, 0, 2, 0, 1, 0, 3,
1948 	0, 1, 0, 2, 0, 1, 0, 4,
1949 	0, 1, 0, 2, 0, 1, 0, 3,
1950 	0, 1, 0, 2, 0, 1, 0, 5,
1951 	0, 1, 0, 2, 0, 1, 0, 3,
1952 	0, 1, 0, 2, 0, 1, 0, 4,
1953 	0, 1, 0, 2, 0, 1, 0, 3,
1954 	0, 1, 0, 2, 0, 1, 0, 7,
1955 	0, 1, 0, 2, 0, 1, 0, 3,
1956 	0, 1, 0, 2, 0, 1, 0, 4,
1957 	0, 1, 0, 2, 0, 1, 0, 3,
1958 	0, 1, 0, 2, 0, 1, 0, 5,
1959 	0, 1, 0, 2, 0, 1, 0, 3,
1960 	0, 1, 0, 2, 0, 1, 0, 4,
1961 	0, 1, 0, 2, 0, 1, 0, 3,
1962 	0, 1, 0, 2, 0, 1, 0, 6,
1963 	0, 1, 0, 2, 0, 1, 0, 3,
1964 	0, 1, 0, 2, 0, 1, 0, 4,
1965 	0, 1, 0, 2, 0, 1, 0, 3,
1966 	0, 1, 0, 2, 0, 1, 0, 5,
1967 	0, 1, 0, 2, 0, 1, 0, 3,
1968 	0, 1, 0, 2, 0, 1, 0, 4,
1969 	0, 1, 0, 2, 0, 1, 0, 3,
1970 	0, 1, 0, 2, 0, 1, 0, 8
1971 };
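
/*
 * A reference sketch of what the table above encodes (illustrative only,
 * kept out of the build): sctp_map_lookup_tab[val] is the number of
 * consecutive 1-bits in val starting at bit 0, i.e. the index of the
 * lowest clear bit, or 8 when val == 0xff.
 */
#if 0
static int
sctp_map_lookup_ref(uint8_t val)
{
	int i;

	for (i = 0; i < 8; i++) {
		if ((val & (1 << i)) == 0) {
			/* bit i is the first gap in this byte */
			return (i);
		}
	}
	return (8);		/* all eight bits set */
}
#endif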
1972 
1973 
1974 void
1975 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1976 {
1977 	/*
1978 	 * Now we also need to check the mapping array in a couple of ways.
1979 	 * 1) Did we move the cum-ack point?
1980 	 *
1981 	 * When you first glance at this you might think that all entries that
1982 	 * make up the position of the cum-ack would be in the nr-mapping
1983 	 * array only.. i.e. things up to the cum-ack are always
1984 	 * deliverable. That's true with one exception: when it's a fragmented
1985 	 * message, we may not deliver the data until some threshold (or all
1986 	 * of it) is in place. So we must OR the nr_mapping_array and
1987 	 * mapping_array to get a true picture of the cum-ack.
1988 	 */
1989 	struct sctp_association *asoc;
1990 	int at;
1991 	uint8_t val;
1992 	int slide_from, slide_end, lgap, distance;
1993 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1994 
1995 	asoc = &stcb->asoc;
1996 
1997 	old_cumack = asoc->cumulative_tsn;
1998 	old_base = asoc->mapping_array_base_tsn;
1999 	old_highest = asoc->highest_tsn_inside_map;
2000 	/*
2001 	 * We could probably improve this a small bit by calculating the
2002 	 * offset of the current cum-ack as the starting point.
2003 	 */
2004 	at = 0;
2005 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2006 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2007 		if (val == 0xff) {
2008 			at += 8;
2009 		} else {
2010 			/* there is a 0 bit */
2011 			at += sctp_map_lookup_tab[val];
2012 			break;
2013 		}
2014 	}
2015 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
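	/*
	 * Worked example (illustrative): if the OR'd bytes are 0xff, 0x07,
	 * then at = 8 + sctp_map_lookup_tab[0x07] = 8 + 3 = 11, meaning the
	 * eleven TSNs starting at the array base are all present, so the
	 * cum-ack becomes mapping_array_base_tsn + 10.
	 */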
2016 
2017 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2018 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2019 #ifdef INVARIANTS
2020 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2021 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2022 #else
2023 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2024 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2025 		sctp_print_mapping_array(asoc);
2026 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2027 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2028 		}
2029 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2030 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2031 #endif
2032 	}
2033 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2034 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2035 	} else {
2036 		highest_tsn = asoc->highest_tsn_inside_map;
2037 	}
2038 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2039 		/* The complete array was completed by a single FR */
2040 		/* highest becomes the cum-ack */
2041 		int clr;
2042 
2043 #ifdef INVARIANTS
2044 		unsigned int i;
2045 
2046 #endif
2047 
2048 		/* clear the array */
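		/*
		 * ((at + 7) >> 3) rounds the bit count up to whole bytes;
		 * e.g. at = 11 set bits means the first 2 bytes are cleared.
		 */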
2049 		clr = ((at + 7) >> 3);
2050 		if (clr > asoc->mapping_array_size) {
2051 			clr = asoc->mapping_array_size;
2052 		}
2053 		memset(asoc->mapping_array, 0, clr);
2054 		memset(asoc->nr_mapping_array, 0, clr);
2055 #ifdef INVARIANTS
2056 		for (i = 0; i < asoc->mapping_array_size; i++) {
2057 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2058 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2059 				sctp_print_mapping_array(asoc);
2060 			}
2061 		}
2062 #endif
2063 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2064 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2065 	} else if (at >= 8) {
2066 		/* we can slide the mapping array down */
2067 		/* slide_from holds where we hit the first NON 0xff byte */
2068 
2069 		/*
2070 		 * now calculate the ceiling of the move using our highest
2071 		 * TSN value
2072 		 */
2073 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2074 		slide_end = (lgap >> 3);
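		/*
		 * lgap is the bit offset of highest_tsn from the array base;
		 * shifting right by 3 gives the byte index holding that bit,
		 * which bounds how far the slide may reach.
		 */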
2075 		if (slide_end < slide_from) {
2076 			sctp_print_mapping_array(asoc);
2077 #ifdef INVARIANTS
2078 			panic("impossible slide");
2079 #else
2080 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2081 			    lgap, slide_end, slide_from, at);
2082 			return;
2083 #endif
2084 		}
2085 		if (slide_end > asoc->mapping_array_size) {
2086 #ifdef INVARIANTS
2087 			panic("would overrun buffer");
2088 #else
2089 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2090 			    asoc->mapping_array_size, slide_end);
2091 			slide_end = asoc->mapping_array_size;
2092 #endif
2093 		}
2094 		distance = (slide_end - slide_from) + 1;
2095 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2096 			sctp_log_map(old_base, old_cumack, old_highest,
2097 			    SCTP_MAP_PREPARE_SLIDE);
2098 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2099 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2100 		}
2101 		if (distance + slide_from > asoc->mapping_array_size ||
2102 		    distance < 0) {
2103 			/*
2104 			 * Here we do NOT slide forward the array so that
2105 			 * hopefully when more data comes in to fill it up
2106 			 * we will be able to slide it forward. Really I
2107 			 * don't think this should happen :-0
2108 			 */
2109 
2110 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2111 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2112 				    (uint32_t) asoc->mapping_array_size,
2113 				    SCTP_MAP_SLIDE_NONE);
2114 			}
2115 		} else {
2116 			int ii;
2117 
2118 			for (ii = 0; ii < distance; ii++) {
2119 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2120 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2121 
2122 			}
2123 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2124 				asoc->mapping_array[ii] = 0;
2125 				asoc->nr_mapping_array[ii] = 0;
2126 			}
2127 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2128 				asoc->highest_tsn_inside_map += (slide_from << 3);
2129 			}
2130 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2131 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2132 			}
2133 			asoc->mapping_array_base_tsn += (slide_from << 3);
2134 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2135 				sctp_log_map(asoc->mapping_array_base_tsn,
2136 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2137 				    SCTP_MAP_SLIDE_RESULT);
2138 			}
2139 		}
2140 	}
2141 }
2142 
2143 void
2144 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2145 {
2146 	struct sctp_association *asoc;
2147 	uint32_t highest_tsn;
2148 
2149 	asoc = &stcb->asoc;
2150 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2151 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2152 	} else {
2153 		highest_tsn = asoc->highest_tsn_inside_map;
2154 	}
2155 
2156 	/*
2157 	 * Now we need to see if we need to queue a sack or just start the
2158 	 * timer (if allowed).
2159 	 */
2160 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2161 		/*
2162 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2163 		 * sure the SACK timer is off and instead send a SHUTDOWN
2164 		 * and a SACK.
2165 		 */
2166 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2167 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2168 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2169 		}
2170 		sctp_send_shutdown(stcb,
2171 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2172 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2173 	} else {
2174 		int is_a_gap;
2175 
2176 		/* is there a gap now ? */
2177 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2178 
2179 		/*
2180 		 * CMT DAC algorithm: increase number of packets received
2181 		 * since last ack
2182 		 */
2183 		stcb->asoc.cmt_dac_pkts_rcvd++;
2184 
2185 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2186 							 * SACK */
2187 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2188 							 * longer is one */
2189 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2190 		    (is_a_gap) ||	/* is still a gap */
2191 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2192 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2193 		    ) {
2194 
2195 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2196 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2197 			    (stcb->asoc.send_sack == 0) &&
2198 			    (stcb->asoc.numduptsns == 0) &&
2199 			    (stcb->asoc.delayed_ack) &&
2200 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2201 
2202 				/*
2203 				 * CMT DAC algorithm: With CMT, delay acks
2204 				 * even in the face of reordering.
2205 				 *
2206 				 * Therefore, acks that do not have to be
2207 				 * sent for the above reasons will be
2208 				 * delayed. That is, acks that would have
2209 				 * been sent due to gap reports will be
2210 				 * delayed with DAC. Start the delayed ack
2211 				 * timer.
2212 				 */
2213 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2214 				    stcb->sctp_ep, stcb, NULL);
2215 			} else {
2216 				/*
2217 				 * Ok we must build a SACK since the timer
2218 				 * is pending, we got our first packet OR
2219 				 * there are gaps or duplicates.
2220 				 */
2221 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2222 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2223 			}
2224 		} else {
2225 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2226 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2227 				    stcb->sctp_ep, stcb, NULL);
2228 			}
2229 		}
2230 	}
2231 }
2232 
2233 void
2234 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2235 {
2236 	struct sctp_tmit_chunk *chk;
2237 	uint32_t tsize, pd_point;
2238 	uint16_t nxt_todel;
2239 
2240 	if (asoc->fragmented_delivery_inprogress) {
2241 		sctp_service_reassembly(stcb, asoc);
2242 	}
2243 	/* Can we proceed further, i.e. is the PD-API complete? */
2244 	if (asoc->fragmented_delivery_inprogress) {
2245 		/* no */
2246 		return;
2247 	}
2248 	/*
2249 	 * Now is there some other chunk I can deliver from the reassembly
2250 	 * queue.
2251 	 */
2252 doit_again:
2253 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2254 	if (chk == NULL) {
2255 		asoc->size_on_reasm_queue = 0;
2256 		asoc->cnt_on_reasm_queue = 0;
2257 		return;
2258 	}
2259 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2260 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2261 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2262 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2263 		/*
2264 		 * Yep, the first one is here. We set up to start reception
2265 		 * by backing down the TSN just in case we can't deliver.
2266 		 */
2267 
2268 		/*
2269 		 * Before we start, though, either all of the message should
2270 		 * be here or at least the partial delivery point's worth of
2271 		 * it, so that delivering something is worthwhile.
2272 		 */
2273 		if (stcb->sctp_socket) {
2274 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2275 			    stcb->sctp_ep->partial_delivery_point);
2276 		} else {
2277 			pd_point = stcb->sctp_ep->partial_delivery_point;
2278 		}
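		/*
		 * pd_point is thus the smaller of a fixed fraction of the
		 * socket receive buffer limit and the endpoint's configured
		 * partial delivery point: at least that much data must be
		 * queued before a partial delivery is started.
		 */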
2279 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2280 			asoc->fragmented_delivery_inprogress = 1;
2281 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2282 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2283 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2284 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2285 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2286 			sctp_service_reassembly(stcb, asoc);
2287 			if (asoc->fragmented_delivery_inprogress == 0) {
2288 				goto doit_again;
2289 			}
2290 		}
2291 	}
2292 }
2293 
2294 int
2295 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2296     struct sockaddr *src, struct sockaddr *dst,
2297     struct sctphdr *sh, struct sctp_inpcb *inp,
2298     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2299     uint8_t use_mflowid, uint32_t mflowid,
2300     uint32_t vrf_id, uint16_t port)
2301 {
2302 	struct sctp_data_chunk *ch, chunk_buf;
2303 	struct sctp_association *asoc;
2304 	int num_chunks = 0;	/* number of DATA chunks processed */
2305 	int stop_proc = 0;
2306 	int chk_length, break_flag, last_chunk;
2307 	int abort_flag = 0, was_a_gap;
2308 	struct mbuf *m;
2309 	uint32_t highest_tsn;
2310 
2311 	/* set the rwnd */
2312 	sctp_set_rwnd(stcb, &stcb->asoc);
2313 
2314 	m = *mm;
2315 	SCTP_TCB_LOCK_ASSERT(stcb);
2316 	asoc = &stcb->asoc;
2317 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2318 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2319 	} else {
2320 		highest_tsn = asoc->highest_tsn_inside_map;
2321 	}
2322 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2323 	/*
2324 	 * setup where we got the last DATA packet from for any SACK that
2325 	 * may need to go out. Don't bump the net. This is done ONLY when a
2326 	 * chunk is assigned.
2327 	 */
2328 	asoc->last_data_chunk_from = net;
2329 
2330 	/*-
2331 	 * Now before we proceed we must figure out if this is a wasted
2332 	 * cluster... i.e. it is a small packet sent in and yet the driver
2333 	 * underneath allocated a full cluster for it. If so we must copy it
2334 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2335 	 * with cluster starvation. Note for __Panda__ we don't do this
2336 	 * since it has clusters all the way down to 64 bytes.
2337 	 */
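	/*
	 * The test below catches small singleton mbufs: copying such a
	 * packet into a freshly allocated plain mbuf lets any oversized
	 * cluster the driver used be freed back to its pool.
	 */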
2338 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2339 		/* we only handle mbufs that are singletons.. not chains */
2340 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2341 		if (m) {
2342 			/* ok, let's see if we can copy the data up */
2343 			caddr_t *from, *to;
2344 
2345 			/* get the pointers and copy */
2346 			to = mtod(m, caddr_t *);
2347 			from = mtod((*mm), caddr_t *);
2348 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2349 			/* copy the length and free up the old */
2350 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2351 			sctp_m_freem(*mm);
2352 			/* success, back copy */
2353 			*mm = m;
2354 		} else {
2355 			/* We are in trouble in the mbuf world .. yikes */
2356 			m = *mm;
2357 		}
2358 	}
2359 	/* get pointer to the first chunk header */
2360 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2361 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2362 	if (ch == NULL) {
2363 		return (1);
2364 	}
2365 	/*
2366 	 * process all DATA chunks...
2367 	 */
2368 	*high_tsn = asoc->cumulative_tsn;
2369 	break_flag = 0;
2370 	asoc->data_pkts_seen++;
2371 	while (stop_proc == 0) {
2372 		/* validate chunk length */
2373 		chk_length = ntohs(ch->ch.chunk_length);
2374 		if (length - *offset < chk_length) {
2375 			/* all done, mutilated chunk */
2376 			stop_proc = 1;
2377 			continue;
2378 		}
2379 		if (ch->ch.chunk_type == SCTP_DATA) {
2380 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2381 				/*
2382 				 * Need to send an abort since we had an
2383 				 * invalid data chunk.
2384 				 */
2385 				struct mbuf *op_err;
2386 				char msg[SCTP_DIAG_INFO_LEN];
2387 
2388 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2389 				    chk_length);
2390 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2391 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2392 				sctp_abort_association(inp, stcb, m, iphlen,
2393 				    src, dst, sh, op_err,
2394 				    use_mflowid, mflowid,
2395 				    vrf_id, port);
2396 				return (2);
2397 			}
2398 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2399 				/*
2400 				 * Need to send an abort since we had an
2401 				 * empty data chunk.
2402 				 */
2403 				struct mbuf *op_err;
2404 
2405 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2406 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2407 				sctp_abort_association(inp, stcb, m, iphlen,
2408 				    src, dst, sh, op_err,
2409 				    use_mflowid, mflowid,
2410 				    vrf_id, port);
2411 				return (2);
2412 			}
2413 #ifdef SCTP_AUDITING_ENABLED
2414 			sctp_audit_log(0xB1, 0);
2415 #endif
2416 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2417 				last_chunk = 1;
2418 			} else {
2419 				last_chunk = 0;
2420 			}
2421 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2422 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2423 			    last_chunk)) {
2424 				num_chunks++;
2425 			}
2426 			if (abort_flag)
2427 				return (2);
2428 
2429 			if (break_flag) {
2430 				/*
2431 				 * Set because we are out of rwnd space and
2432 				 * have no drop report space left.
2433 				 */
2434 				stop_proc = 1;
2435 				continue;
2436 			}
2437 		} else {
2438 			/* not a data chunk in the data region */
2439 			switch (ch->ch.chunk_type) {
2440 			case SCTP_INITIATION:
2441 			case SCTP_INITIATION_ACK:
2442 			case SCTP_SELECTIVE_ACK:
2443 			case SCTP_NR_SELECTIVE_ACK:
2444 			case SCTP_HEARTBEAT_REQUEST:
2445 			case SCTP_HEARTBEAT_ACK:
2446 			case SCTP_ABORT_ASSOCIATION:
2447 			case SCTP_SHUTDOWN:
2448 			case SCTP_SHUTDOWN_ACK:
2449 			case SCTP_OPERATION_ERROR:
2450 			case SCTP_COOKIE_ECHO:
2451 			case SCTP_COOKIE_ACK:
2452 			case SCTP_ECN_ECHO:
2453 			case SCTP_ECN_CWR:
2454 			case SCTP_SHUTDOWN_COMPLETE:
2455 			case SCTP_AUTHENTICATION:
2456 			case SCTP_ASCONF_ACK:
2457 			case SCTP_PACKET_DROPPED:
2458 			case SCTP_STREAM_RESET:
2459 			case SCTP_FORWARD_CUM_TSN:
2460 			case SCTP_ASCONF:
2461 				/*
2462 				 * Now, what do we do with KNOWN chunks that
2463 				 * are NOT in the right place?
2464 				 *
2465 				 * For now, I do nothing but ignore them. We
2466 				 * may later want to add sysctl stuff to
2467 				 * switch out and do either an ABORT() or
2468 				 * possibly process them.
2469 				 */
2470 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2471 					struct mbuf *op_err;
2472 
2473 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2474 					sctp_abort_association(inp, stcb,
2475 					    m, iphlen,
2476 					    src, dst,
2477 					    sh, op_err,
2478 					    use_mflowid, mflowid,
2479 					    vrf_id, port);
2480 					return (2);
2481 				}
2482 				break;
2483 			default:
2484 				/* unknown chunk type, use bit rules */
2485 				if (ch->ch.chunk_type & 0x40) {
2486 					/* Add an error report to the queue */
2487 					struct mbuf *merr;
2488 					struct sctp_paramhdr *phd;
2489 
2490 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2491 					if (merr) {
2492 						phd = mtod(merr, struct sctp_paramhdr *);
2493 						/*
2494 						 * We cheat and use param
2495 						 * type since we did not
2496 						 * bother to define an error
2497 						 * cause struct. They are
2498 						 * the same basic format
2499 						 * with different names.
2500 						 */
2501 						phd->param_type =
2502 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2503 						phd->param_length =
2504 						    htons(chk_length + sizeof(*phd));
2505 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2506 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2507 						if (SCTP_BUF_NEXT(merr)) {
2508 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
2509 								sctp_m_freem(merr);
2510 							} else {
2511 								sctp_queue_op_err(stcb, merr);
2512 							}
2513 						} else {
2514 							sctp_m_freem(merr);
2515 						}
2516 					}
2517 				}
2518 				if ((ch->ch.chunk_type & 0x80) == 0) {
2519 					/* discard the rest of this packet */
2520 					stop_proc = 1;
2521 				}	/* else skip this bad chunk and
2522 					 * continue... */
2523 				break;
2524 			}	/* switch of chunk type */
2525 		}
2526 		*offset += SCTP_SIZE32(chk_length);
2527 		if ((*offset >= length) || stop_proc) {
2528 			/* no more data left in the mbuf chain */
2529 			stop_proc = 1;
2530 			continue;
2531 		}
2532 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2533 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2534 		if (ch == NULL) {
2535 			*offset = length;
2536 			stop_proc = 1;
2537 			continue;
2538 		}
2539 	}
2540 	if (break_flag) {
2541 		/*
2542 		 * we need to report rwnd overrun drops.
2543 		 */
2544 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2545 	}
2546 	if (num_chunks) {
2547 		/*
2548 		 * Did we get data? If so, update the time for auto-close and
2549 		 * give the peer credit for being alive.
2550 		 */
2551 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2552 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2553 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2554 			    stcb->asoc.overall_error_count,
2555 			    0,
2556 			    SCTP_FROM_SCTP_INDATA,
2557 			    __LINE__);
2558 		}
2559 		stcb->asoc.overall_error_count = 0;
2560 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2561 	}
2562 	/* now service all of the reassm queue if needed */
2563 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2564 		sctp_service_queues(stcb, asoc);
2565 
2566 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2567 		/* Assure that we ack right away */
2568 		stcb->asoc.send_sack = 1;
2569 	}
2570 	/* Start a sack timer or QUEUE a SACK for sending */
2571 	sctp_sack_check(stcb, was_a_gap);
2572 	return (0);
2573 }
2574 
2575 static int
2576 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2577     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2578     int *num_frs,
2579     uint32_t * biggest_newly_acked_tsn,
2580     uint32_t * this_sack_lowest_newack,
2581     int *rto_ok)
2582 {
2583 	struct sctp_tmit_chunk *tp1;
2584 	unsigned int theTSN;
2585 	int j, wake_him = 0, circled = 0;
2586 
2587 	/* Recover the tp1 we last saw */
2588 	tp1 = *p_tp1;
2589 	if (tp1 == NULL) {
2590 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2591 	}
2592 	for (j = frag_strt; j <= frag_end; j++) {
2593 		theTSN = j + last_tsn;
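			/*
			 * Gap ack block offsets are relative to the SACK's
			 * cumulative TSN, so a block [frag_strt, frag_end]
			 * acks TSNs last_tsn + frag_strt .. last_tsn +
			 * frag_end inclusive.
			 */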
2594 		while (tp1) {
2595 			if (tp1->rec.data.doing_fast_retransmit)
2596 				(*num_frs) += 1;
2597 
2598 			/*-
2599 			 * CMT: CUCv2 algorithm. For each TSN being
2600 			 * processed from the sent queue, track the
2601 			 * next expected pseudo-cumack, or
2602 			 * rtx_pseudo_cumack, if required. Separate
2603 			 * cumack trackers for first transmissions,
2604 			 * and retransmissions.
2605 			 */
2606 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2607 			    (tp1->snd_count == 1)) {
2608 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2609 				tp1->whoTo->find_pseudo_cumack = 0;
2610 			}
2611 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2612 			    (tp1->snd_count > 1)) {
2613 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2614 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2615 			}
2616 			if (tp1->rec.data.TSN_seq == theTSN) {
2617 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2618 					/*-
2619 					 * must be held until
2620 					 * cum-ack passes
2621 					 */
2622 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2623 						/*-
2624 						 * If it is less than RESEND, it is
2625 						 * now no-longer in flight.
2626 						 * Higher values may already be set
2627 						 * via previous Gap Ack Blocks...
2628 						 * i.e. ACKED or RESEND.
2629 						 */
2630 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2631 						    *biggest_newly_acked_tsn)) {
2632 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2633 						}
2634 						/*-
2635 						 * CMT: SFR algo (and HTNA) - set
2636 						 * saw_newack to 1 for dest being
2637 						 * newly acked. update
2638 						 * this_sack_highest_newack if
2639 						 * appropriate.
2640 						 */
2641 						if (tp1->rec.data.chunk_was_revoked == 0)
2642 							tp1->whoTo->saw_newack = 1;
2643 
2644 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2645 						    tp1->whoTo->this_sack_highest_newack)) {
2646 							tp1->whoTo->this_sack_highest_newack =
2647 							    tp1->rec.data.TSN_seq;
2648 						}
2649 						/*-
2650 						 * CMT DAC algo: also update
2651 						 * this_sack_lowest_newack
2652 						 */
2653 						if (*this_sack_lowest_newack == 0) {
2654 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2655 								sctp_log_sack(*this_sack_lowest_newack,
2656 								    last_tsn,
2657 								    tp1->rec.data.TSN_seq,
2658 								    0,
2659 								    0,
2660 								    SCTP_LOG_TSN_ACKED);
2661 							}
2662 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2663 						}
2664 						/*-
2665 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2666 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2667 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2668 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2669 						 * Separate pseudo_cumack trackers for first transmissions and
2670 						 * retransmissions.
2671 						 */
2672 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2673 							if (tp1->rec.data.chunk_was_revoked == 0) {
2674 								tp1->whoTo->new_pseudo_cumack = 1;
2675 							}
2676 							tp1->whoTo->find_pseudo_cumack = 1;
2677 						}
2678 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2679 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2680 						}
2681 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2682 							if (tp1->rec.data.chunk_was_revoked == 0) {
2683 								tp1->whoTo->new_pseudo_cumack = 1;
2684 							}
2685 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2686 						}
2687 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2688 							sctp_log_sack(*biggest_newly_acked_tsn,
2689 							    last_tsn,
2690 							    tp1->rec.data.TSN_seq,
2691 							    frag_strt,
2692 							    frag_end,
2693 							    SCTP_LOG_TSN_ACKED);
2694 						}
2695 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2696 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2697 							    tp1->whoTo->flight_size,
2698 							    tp1->book_size,
2699 							    (uintptr_t) tp1->whoTo,
2700 							    tp1->rec.data.TSN_seq);
2701 						}
2702 						sctp_flight_size_decrease(tp1);
2703 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2704 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2705 							    tp1);
2706 						}
2707 						sctp_total_flight_decrease(stcb, tp1);
2708 
2709 						tp1->whoTo->net_ack += tp1->send_size;
2710 						if (tp1->snd_count < 2) {
2711 							/*-
2712 							 * True non-retransmitted chunk
2713 							 */
2714 							tp1->whoTo->net_ack2 += tp1->send_size;
2715 
2716 							/*-
2717 							 * update RTO too ?
2718 							 */
2719 							if (tp1->do_rtt) {
2720 								if (*rto_ok) {
2721 									tp1->whoTo->RTO =
2722 									    sctp_calculate_rto(stcb,
2723 									    &stcb->asoc,
2724 									    tp1->whoTo,
2725 									    &tp1->sent_rcv_time,
2726 									    sctp_align_safe_nocopy,
2727 									    SCTP_RTT_FROM_DATA);
2728 									*rto_ok = 0;
2729 								}
2730 								if (tp1->whoTo->rto_needed == 0) {
2731 									tp1->whoTo->rto_needed = 1;
2732 								}
2733 								tp1->do_rtt = 0;
2734 							}
2735 						}
2736 					}
2737 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2738 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2739 						    stcb->asoc.this_sack_highest_gap)) {
2740 							stcb->asoc.this_sack_highest_gap =
2741 							    tp1->rec.data.TSN_seq;
2742 						}
2743 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2744 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2745 #ifdef SCTP_AUDITING_ENABLED
2746 							sctp_audit_log(0xB2,
2747 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2748 #endif
2749 						}
2750 					}
2751 					/*-
2752 					 * All chunks NOT UNSENT fall through here and are marked
2753 					 * (leave PR-SCTP ones that are to skip alone though)
2754 					 */
2755 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2756 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2757 						tp1->sent = SCTP_DATAGRAM_MARKED;
2758 					}
2759 					if (tp1->rec.data.chunk_was_revoked) {
2760 						/* deflate the cwnd */
2761 						tp1->whoTo->cwnd -= tp1->book_size;
2762 						tp1->rec.data.chunk_was_revoked = 0;
2763 					}
2764 					/* NR Sack code here */
2765 					if (nr_sacking &&
2766 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2767 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2768 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2769 #ifdef INVARIANTS
2770 						} else {
2771 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2772 #endif
2773 						}
2774 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2775 						if (tp1->data) {
2776 							/*
2777 							 * sa_ignore
2778 							 * NO_NULL_CHK
2779 							 */
2780 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2781 							sctp_m_freem(tp1->data);
2782 							tp1->data = NULL;
2783 						}
2784 						wake_him++;
2785 					}
2786 				}
2787 				break;
2788 			}	/* if (tp1->TSN_seq == theTSN) */
2789 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2790 				break;
2791 			}
2792 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2793 			if ((tp1 == NULL) && (circled == 0)) {
2794 				circled++;
2795 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2796 			}
2797 		}		/* end while (tp1) */
2798 		if (tp1 == NULL) {
2799 			circled = 0;
2800 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2801 		}
2802 		/* In case the fragments were not in order we must reset */
2803 	}			/* end for (j = fragStart */
2804 	*p_tp1 = tp1;
2805 	return (wake_him);	/* Return value only used for nr-sack */
2806 }
2807 
2808 
2809 static int
2810 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2811     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2812     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2813     int num_seg, int num_nr_seg, int *rto_ok)
2814 {
2815 	struct sctp_gap_ack_block *frag, block;
2816 	struct sctp_tmit_chunk *tp1;
2817 	int i;
2818 	int num_frs = 0;
2819 	int chunk_freed;
2820 	int non_revocable;
2821 	uint16_t frag_strt, frag_end, prev_frag_end;
2822 
2823 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2824 	prev_frag_end = 0;
2825 	chunk_freed = 0;
2826 
2827 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2828 		if (i == num_seg) {
2829 			prev_frag_end = 0;
2830 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2831 		}
2832 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2833 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2834 		*offset += sizeof(block);
2835 		if (frag == NULL) {
2836 			return (chunk_freed);
2837 		}
2838 		frag_strt = ntohs(frag->start);
2839 		frag_end = ntohs(frag->end);
2840 
2841 		if (frag_strt > frag_end) {
2842 			/* This gap report is malformed, skip it. */
2843 			continue;
2844 		}
2845 		if (frag_strt <= prev_frag_end) {
2846 			/* This gap report is not in order, so restart. */
2847 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2848 		}
2849 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2850 			*biggest_tsn_acked = last_tsn + frag_end;
2851 		}
2852 		if (i < num_seg) {
2853 			non_revocable = 0;
2854 		} else {
2855 			non_revocable = 1;
2856 		}
2857 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2858 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2859 		    this_sack_lowest_newack, rto_ok)) {
2860 			chunk_freed = 1;
2861 		}
2862 		prev_frag_end = frag_end;
2863 	}
2864 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2865 		if (num_frs)
2866 			sctp_log_fr(*biggest_tsn_acked,
2867 			    *biggest_newly_acked_tsn,
2868 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2869 	}
2870 	return (chunk_freed);
2871 }
2872 
2873 static void
2874 sctp_check_for_revoked(struct sctp_tcb *stcb,
2875     struct sctp_association *asoc, uint32_t cumack,
2876     uint32_t biggest_tsn_acked)
2877 {
2878 	struct sctp_tmit_chunk *tp1;
2879 
2880 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2881 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2882 			/*
2883 			 * ok this guy is either ACKED or MARKED. If it is
2884 			 * ACKED it has been previously acked but not this
2885 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
2886 			 * again.
2887 			 */
2888 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2889 				break;
2890 			}
2891 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2892 				/* it has been revoked */
2893 				tp1->sent = SCTP_DATAGRAM_SENT;
2894 				tp1->rec.data.chunk_was_revoked = 1;
2895 				/*
2896 				 * We must add this stuff back in to assure
2897 				 * timers and such get started.
2898 				 */
2899 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2900 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2901 					    tp1->whoTo->flight_size,
2902 					    tp1->book_size,
2903 					    (uintptr_t) tp1->whoTo,
2904 					    tp1->rec.data.TSN_seq);
2905 				}
2906 				sctp_flight_size_increase(tp1);
2907 				sctp_total_flight_increase(stcb, tp1);
2908 				/*
2909 				 * We inflate the cwnd to compensate for our
2910 				 * artificial inflation of the flight_size.
2911 				 */
2912 				tp1->whoTo->cwnd += tp1->book_size;
2913 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2914 					sctp_log_sack(asoc->last_acked_seq,
2915 					    cumack,
2916 					    tp1->rec.data.TSN_seq,
2917 					    0,
2918 					    0,
2919 					    SCTP_LOG_TSN_REVOKED);
2920 				}
2921 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2922 				/* it has been re-acked in this SACK */
2923 				tp1->sent = SCTP_DATAGRAM_ACKED;
2924 			}
2925 		}
2926 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2927 			break;
2928 	}
2929 }
2930 
2931 
2932 static void
2933 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2934     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2935 {
2936 	struct sctp_tmit_chunk *tp1;
2937 	int strike_flag = 0;
2938 	struct timeval now;
2939 	int tot_retrans = 0;
2940 	uint32_t sending_seq;
2941 	struct sctp_nets *net;
2942 	int num_dests_sacked = 0;
2943 
2944 	/*
2945 	 * select the sending_seq, this is either the next thing ready to be
2946 	 * sent but not transmitted, OR, the next seq we assign.
2947 	 */
2948 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2949 	if (tp1 == NULL) {
2950 		sending_seq = asoc->sending_seq;
2951 	} else {
2952 		sending_seq = tp1->rec.data.TSN_seq;
2953 	}
2954 
2955 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2956 	if ((asoc->sctp_cmt_on_off > 0) &&
2957 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2958 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2959 			if (net->saw_newack)
2960 				num_dests_sacked++;
2961 		}
2962 	}
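	/*
	 * num_dests_sacked now counts the destinations that saw new acks
	 * in this SACK; the DAC checks below apply only when exactly one
	 * destination was newly acked (a non-mixed SACK).
	 */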
2963 	if (stcb->asoc.prsctp_supported) {
2964 		(void)SCTP_GETTIME_TIMEVAL(&now);
2965 	}
2966 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2967 		strike_flag = 0;
2968 		if (tp1->no_fr_allowed) {
2969 			/* this one had a timeout or something */
2970 			continue;
2971 		}
2972 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2973 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2974 				sctp_log_fr(biggest_tsn_newly_acked,
2975 				    tp1->rec.data.TSN_seq,
2976 				    tp1->sent,
2977 				    SCTP_FR_LOG_CHECK_STRIKE);
2978 		}
2979 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2980 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2981 			/* done */
2982 			break;
2983 		}
2984 		if (stcb->asoc.prsctp_supported) {
2985 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2986 				/* Is it expired? */
2987 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2988 					/* Yes so drop it */
2989 					if (tp1->data != NULL) {
2990 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2991 						    SCTP_SO_NOT_LOCKED);
2992 					}
2993 					continue;
2994 				}
2995 			}
2996 		}
2997 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2998 			/* we are beyond the tsn in the sack  */
2999 			break;
3000 		}
3001 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3002 			/* either a RESEND, ACKED, or MARKED */
3003 			/* skip */
3004 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3005 				/* Continue striking FWD-TSN chunks */
3006 				tp1->rec.data.fwd_tsn_cnt++;
3007 			}
3008 			continue;
3009 		}
3010 		/*
3011 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3012 		 */
3013 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3014 			/*
3015 			 * No new acks were received for data sent to this
3016 			 * dest. Therefore, according to the SFR algo for
3017 			 * CMT, no data sent to this dest can be marked for
3018 			 * FR using this SACK.
3019 			 */
3020 			continue;
3021 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3022 		    tp1->whoTo->this_sack_highest_newack)) {
3023 			/*
3024 			 * CMT: New acks were received for data sent to
3025 			 * this dest. But no new acks were seen for data
3026 			 * sent after tp1. Therefore, according to the SFR
3027 			 * algo for CMT, tp1 cannot be marked for FR using
3028 			 * this SACK. This step covers part of the DAC algo
3029 			 * and the HTNA algo as well.
3030 			 */
3031 			continue;
3032 		}
3033 		/*
3034 		 * Here we check to see if we were have already done a FR
3035 		 * and if so we see if the biggest TSN we saw in the sack is
3036 		 * smaller than the recovery point. If so we don't strike
3037 		 * the tsn... otherwise we CAN strike the TSN.
3038 		 */
3039 		/*
3040 		 * @@@ JRI: Check for CMT if (accum_moved &&
3041 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3042 		 * 0)) {
3043 		 */
3044 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3045 			/*
3046 			 * Strike the TSN if in fast-recovery and cum-ack
3047 			 * moved.
3048 			 */
3049 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3050 				sctp_log_fr(biggest_tsn_newly_acked,
3051 				    tp1->rec.data.TSN_seq,
3052 				    tp1->sent,
3053 				    SCTP_FR_LOG_STRIKE_CHUNK);
3054 			}
3055 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3056 				tp1->sent++;
3057 			}
3058 			if ((asoc->sctp_cmt_on_off > 0) &&
3059 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3060 				/*
3061 				 * CMT DAC algorithm: If SACK flag is set to
3062 				 * 0, then lowest_newack test will not pass
3063 				 * because it would have been set to the
3064 				 * cumack earlier. If not already to be
3065 				 * rtx'd, if not a mixed sack and if tp1 is
3066 				 * not between two sacked TSNs, then mark by
3067 				 * one more. NOTE that we are marking by one
3068 				 * additional time since the SACK DAC flag
3069 				 * indicates that two packets have been
3070 				 * received after this missing TSN.
3071 				 */
3072 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3073 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3074 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3075 						sctp_log_fr(16 + num_dests_sacked,
3076 						    tp1->rec.data.TSN_seq,
3077 						    tp1->sent,
3078 						    SCTP_FR_LOG_STRIKE_CHUNK);
3079 					}
3080 					tp1->sent++;
3081 				}
3082 			}
3083 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3084 		    (asoc->sctp_cmt_on_off == 0)) {
3085 			/*
3086 			 * For those that have done a FR we must take
3087 			 * special consideration if we strike. I.e. the
3088 			 * biggest_newly_acked must be higher than the
3089 			 * sending_seq at the time we did the FR.
3090 			 */
3091 			if (
3092 #ifdef SCTP_FR_TO_ALTERNATE
3093 			/*
3094 			 * If FR's go to new networks, then we must only do
3095 			 * this for singly homed asoc's. However if the FR's
3096 			 * go to the same network (Armando's work) then it's
3097 			 * ok to FR multiple times.
3098 			 */
3099 			    (asoc->numnets < 2)
3100 #else
3101 			    (1)
3102 #endif
3103 			    ) {
3104 
3105 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3106 				    tp1->rec.data.fast_retran_tsn)) {
3107 					/*
3108 					 * Strike the TSN, since this ack is
3109 					 * beyond where things were when we
3110 					 * did a FR.
3111 					 */
3112 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3113 						sctp_log_fr(biggest_tsn_newly_acked,
3114 						    tp1->rec.data.TSN_seq,
3115 						    tp1->sent,
3116 						    SCTP_FR_LOG_STRIKE_CHUNK);
3117 					}
3118 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3119 						tp1->sent++;
3120 					}
3121 					strike_flag = 1;
3122 					if ((asoc->sctp_cmt_on_off > 0) &&
3123 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3124 						/*
3125 						 * CMT DAC algorithm: If
3126 						 * SACK flag is set to 0,
3127 						 * then lowest_newack test
3128 						 * will not pass because it
3129 						 * would have been set to
3130 						 * the cumack earlier. If
3131 						 * not already to be rtx'd,
3132 						 * if not a mixed sack and
3133 						 * if tp1 is not between two
3134 						 * sacked TSNs, then mark by
3135 						 * one more. NOTE that we
3136 						 * are marking by one
3137 						 * additional time since the
3138 						 * SACK DAC flag indicates
3139 						 * that two packets have
3140 						 * been received after this
3141 						 * missing TSN.
3142 						 */
3143 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3144 						    (num_dests_sacked == 1) &&
3145 						    SCTP_TSN_GT(this_sack_lowest_newack,
3146 						    tp1->rec.data.TSN_seq)) {
3147 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3148 								sctp_log_fr(32 + num_dests_sacked,
3149 								    tp1->rec.data.TSN_seq,
3150 								    tp1->sent,
3151 								    SCTP_FR_LOG_STRIKE_CHUNK);
3152 							}
3153 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3154 								tp1->sent++;
3155 							}
3156 						}
3157 					}
3158 				}
3159 			}
3160 			/*
3161 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3162 			 * algo covers HTNA.
3163 			 */
3164 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3165 		    biggest_tsn_newly_acked)) {
3166 			/*
3167 			 * We don't strike these: this is the HTNA
3168 			 * algorithm, i.e. we don't strike if our TSN is
3169 			 * larger than the Highest TSN Newly Acked.
3170 			 */
3171 			;
3172 		} else {
3173 			/* Strike the TSN */
3174 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3175 				sctp_log_fr(biggest_tsn_newly_acked,
3176 				    tp1->rec.data.TSN_seq,
3177 				    tp1->sent,
3178 				    SCTP_FR_LOG_STRIKE_CHUNK);
3179 			}
3180 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3181 				tp1->sent++;
3182 			}
3183 			if ((asoc->sctp_cmt_on_off > 0) &&
3184 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3185 				/*
3186 				 * CMT DAC algorithm: If SACK flag is set to
3187 				 * 0, then lowest_newack test will not pass
3188 				 * because it would have been set to the
3189 				 * cumack earlier. If not already to be
3190 				 * rtx'd, if not a mixed sack and if tp1 is
3191 				 * not between two sacked TSNs, then mark by
3192 				 * one more. NOTE that we are marking by one
3193 				 * additional time since the SACK DAC flag
3194 				 * indicates that two packets have been
3195 				 * received after this missing TSN.
3196 				 */
3197 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3198 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3199 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3200 						sctp_log_fr(48 + num_dests_sacked,
3201 						    tp1->rec.data.TSN_seq,
3202 						    tp1->sent,
3203 						    SCTP_FR_LOG_STRIKE_CHUNK);
3204 					}
3205 					tp1->sent++;
3206 				}
3207 			}
3208 		}
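		/*
		 * If the strikes above just promoted this chunk to
		 * SCTP_DATAGRAM_RESEND, do the fast-retransmit bookkeeping:
		 * deflate the flight size, credit the peer's rwnd, honor the
		 * PR-SCTP retransmission limit and pick the destination for
		 * the retransmission.
		 */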
3209 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3210 			struct sctp_nets *alt;
3211 
3212 			/* fix counts and things */
3213 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3214 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3215 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3216 				    tp1->book_size,
3217 				    (uintptr_t) tp1->whoTo,
3218 				    tp1->rec.data.TSN_seq);
3219 			}
3220 			if (tp1->whoTo) {
3221 				tp1->whoTo->net_ack++;
3222 				sctp_flight_size_decrease(tp1);
3223 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3224 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3225 					    tp1);
3226 				}
3227 			}
3228 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3229 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3230 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3231 			}
3232 			/* add back to the rwnd */
3233 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3234 
3235 			/* remove from the total flight */
3236 			sctp_total_flight_decrease(stcb, tp1);
3237 
3238 			if ((stcb->asoc.prsctp_supported) &&
3239 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3240 				/*
3241 				 * Has it been retransmitted tv_sec times? -
3242 				 * we store the retran count there.
3243 				 */
3244 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3245 					/* Yes, so drop it */
3246 					if (tp1->data != NULL) {
3247 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3248 						    SCTP_SO_NOT_LOCKED);
3249 					}
3250 					/* Make sure to flag we had a FR */
3251 					tp1->whoTo->net_ack++;
3252 					continue;
3253 				}
3254 			}
3255 			/*
3256 			 * SCTP_PRINTF("OK, we are now ready to FR this
3257 			 * guy\n");
3258 			 */
3259 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3260 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3261 				    0, SCTP_FR_MARKED);
3262 			}
3263 			if (strike_flag) {
3264 				/* This is a subsequent FR */
3265 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3266 			}
3267 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3268 			if (asoc->sctp_cmt_on_off > 0) {
3269 				/*
3270 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3271 				 * If CMT is being used, then pick dest with
3272 				 * largest ssthresh for any retransmission.
3273 				 */
3274 				tp1->no_fr_allowed = 1;
3275 				alt = tp1->whoTo;
3276 				/* sa_ignore NO_NULL_CHK */
3277 				if (asoc->sctp_cmt_pf > 0) {
3278 					/*
3279 					 * JRS 5/18/07 - If CMT PF is on,
3280 					 * use the PF version of
3281 					 * find_alt_net()
3282 					 */
3283 					alt = sctp_find_alternate_net(stcb, alt, 2);
3284 				} else {
3285 					/*
3286 					 * JRS 5/18/07 - If only CMT is on,
3287 					 * use the CMT version of
3288 					 * find_alt_net()
3289 					 */
3290 					/* sa_ignore NO_NULL_CHK */
3291 					alt = sctp_find_alternate_net(stcb, alt, 1);
3292 				}
3293 				if (alt == NULL) {
3294 					alt = tp1->whoTo;
3295 				}
3296 				/*
3297 				 * CUCv2: If a different dest is picked for
3298 				 * the retransmission, then new
3299 				 * (rtx-)pseudo_cumack needs to be tracked
3300 				 * for orig dest. Let CUCv2 track new (rtx-)
3301 				 * pseudo-cumack always.
3302 				 */
3303 				if (tp1->whoTo) {
3304 					tp1->whoTo->find_pseudo_cumack = 1;
3305 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3306 				}
3307 			} else {/* CMT is OFF */
3308 
3309 #ifdef SCTP_FR_TO_ALTERNATE
3310 				/* Can we find an alternate? */
3311 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3312 #else
3313 				/*
3314 				 * default behavior is to NOT retransmit
3315 				 * FR's to an alternate. Armando Caro's
3316 				 * paper details why.
3317 				 */
3318 				alt = tp1->whoTo;
3319 #endif
3320 			}
3321 
3322 			tp1->rec.data.doing_fast_retransmit = 1;
3323 			tot_retrans++;
3324 			/* mark the sending seq for possible subsequent FR's */
3325 			/*
3326 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3327 			 * (uint32_t)tp1->rec.data.TSN_seq);
3328 			 */
3329 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3330 				/*
3331 				 * If the send queue is empty, sending_seq is
3332 				 * the next sequence number that will be
3333 				 * assigned, so record it as the mark for
3334 				 * subsequent FR checks.
3335 				 */
3336 				tp1->rec.data.fast_retran_tsn = sending_seq;
3337 			} else {
3338 				/*
3339 				 * If there are chunks on the send queue
3340 				 * (unsent data that has made it from the
3341 				 * stream queues but not out the door), we
3342 				 * take the first one (which will have the
3343 				 * lowest TSN) and record its TSN as the
3344 				 * mark for subsequent FR checks.
3345 				 */
3346 				struct sctp_tmit_chunk *ttt;
3347 
3348 				ttt = TAILQ_FIRST(&asoc->send_queue);
3349 				tp1->rec.data.fast_retran_tsn =
3350 				    ttt->rec.data.TSN_seq;
3351 			}
3352 
3353 			if (tp1->do_rtt) {
3354 				/*
3355 				 * this guy had an RTO calculation pending on
3356 				 * it, cancel it
3357 				 */
3358 				if ((tp1->whoTo != NULL) &&
3359 				    (tp1->whoTo->rto_needed == 0)) {
3360 					tp1->whoTo->rto_needed = 1;
3361 				}
3362 				tp1->do_rtt = 0;
3363 			}
3364 			if (alt != tp1->whoTo) {
3365 				/* yes, there is an alternate. */
3366 				sctp_free_remote_addr(tp1->whoTo);
3367 				/* sa_ignore FREED_MEMORY */
3368 				tp1->whoTo = alt;
3369 				atomic_add_int(&alt->ref_count, 1);
3370 			}
3371 		}
3372 	}
3373 }
3374 
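/*
 * Try to advance the PR-SCTP Advanced.Peer.Ack.Point: walk the sent queue
 * past chunks that have been abandoned (FORWARD_TSN_SKIP) or NR-acked.
 * Returns the chunk the new ack point stopped at, or NULL if it could not
 * be moved.
 */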
3375 struct sctp_tmit_chunk *
3376 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3377     struct sctp_association *asoc)
3378 {
3379 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3380 	struct timeval now;
3381 	int now_filled = 0;
3382 
3383 	if (asoc->prsctp_supported == 0) {
3384 		return (NULL);
3385 	}
3386 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3387 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3388 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3389 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3390 			/* no chance to advance, out of here */
3391 			break;
3392 		}
3393 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3394 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3395 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3396 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3397 				    asoc->advanced_peer_ack_point,
3398 				    tp1->rec.data.TSN_seq, 0, 0);
3399 			}
3400 		}
3401 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3402 			/*
3403 			 * We can't fwd-tsn past any that are reliable, i.e.
3404 			 * they are retransmitted until the asoc fails.
3405 			 */
3406 			break;
3407 		}
3408 		if (!now_filled) {
3409 			(void)SCTP_GETTIME_TIMEVAL(&now);
3410 			now_filled = 1;
3411 		}
3412 		/*
3413 		 * Now we have a chunk which is marked for another
3414 		 * retransmission to a PR-stream but may have run out its
3415 		 * chances already OR has been marked to skip now. Can we
3416 		 * skip it if it is a resend?
3417 		 */
3418 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3419 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3420 			/*
3421 			 * Now is this one marked for resend and its time is
3422 			 * now up?
3423 			 */
3424 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3425 				/* Yes so drop it */
3426 				if (tp1->data) {
3427 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3428 					    1, SCTP_SO_NOT_LOCKED);
3429 				}
3430 			} else {
3431 				/*
3432 				 * No, we are done when we hit one marked for
3433 				 * resend whose time has not expired.
3434 				 */
3435 				break;
3436 			}
3437 		}
3438 		/*
3439 		 * OK, now if this chunk is marked to drop, we can clean up
3440 		 * the chunk, advance our peer ack point, and check
3441 		 * the next chunk.
3442 		 */
3443 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3444 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3445 			/* the advanced PeerAckPoint goes forward */
3446 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3447 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3448 				a_adv = tp1;
3449 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3450 				/* No update but we do save the chk */
3451 				a_adv = tp1;
3452 			}
3453 		} else {
3454 			/*
3455 			 * If it is still in RESEND we can advance no
3456 			 * further
3457 			 */
3458 			break;
3459 		}
3460 	}
3461 	return (a_adv);
3462 }
3463 
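/*
 * Audit the flight-size bookkeeping against the sent queue. Under
 * INVARIANTS an inconsistency triggers a panic; otherwise it is logged
 * and non-zero is returned so the caller can rebuild the counters.
 */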
3464 static int
3465 sctp_fs_audit(struct sctp_association *asoc)
3466 {
3467 	struct sctp_tmit_chunk *chk;
3468 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3469 	int entry_flight, entry_cnt, ret;
3470 
3471 	entry_flight = asoc->total_flight;
3472 	entry_cnt = asoc->total_flight_count;
3473 	ret = 0;
3474 
3475 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3476 		return (0);
3477 
3478 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3479 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3480 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3481 			    chk->rec.data.TSN_seq,
3482 			    chk->send_size,
3483 			    chk->snd_count);
3484 			inflight++;
3485 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3486 			resend++;
3487 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3488 			inbetween++;
3489 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3490 			above++;
3491 		} else {
3492 			acked++;
3493 		}
3494 	}
3495 
3496 	if ((inflight > 0) || (inbetween > 0)) {
3497 #ifdef INVARIANTS
3498 		panic("Flight size-express incorrect?\n");
3499 #else
3500 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3501 		    entry_flight, entry_cnt);
3502 
3503 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3504 		    inflight, inbetween, resend, above, acked);
3505 		ret = 1;
3506 #endif
3507 	}
3508 	return (ret);
3509 }
3510 
3511 
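/*
 * The peer's rwnd has reopened: take the chunk that was used as a window
 * probe out of the flight counts and mark it for retransmission, unless
 * it has since been acked or abandoned.
 */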
3512 static void
3513 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3514     struct sctp_association *asoc,
3515     struct sctp_tmit_chunk *tp1)
3516 {
3517 	tp1->window_probe = 0;
3518 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3519 		/* TSNs skipped; we do NOT move back. */
3520 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3521 		    tp1->whoTo->flight_size,
3522 		    tp1->book_size,
3523 		    (uintptr_t) tp1->whoTo,
3524 		    tp1->rec.data.TSN_seq);
3525 		return;
3526 	}
3527 	/* First setup this by shrinking flight */
3528 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3529 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3530 		    tp1);
3531 	}
3532 	sctp_flight_size_decrease(tp1);
3533 	sctp_total_flight_decrease(stcb, tp1);
3534 	/* Now mark for resend */
3535 	tp1->sent = SCTP_DATAGRAM_RESEND;
3536 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3537 
3538 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3539 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3540 		    tp1->whoTo->flight_size,
3541 		    tp1->book_size,
3542 		    (uintptr_t) tp1->whoTo,
3543 		    tp1->rec.data.TSN_seq);
3544 	}
3545 }
3546 
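/*
 * Express (fast-path) SACK processing, used for SACKs that carry only a
 * cumulative TSN ack: no gap-ack blocks need to be walked, so the sent
 * queue can simply be drained up to the cumack.
 */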
3547 void
3548 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3549     uint32_t rwnd, int *abort_now, int ecne_seen)
3550 {
3551 	struct sctp_nets *net;
3552 	struct sctp_association *asoc;
3553 	struct sctp_tmit_chunk *tp1, *tp2;
3554 	uint32_t old_rwnd;
3555 	int win_probe_recovery = 0;
3556 	int win_probe_recovered = 0;
3557 	int j, done_once = 0;
3558 	int rto_ok = 1;
3559 
3560 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3561 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3562 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3563 	}
3564 	SCTP_TCB_LOCK_ASSERT(stcb);
3565 #ifdef SCTP_ASOCLOG_OF_TSNS
3566 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3567 	stcb->asoc.cumack_log_at++;
3568 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
3569 		stcb->asoc.cumack_log_at = 0;
3570 	}
3571 #endif
3572 	asoc = &stcb->asoc;
3573 	old_rwnd = asoc->peers_rwnd;
3574 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3575 		/* old ack */
3576 		return;
3577 	} else if (asoc->last_acked_seq == cumack) {
3578 		/* Window update sack */
3579 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3580 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3581 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3582 			/* SWS sender side engages */
3583 			asoc->peers_rwnd = 0;
3584 		}
3585 		if (asoc->peers_rwnd > old_rwnd) {
3586 			goto again;
3587 		}
3588 		return;
3589 	}
3590 	/* First setup for CC stuff */
3591 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3592 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3593 			/* Drag along the window_tsn for cwr's */
3594 			net->cwr_window_tsn = cumack;
3595 		}
3596 		net->prev_cwnd = net->cwnd;
3597 		net->net_ack = 0;
3598 		net->net_ack2 = 0;
3599 
3600 		/*
3601 		 * CMT: Reset CUC and Fast recovery algo variables before
3602 		 * SACK processing
3603 		 */
3604 		net->new_pseudo_cumack = 0;
3605 		net->will_exit_fast_recovery = 0;
3606 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3607 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3608 		}
3609 	}
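	/*
	 * Sanity check: a valid cum-ack can never reach a TSN we have not
	 * sent yet. If it does, the peer is broken or malicious, and the
	 * association is aborted (a panic under INVARIANTS).
	 */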
3610 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3611 		uint32_t send_s;
3612 
3613 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3614 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3615 			    sctpchunk_listhead);
3616 			send_s = tp1->rec.data.TSN_seq + 1;
3617 		} else {
3618 			send_s = asoc->sending_seq;
3619 		}
3620 		if (SCTP_TSN_GE(cumack, send_s)) {
3621 #ifndef INVARIANTS
3622 			struct mbuf *op_err;
3623 			char msg[SCTP_DIAG_INFO_LEN];
3624 
3625 #endif
3626 #ifdef INVARIANTS
3627 			panic("Impossible sack 1");
3628 #else
3629 
3630 			*abort_now = 1;
3631 			/* XXX */
3632 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3633 			    cumack, send_s);
3634 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3635 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3636 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3637 			return;
3638 #endif
3639 		}
3640 	}
3641 	asoc->this_sack_highest_gap = cumack;
3642 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3643 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3644 		    stcb->asoc.overall_error_count,
3645 		    0,
3646 		    SCTP_FROM_SCTP_INDATA,
3647 		    __LINE__);
3648 	}
3649 	stcb->asoc.overall_error_count = 0;
3650 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3651 		/* process the new consecutive TSN first */
3652 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3653 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3654 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3655 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3656 				}
3657 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3658 					/*
3659 					 * If it is less than ACKED, it is
3660 					 * now no longer in flight. Higher
3661 					 * values may occur during marking
3662 					 */
3663 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3664 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3665 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3666 							    tp1->whoTo->flight_size,
3667 							    tp1->book_size,
3668 							    (uintptr_t) tp1->whoTo,
3669 							    tp1->rec.data.TSN_seq);
3670 						}
3671 						sctp_flight_size_decrease(tp1);
3672 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3673 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3674 							    tp1);
3675 						}
3676 						/* sa_ignore NO_NULL_CHK */
3677 						sctp_total_flight_decrease(stcb, tp1);
3678 					}
3679 					tp1->whoTo->net_ack += tp1->send_size;
3680 					if (tp1->snd_count < 2) {
3681 						/*
3682 						 * True non-retransmitted
3683 						 * chunk
3684 						 */
3685 						tp1->whoTo->net_ack2 +=
3686 						    tp1->send_size;
3687 
3688 						/* update RTO too? */
3689 						if (tp1->do_rtt) {
3690 							if (rto_ok) {
3691 								tp1->whoTo->RTO =
3692 								/* sa_ignore NO_NULL_CHK */
3697 								    sctp_calculate_rto(stcb,
3698 								    asoc, tp1->whoTo,
3699 								    &tp1->sent_rcv_time,
3700 								    sctp_align_safe_nocopy,
3701 								    SCTP_RTT_FROM_DATA);
3702 								rto_ok = 0;
3703 							}
3704 							if (tp1->whoTo->rto_needed == 0) {
3705 								tp1->whoTo->rto_needed = 1;
3706 							}
3707 							tp1->do_rtt = 0;
3708 						}
3709 					}
3710 					/*
3711 					 * CMT: CUCv2 algorithm. From the
3712 					 * cumack'd TSNs, for each TSN being
3713 					 * acked for the first time, set the
3714 					 * following variables for the
3715 					 * corresp destination.
3716 					 * new_pseudo_cumack will trigger a
3717 					 * cwnd update.
3718 					 * find_(rtx_)pseudo_cumack will
3719 					 * trigger search for the next
3720 					 * expected (rtx-)pseudo-cumack.
3721 					 */
3722 					tp1->whoTo->new_pseudo_cumack = 1;
3723 					tp1->whoTo->find_pseudo_cumack = 1;
3724 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3725 
3726 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3727 						/* sa_ignore NO_NULL_CHK */
3728 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3729 					}
3730 				}
3731 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3732 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3733 				}
3734 				if (tp1->rec.data.chunk_was_revoked) {
3735 					/* deflate the cwnd */
3736 					tp1->whoTo->cwnd -= tp1->book_size;
3737 					tp1->rec.data.chunk_was_revoked = 0;
3738 				}
3739 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3740 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3741 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3742 #ifdef INVARIANTS
3743 					} else {
3744 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3745 #endif
3746 					}
3747 				}
3748 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3749 				if (tp1->data) {
3750 					/* sa_ignore NO_NULL_CHK */
3751 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3752 					sctp_m_freem(tp1->data);
3753 					tp1->data = NULL;
3754 				}
3755 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3756 					sctp_log_sack(asoc->last_acked_seq,
3757 					    cumack,
3758 					    tp1->rec.data.TSN_seq,
3759 					    0,
3760 					    0,
3761 					    SCTP_LOG_FREE_SENT);
3762 				}
3763 				asoc->sent_queue_cnt--;
3764 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3765 			} else {
3766 				break;
3767 			}
3768 		}
3769 
3770 	}
3771 	/* sa_ignore NO_NULL_CHK */
3772 	if (stcb->sctp_socket) {
3773 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3774 		struct socket *so;
3775 
3776 #endif
3777 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3778 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3779 			/* sa_ignore NO_NULL_CHK */
3780 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3781 		}
3782 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3783 		so = SCTP_INP_SO(stcb->sctp_ep);
3784 		atomic_add_int(&stcb->asoc.refcnt, 1);
3785 		SCTP_TCB_UNLOCK(stcb);
3786 		SCTP_SOCKET_LOCK(so, 1);
3787 		SCTP_TCB_LOCK(stcb);
3788 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3789 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3790 			/* assoc was freed while we were unlocked */
3791 			SCTP_SOCKET_UNLOCK(so, 1);
3792 			return;
3793 		}
3794 #endif
3795 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3796 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3797 		SCTP_SOCKET_UNLOCK(so, 1);
3798 #endif
3799 	} else {
3800 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3801 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3802 		}
3803 	}
3804 
3805 	/* JRS - Use the congestion control given in the CC module */
3806 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3807 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3808 			if (net->net_ack2 > 0) {
3809 				/*
3810 				 * Karn's rule applies to clearing error
3811 				 * count; this is optional.
3812 				 */
3813 				net->error_count = 0;
3814 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3815 					/* addr came good */
3816 					net->dest_state |= SCTP_ADDR_REACHABLE;
3817 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3818 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3819 				}
3820 				if (net == stcb->asoc.primary_destination) {
3821 					if (stcb->asoc.alternate) {
3822 						/*
3823 						 * release the alternate,
3824 						 * primary is good
3825 						 */
3826 						sctp_free_remote_addr(stcb->asoc.alternate);
3827 						stcb->asoc.alternate = NULL;
3828 					}
3829 				}
3830 				if (net->dest_state & SCTP_ADDR_PF) {
3831 					net->dest_state &= ~SCTP_ADDR_PF;
3832 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3833 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3834 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3835 					/* Done with this net */
3836 					net->net_ack = 0;
3837 				}
3838 				/* restore any doubled timers */
3839 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3840 				if (net->RTO < stcb->asoc.minrto) {
3841 					net->RTO = stcb->asoc.minrto;
3842 				}
3843 				if (net->RTO > stcb->asoc.maxrto) {
3844 					net->RTO = stcb->asoc.maxrto;
3845 				}
3846 			}
3847 		}
3848 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3849 	}
3850 	asoc->last_acked_seq = cumack;
3851 
3852 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3853 		/* nothing left in-flight */
3854 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3855 			net->flight_size = 0;
3856 			net->partial_bytes_acked = 0;
3857 		}
3858 		asoc->total_flight = 0;
3859 		asoc->total_flight_count = 0;
3860 	}
3861 	/* RWND update */
3862 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3863 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3864 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3865 		/* SWS sender side engages */
3866 		asoc->peers_rwnd = 0;
3867 	}
3868 	if (asoc->peers_rwnd > old_rwnd) {
3869 		win_probe_recovery = 1;
3870 	}
3871 	/* Now assure a timer is running where data is queued */
3872 again:
3873 	j = 0;
3874 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3875 		int to_ticks;
3876 
3877 		if (win_probe_recovery && (net->window_probe)) {
3878 			win_probe_recovered = 1;
3879 			/*
3880 			 * Find the first chunk that was used with a window
3881 			 * probe and move it back for retransmission.
3882 			 */
3883 			/* sa_ignore FREED_MEMORY */
3884 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3885 				if (tp1->window_probe) {
3886 					/* move back to data send queue */
3887 					sctp_window_probe_recovery(stcb, asoc, tp1);
3888 					break;
3889 				}
3890 			}
3891 		}
3892 		if (net->RTO == 0) {
3893 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3894 		} else {
3895 			to_ticks = MSEC_TO_TICKS(net->RTO);
3896 		}
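		/*
		 * Keep the T3 retransmission timer running on any net that
		 * still has data in flight; otherwise make sure it is
		 * stopped, except that a net with a pending window probe
		 * must keep its timer armed.
		 */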
3897 		if (net->flight_size) {
3898 			j++;
3899 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3900 			    sctp_timeout_handler, &net->rxt_timer);
3901 			if (net->window_probe) {
3902 				net->window_probe = 0;
3903 			}
3904 		} else {
3905 			if (net->window_probe) {
3906 				/*
3907 				 * In window probes we must assure a timer
3908 				 * is still running there
3909 				 */
3910 				net->window_probe = 0;
3911 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3912 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3913 					    sctp_timeout_handler, &net->rxt_timer);
3914 				}
3915 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3916 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3917 				    stcb, net,
3918 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
3919 			}
3920 		}
3921 	}
3922 	if ((j == 0) &&
3923 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3924 	    (asoc->sent_queue_retran_cnt == 0) &&
3925 	    (win_probe_recovered == 0) &&
3926 	    (done_once == 0)) {
3927 		/*
3928 		 * Huh, this should not happen unless all packets are
3929 		 * PR-SCTP and marked to skip, of course.
3930 		 */
3931 		if (sctp_fs_audit(asoc)) {
3932 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3933 				net->flight_size = 0;
3934 			}
3935 			asoc->total_flight = 0;
3936 			asoc->total_flight_count = 0;
3937 			asoc->sent_queue_retran_cnt = 0;
3938 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3939 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3940 					sctp_flight_size_increase(tp1);
3941 					sctp_total_flight_increase(stcb, tp1);
3942 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3943 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3944 				}
3945 			}
3946 		}
3947 		done_once = 1;
3948 		goto again;
3949 	}
3950 	/**********************************/
3951 	/* Now what about shutdown issues */
3952 	/**********************************/
3953 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3954 		/* nothing left on sendqueue.. consider done */
3955 		/* clean up */
3956 		if ((asoc->stream_queue_cnt == 1) &&
3957 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3958 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3959 		    (asoc->locked_on_sending)
3960 		    ) {
3961 			struct sctp_stream_queue_pending *sp;
3962 
3963 			/*
3964 			 * I may be in a state where all data got across but
3965 			 * we cannot write more due to a shutdown... we abort
3966 			 * since the user did not indicate EOR in this case.
3967 			 * The sp will be cleaned during free of the asoc.
3968 			 */
3969 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3970 			    sctp_streamhead);
3971 			if ((sp) && (sp->length == 0)) {
3972 				/* Let cleanup code purge it */
3973 				if (sp->msg_is_complete) {
3974 					asoc->stream_queue_cnt--;
3975 				} else {
3976 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3977 					asoc->locked_on_sending = NULL;
3978 					asoc->stream_queue_cnt--;
3979 				}
3980 			}
3981 		}
3982 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3983 		    (asoc->stream_queue_cnt == 0)) {
3984 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3985 				/* Need to abort here */
3986 				struct mbuf *op_err;
3987 
3988 		abort_out_now:
3989 				*abort_now = 1;
3990 				/* XXX */
3991 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3992 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3993 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3994 			} else {
3995 				struct sctp_nets *netp;
3996 
3997 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3998 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3999 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4000 				}
4001 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4002 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4003 				sctp_stop_timers_for_shutdown(stcb);
4004 				if (asoc->alternate) {
4005 					netp = asoc->alternate;
4006 				} else {
4007 					netp = asoc->primary_destination;
4008 				}
4009 				sctp_send_shutdown(stcb, netp);
4010 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4011 				    stcb->sctp_ep, stcb, netp);
4012 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4013 				    stcb->sctp_ep, stcb, netp);
4014 			}
4015 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4016 		    (asoc->stream_queue_cnt == 0)) {
4017 			struct sctp_nets *netp;
4018 
4019 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4020 				goto abort_out_now;
4021 			}
4022 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4023 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4024 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4025 			sctp_stop_timers_for_shutdown(stcb);
4026 			if (asoc->alternate) {
4027 				netp = asoc->alternate;
4028 			} else {
4029 				netp = asoc->primary_destination;
4030 			}
4031 			sctp_send_shutdown_ack(stcb, netp);
4032 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4033 			    stcb->sctp_ep, stcb, netp);
4034 		}
4035 	}
4036 	/*********************************************/
4037 	/* Here we perform PR-SCTP procedures        */
4038 	/* (section 4.2)                             */
4039 	/*********************************************/
4040 	/* C1. update advancedPeerAckPoint */
4041 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4042 		asoc->advanced_peer_ack_point = cumack;
4043 	}
4044 	/* PR-Sctp issues need to be addressed too */
4045 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4046 		struct sctp_tmit_chunk *lchk;
4047 		uint32_t old_adv_peer_ack_point;
4048 
4049 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4050 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4051 		/* C3. See if we need to send a Fwd-TSN */
4052 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4053 			/*
4054 			 * ISSUE with ECN, see FWD-TSN processing.
4055 			 */
4056 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4057 				send_forward_tsn(stcb, asoc);
4058 			} else if (lchk) {
4059 				/* try to FR fwd-tsn's that get lost too */
4060 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4061 					send_forward_tsn(stcb, asoc);
4062 				}
4063 			}
4064 		}
4065 		if (lchk) {
4066 			/* Assure a timer is up */
4067 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4068 			    stcb->sctp_ep, stcb, lchk->whoTo);
4069 		}
4070 	}
4071 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4072 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4073 		    rwnd,
4074 		    stcb->asoc.peers_rwnd,
4075 		    stcb->asoc.total_flight,
4076 		    stcb->asoc.total_output_queue_size);
4077 	}
4078 }
4079 
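/*
 * Slow-path SACK processing: handles the cumulative ack plus gap-ack and
 * nr-gap-ack blocks, duplicate TSN reports, revoked chunks, fast-retransmit
 * marking and the PR-SCTP ack-point advance.
 */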
4080 void
4081 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4082     struct sctp_tcb *stcb,
4083     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4084     int *abort_now, uint8_t flags,
4085     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4086 {
4087 	struct sctp_association *asoc;
4088 	struct sctp_tmit_chunk *tp1, *tp2;
4089 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4090 	uint16_t wake_him = 0;
4091 	uint32_t send_s = 0;
4092 	long j;
4093 	int accum_moved = 0;
4094 	int will_exit_fast_recovery = 0;
4095 	uint32_t a_rwnd, old_rwnd;
4096 	int win_probe_recovery = 0;
4097 	int win_probe_recovered = 0;
4098 	struct sctp_nets *net = NULL;
4099 	int done_once;
4100 	int rto_ok = 1;
4101 	uint8_t reneged_all = 0;
4102 	uint8_t cmt_dac_flag;
4103 
4104 	/*
4105 	 * we take any chance we can to service our queues since we cannot
4106 	 * get awoken when the socket is read from :<
4107 	 */
4108 	/*
4109 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4110 	 * old sack, if so discard. 2) If there is nothing left in the send
4111 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4112 	 * too, update any rwnd change and verify no timers are running.
4113 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4114 	 * moved process these first and note that it moved. 4) Process any
4115 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4116 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4117 	 * sync up flightsizes and things, stop all timers and also check
4118 	 * for shutdown_pending state. If so then go ahead and send off the
4119 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4120 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4121 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4122 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4123 	 * if in shutdown_recv state.
4124 	 */
4125 	SCTP_TCB_LOCK_ASSERT(stcb);
4126 	/* CMT DAC algo */
4127 	this_sack_lowest_newack = 0;
4128 	SCTP_STAT_INCR(sctps_slowpath_sack);
4129 	last_tsn = cum_ack;
4130 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4131 #ifdef SCTP_ASOCLOG_OF_TSNS
4132 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4133 	stcb->asoc.cumack_log_at++;
4134 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
4135 		stcb->asoc.cumack_log_at = 0;
4136 	}
4137 #endif
4138 	a_rwnd = rwnd;
4139 
4140 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4141 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4142 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4143 	}
4144 	old_rwnd = stcb->asoc.peers_rwnd;
4145 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4146 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4147 		    stcb->asoc.overall_error_count,
4148 		    0,
4149 		    SCTP_FROM_SCTP_INDATA,
4150 		    __LINE__);
4151 	}
4152 	stcb->asoc.overall_error_count = 0;
4153 	asoc = &stcb->asoc;
4154 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4155 		sctp_log_sack(asoc->last_acked_seq,
4156 		    cum_ack,
4157 		    0,
4158 		    num_seg,
4159 		    num_dup,
4160 		    SCTP_LOG_NEW_SACK);
4161 	}
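	/* Log each duplicate TSN reported, if FR logging is enabled. */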
4162 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4163 		uint16_t i;
4164 		uint32_t *dupdata, dblock;
4165 
4166 		for (i = 0; i < num_dup; i++) {
4167 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4168 			    sizeof(uint32_t), (uint8_t *) & dblock);
4169 			if (dupdata == NULL) {
4170 				break;
4171 			}
4172 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4173 		}
4174 	}
4175 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4176 		/* reality check */
4177 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4178 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4179 			    sctpchunk_listhead);
4180 			send_s = tp1->rec.data.TSN_seq + 1;
4181 		} else {
4182 			tp1 = NULL;
4183 			send_s = asoc->sending_seq;
4184 		}
4185 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4186 			struct mbuf *op_err;
4187 			char msg[SCTP_DIAG_INFO_LEN];
4188 
4189 			/*
4190 			 * no way, we have not even sent this TSN out yet.
4191 			 * Peer is hopelessly messed up with us.
4192 			 */
4193 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4194 			    cum_ack, send_s);
4195 			if (tp1) {
4196 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4197 				    tp1->rec.data.TSN_seq, (void *)tp1);
4198 			}
4199 	hopeless_peer:
4200 			*abort_now = 1;
4201 			/* XXX */
4202 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4203 			    cum_ack, send_s);
4204 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4205 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4206 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4207 			return;
4208 		}
4209 	}
4210 	/**********************/
4211 	/* 1) check the range */
4212 	/**********************/
4213 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4214 		/* acking something behind */
4215 		return;
4216 	}
4217 	/* update the Rwnd of the peer */
4218 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4219 	    TAILQ_EMPTY(&asoc->send_queue) &&
4220 	    (asoc->stream_queue_cnt == 0)) {
4221 		/* nothing left on send/sent and strmq */
4222 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4223 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4224 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4225 		}
4226 		asoc->peers_rwnd = a_rwnd;
4227 		if (asoc->sent_queue_retran_cnt) {
4228 			asoc->sent_queue_retran_cnt = 0;
4229 		}
4230 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4231 			/* SWS sender side engages */
4232 			asoc->peers_rwnd = 0;
4233 		}
4234 		/* stop any timers */
4235 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4236 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4237 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4238 			net->partial_bytes_acked = 0;
4239 			net->flight_size = 0;
4240 		}
4241 		asoc->total_flight = 0;
4242 		asoc->total_flight_count = 0;
4243 		return;
4244 	}
4245 	/*
4246 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4247 	 * things. The total byte count acked is tracked in net_ack AND
4248 	 * net_ack2 is used to track the total bytes acked that are
4249 	 * unambiguous and were never retransmitted. We track these on a per
4250 	 * destination address basis.
4251 	 */
4252 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4253 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4254 			/* Drag along the window_tsn for cwr's */
4255 			net->cwr_window_tsn = cum_ack;
4256 		}
4257 		net->prev_cwnd = net->cwnd;
4258 		net->net_ack = 0;
4259 		net->net_ack2 = 0;
4260 
4261 		/*
4262 		 * CMT: Reset CUC and Fast recovery algo variables before
4263 		 * SACK processing
4264 		 */
4265 		net->new_pseudo_cumack = 0;
4266 		net->will_exit_fast_recovery = 0;
4267 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4268 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4269 		}
4270 	}
4271 	/* process the new consecutive TSN first */
4272 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4273 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4274 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4275 				accum_moved = 1;
4276 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4277 					/*
4278 					 * If it is less than ACKED, it is
4279 					 * now no-longer in flight. Higher
4280 					 * now no longer in flight. Higher
4281 					 */
4282 					if ((tp1->whoTo->dest_state &
4283 					    SCTP_ADDR_UNCONFIRMED) &&
4284 					    (tp1->snd_count < 2)) {
4285 						/*
4286 						 * If there was no retran
4287 						 * and the address is
4288 						 * un-confirmed and we sent
4289 						 * there and are now
4290 						 * sacked... it's confirmed,
4291 						 * mark it so.
4292 						 */
4293 						tp1->whoTo->dest_state &=
4294 						    ~SCTP_ADDR_UNCONFIRMED;
4295 					}
4296 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4297 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4298 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4299 							    tp1->whoTo->flight_size,
4300 							    tp1->book_size,
4301 							    (uintptr_t) tp1->whoTo,
4302 							    tp1->rec.data.TSN_seq);
4303 						}
4304 						sctp_flight_size_decrease(tp1);
4305 						sctp_total_flight_decrease(stcb, tp1);
4306 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4307 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4308 							    tp1);
4309 						}
4310 					}
4311 					tp1->whoTo->net_ack += tp1->send_size;
4312 
4313 					/* CMT SFR and DAC algos */
4314 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4315 					tp1->whoTo->saw_newack = 1;
4316 
4317 					if (tp1->snd_count < 2) {
4318 						/*
4319 						 * True non-retransmitted
4320 						 * chunk
4321 						 */
4322 						tp1->whoTo->net_ack2 +=
4323 						    tp1->send_size;
4324 
4325 						/* update RTO too? */
4326 						if (tp1->do_rtt) {
4327 							if (rto_ok) {
4328 								tp1->whoTo->RTO =
4329 								    sctp_calculate_rto(stcb,
4330 								    asoc, tp1->whoTo,
4331 								    &tp1->sent_rcv_time,
4332 								    sctp_align_safe_nocopy,
4333 								    SCTP_RTT_FROM_DATA);
4334 								rto_ok = 0;
4335 							}
4336 							if (tp1->whoTo->rto_needed == 0) {
4337 								tp1->whoTo->rto_needed = 1;
4338 							}
4339 							tp1->do_rtt = 0;
4340 						}
4341 					}
4342 					/*
4343 					 * CMT: CUCv2 algorithm. From the
4344 					 * cumack'd TSNs, for each TSN being
4345 					 * acked for the first time, set the
4346 					 * following variables for the
4347 					 * corresp destination.
4348 					 * new_pseudo_cumack will trigger a
4349 					 * cwnd update.
4350 					 * find_(rtx_)pseudo_cumack will
4351 					 * trigger search for the next
4352 					 * expected (rtx-)pseudo-cumack.
4353 					 */
4354 					tp1->whoTo->new_pseudo_cumack = 1;
4355 					tp1->whoTo->find_pseudo_cumack = 1;
4356 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4357 
4358 
4359 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4360 						sctp_log_sack(asoc->last_acked_seq,
4361 						    cum_ack,
4362 						    tp1->rec.data.TSN_seq,
4363 						    0,
4364 						    0,
4365 						    SCTP_LOG_TSN_ACKED);
4366 					}
4367 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4368 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4369 					}
4370 				}
4371 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4372 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4373 #ifdef SCTP_AUDITING_ENABLED
4374 					sctp_audit_log(0xB3,
4375 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4376 #endif
4377 				}
4378 				if (tp1->rec.data.chunk_was_revoked) {
4379 					/* deflate the cwnd */
4380 					tp1->whoTo->cwnd -= tp1->book_size;
4381 					tp1->rec.data.chunk_was_revoked = 0;
4382 				}
4383 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4384 					tp1->sent = SCTP_DATAGRAM_ACKED;
4385 				}
4386 			}
4387 		} else {
4388 			break;
4389 		}
4390 	}
4391 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4392 	/* always set this up to cum-ack */
4393 	asoc->this_sack_highest_gap = last_tsn;
4394 
4395 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4396 
4397 		/*
4398 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4399 		 * to be greater than the cumack. Also reset saw_newack to 0
4400 		 * for all dests.
4401 		 */
4402 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4403 			net->saw_newack = 0;
4404 			net->this_sack_highest_newack = last_tsn;
4405 		}
4406 
4407 		 * this_sack_highest_gap will increase while handling NEW
4408 		 * segments; this_sack_highest_newack will increase while
4409 		 * segments this_sack_highest_newack will increase while
4410 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4411 		 * used for CMT DAC algo. saw_newack will also change.
4412 		 */
4413 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4414 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4415 		    num_seg, num_nr_seg, &rto_ok)) {
4416 			wake_him++;
4417 		}
4418 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4419 			/*
4420 			 * validate the biggest_tsn_acked in the gap acks if
4421 			 * strict adherence is wanted.
4422 			 */
4423 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4424 				/*
4425 				 * peer is either confused or we are under
4426 				 * attack. We must abort.
4427 				 */
4428 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4429 				    biggest_tsn_acked, send_s);
4430 				goto hopeless_peer;
4431 			}
4432 		}
4433 	}
4434 	/*******************************************/
4435 	/* cancel ALL T3-send timer if accum moved */
4436 	/*******************************************/
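	/*
	 * With CMT, the T3 timer is stopped on each net whose pseudo-cumack
	 * advanced; without CMT, a moved cum-ack stops the timer on every
	 * net.
	 */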
4437 	if (asoc->sctp_cmt_on_off > 0) {
4438 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4439 			if (net->new_pseudo_cumack)
4440 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4441 				    stcb, net,
4442 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4443 
4444 		}
4445 	} else {
4446 		if (accum_moved) {
4447 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4448 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4449 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4450 			}
4451 		}
4452 	}
4453 	/********************************************/
4454 	/* drop the acked chunks from the sentqueue */
4455 	/********************************************/
4456 	asoc->last_acked_seq = cum_ack;
4457 
4458 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4459 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4460 			break;
4461 		}
4462 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4463 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4464 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4465 #ifdef INVARIANTS
4466 			} else {
4467 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4468 #endif
4469 			}
4470 		}
4471 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4472 		if (PR_SCTP_ENABLED(tp1->flags)) {
4473 			if (asoc->pr_sctp_cnt != 0)
4474 				asoc->pr_sctp_cnt--;
4475 		}
4476 		asoc->sent_queue_cnt--;
4477 		if (tp1->data) {
4478 			/* sa_ignore NO_NULL_CHK */
4479 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4480 			sctp_m_freem(tp1->data);
4481 			tp1->data = NULL;
4482 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4483 				asoc->sent_queue_cnt_removeable--;
4484 			}
4485 		}
4486 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4487 			sctp_log_sack(asoc->last_acked_seq,
4488 			    cum_ack,
4489 			    tp1->rec.data.TSN_seq,
4490 			    0,
4491 			    0,
4492 			    SCTP_LOG_FREE_SENT);
4493 		}
4494 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4495 		wake_him++;
4496 	}
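	/* With the sent queue empty, nothing can still be in flight. */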
4497 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4498 #ifdef INVARIANTS
4499 		panic("Warning flight size is positive and should be 0");
4500 #else
4501 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4502 		    asoc->total_flight);
4503 #endif
4504 		asoc->total_flight = 0;
4505 	}
4506 	/* sa_ignore NO_NULL_CHK */
4507 	if ((wake_him) && (stcb->sctp_socket)) {
4508 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4509 		struct socket *so;
4510 
4511 #endif
4512 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4513 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4514 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4515 		}
4516 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4517 		so = SCTP_INP_SO(stcb->sctp_ep);
4518 		atomic_add_int(&stcb->asoc.refcnt, 1);
4519 		SCTP_TCB_UNLOCK(stcb);
4520 		SCTP_SOCKET_LOCK(so, 1);
4521 		SCTP_TCB_LOCK(stcb);
4522 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4523 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4524 			/* assoc was freed while we were unlocked */
4525 			SCTP_SOCKET_UNLOCK(so, 1);
4526 			return;
4527 		}
4528 #endif
4529 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4530 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4531 		SCTP_SOCKET_UNLOCK(so, 1);
4532 #endif
4533 	} else {
4534 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4535 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4536 		}
4537 	}
4538 
4539 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4540 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4541 			/* Setup so we will exit RFC2582 fast recovery */
4542 			will_exit_fast_recovery = 1;
4543 		}
4544 	}
4545 	/*
4546 	 * Check for revoked fragments:
4547 	 *
4548 	 * If the previous sack had no frags, we can't have any revoked.
4549 	 * If the previous sack had frags and we now have frags (num_seg >
4550 	 * 0), call sctp_check_for_revoked() to tell if the peer revoked
4551 	 * some of them. Otherwise the peer revoked all ACKED fragments,
4552 	 * since we had some before and now we have NONE.
4553 	 */
4554 
4555 	if (num_seg) {
4556 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4557 		asoc->saw_sack_with_frags = 1;
4558 	} else if (asoc->saw_sack_with_frags) {
4559 		int cnt_revoked = 0;
4560 
4561 		/* Peer revoked all dg's marked or acked */
4562 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4563 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4564 				tp1->sent = SCTP_DATAGRAM_SENT;
4565 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4566 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4567 					    tp1->whoTo->flight_size,
4568 					    tp1->book_size,
4569 					    (uintptr_t) tp1->whoTo,
4570 					    tp1->rec.data.TSN_seq);
4571 				}
4572 				sctp_flight_size_increase(tp1);
4573 				sctp_total_flight_increase(stcb, tp1);
4574 				tp1->rec.data.chunk_was_revoked = 1;
4575 				/*
4576 				 * To ensure that this increase in
4577 				 * flightsize, which is artificial, does not
4578 				 * throttle the sender, we also increase the
4579 				 * cwnd artificially.
4580 				 */
4581 				tp1->whoTo->cwnd += tp1->book_size;
4582 				cnt_revoked++;
4583 			}
4584 		}
4585 		if (cnt_revoked) {
4586 			reneged_all = 1;
4587 		}
4588 		asoc->saw_sack_with_frags = 0;
4589 	}
4590 	if (num_nr_seg > 0)
4591 		asoc->saw_sack_with_nr_frags = 1;
4592 	else
4593 		asoc->saw_sack_with_nr_frags = 0;
4594 
4595 	/* JRS - Use the congestion control given in the CC module */
4596 	if (ecne_seen == 0) {
4597 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4598 			if (net->net_ack2 > 0) {
4599 				/*
4600 				 * Karn's rule applies to clearing error
4601 				 * count; this is optional.
4602 				 */
4603 				net->error_count = 0;
4604 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4605 					/* addr came good */
4606 					net->dest_state |= SCTP_ADDR_REACHABLE;
4607 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4608 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4609 				}
4610 				if (net == stcb->asoc.primary_destination) {
4611 					if (stcb->asoc.alternate) {
4612 						/*
4613 						 * release the alternate,
4614 						 * primary is good
4615 						 */
4616 						sctp_free_remote_addr(stcb->asoc.alternate);
4617 						stcb->asoc.alternate = NULL;
4618 					}
4619 				}
4620 				if (net->dest_state & SCTP_ADDR_PF) {
4621 					net->dest_state &= ~SCTP_ADDR_PF;
4622 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4623 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4624 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4625 					/* Done with this net */
4626 					net->net_ack = 0;
4627 				}
4628 				/* restore any doubled timers */
4629 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4630 				if (net->RTO < stcb->asoc.minrto) {
4631 					net->RTO = stcb->asoc.minrto;
4632 				}
4633 				if (net->RTO > stcb->asoc.maxrto) {
4634 					net->RTO = stcb->asoc.maxrto;
4635 				}
4636 			}
4637 		}
4638 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4639 	}
4640 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4641 		/* nothing left in-flight */
4642 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4643 			/* stop all timers */
4644 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4645 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4646 			net->flight_size = 0;
4647 			net->partial_bytes_acked = 0;
4648 		}
4649 		asoc->total_flight = 0;
4650 		asoc->total_flight_count = 0;
4651 	}
4652 	/**********************************/
4653 	/* Now what about shutdown issues */
4654 	/**********************************/
4655 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4656 		/* nothing left on sendqueue.. consider done */
4657 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4658 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4659 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4660 		}
4661 		asoc->peers_rwnd = a_rwnd;
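		/*
		 * Sender-side silly window syndrome avoidance: a peer window
		 * smaller than the sctp_sws_sender threshold is treated as
		 * zero rather than letting it dribble out tiny chunks.
		 */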
4662 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4663 			/* SWS sender side engages */
4664 			asoc->peers_rwnd = 0;
4665 		}
4666 		/* clean up */
4667 		if ((asoc->stream_queue_cnt == 1) &&
4668 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4669 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4670 		    (asoc->locked_on_sending)
4671 		    ) {
4672 			struct sctp_stream_queue_pending *sp;
4673 
4674 			/*
4675 			 * We may be in a state where everything has gotten
4676 			 * across but we cannot write more due to a shutdown;
4677 			 * abort, since the user did not indicate EOR in this case.
4678 			 */
4679 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4680 			    sctp_streamhead);
4681 			if ((sp) && (sp->length == 0)) {
4682 				asoc->locked_on_sending = NULL;
4683 				if (sp->msg_is_complete) {
4684 					asoc->stream_queue_cnt--;
4685 				} else {
4686 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4687 					asoc->stream_queue_cnt--;
4688 				}
4689 			}
4690 		}
4691 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4692 		    (asoc->stream_queue_cnt == 0)) {
4693 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4694 				/* Need to abort here */
4695 				struct mbuf *op_err;
4696 
4697 		abort_out_now:
4698 				*abort_now = 1;
4699 				/* XXX */
4700 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4701 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4702 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4703 				return;
4704 			} else {
4705 				struct sctp_nets *netp;
4706 
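				/*
				 * Everything is acked while a SHUTDOWN is
				 * pending: move to SHUTDOWN_SENT, send the
				 * SHUTDOWN on the chosen path, and arm the
				 * shutdown and guard timers.
				 */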
4707 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4708 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4709 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4710 				}
4711 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4712 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4713 				sctp_stop_timers_for_shutdown(stcb);
4714 				if (asoc->alternate) {
4715 					netp = asoc->alternate;
4716 				} else {
4717 					netp = asoc->primary_destination;
4718 				}
4719 				sctp_send_shutdown(stcb, netp);
4720 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4721 				    stcb->sctp_ep, stcb, netp);
4722 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4723 				    stcb->sctp_ep, stcb, netp);
4724 			}
4725 			return;
4726 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4727 		    (asoc->stream_queue_cnt == 0)) {
4728 			struct sctp_nets *netp;
4729 
4730 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4731 				goto abort_out_now;
4732 			}
4733 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4734 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4735 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4736 			sctp_stop_timers_for_shutdown(stcb);
4737 			if (asoc->alternate) {
4738 				netp = asoc->alternate;
4739 			} else {
4740 				netp = asoc->primary_destination;
4741 			}
4742 			sctp_send_shutdown_ack(stcb, netp);
4743 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4744 			    stcb->sctp_ep, stcb, netp);
4745 			return;
4746 		}
4747 	}
4748 	/*
4749 	 * Now here we are going to recycle net_ack for a different use...
4750 	 * HEADS UP.
4751 	 */
4752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4753 		net->net_ack = 0;
4754 	}
4755 
4756 	/*
4757 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4758 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4759 	 * automatically ensure that.
4760 	 */
4761 	if ((asoc->sctp_cmt_on_off > 0) &&
4762 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4763 	    (cmt_dac_flag == 0)) {
4764 		this_sack_lowest_newack = cum_ack;
4765 	}
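	/*
	 * The strike pass walks the sent queue and bumps the
	 * miss-indication count on chunks the gap reports show as missing;
	 * chunks crossing the threshold get marked for (fast) retransmit.
	 */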
4766 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4767 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4768 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4769 	}
4770 	/* JRS - Use the congestion control given in the CC module */
4771 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4772 
4773 	/* Now are we exiting loss recovery ? */
4774 	if (will_exit_fast_recovery) {
4775 		/* Ok, we must exit fast recovery */
4776 		asoc->fast_retran_loss_recovery = 0;
4777 	}
4778 	if ((asoc->sat_t3_loss_recovery) &&
4779 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4780 		/* end satellite t3 loss recovery */
4781 		asoc->sat_t3_loss_recovery = 0;
4782 	}
4783 	/*
4784 	 * CMT Fast recovery
4785 	 */
4786 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4787 		if (net->will_exit_fast_recovery) {
4788 			/* Ok, we must exit fast recovery */
4789 			net->fast_retran_loss_recovery = 0;
4790 		}
4791 	}
4792 
4793 	/* Adjust and set the new rwnd value */
4794 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4795 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4796 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4797 	}
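	/*
	 * The usable peer window is what was advertised minus the bytes
	 * still in flight, with sctp_peer_chunk_oh debited per flying chunk
	 * to account for per-chunk overhead at the receiver.
	 */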
4798 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4799 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4800 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4801 		/* SWS sender side engages */
4802 		asoc->peers_rwnd = 0;
4803 	}
4804 	if (asoc->peers_rwnd > old_rwnd) {
4805 		win_probe_recovery = 1;
4806 	}
4807 	/*
4808 	 * Now we must set things up so we have a timer running for anyone
4809 	 * with outstanding data.
4810 	 */
4811 	done_once = 0;
4812 again:
4813 	j = 0;
4814 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4815 		if (win_probe_recovery && (net->window_probe)) {
4816 			win_probe_recovered = 1;
4817 			/*-
4818 			 * Find the first chunk that was used with a
4819 			 * window probe and clear the event. Put
4820 			 * it back into the send queue as if it had
4821 			 * not been sent.
4822 			 */
4823 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4824 				if (tp1->window_probe) {
4825 					sctp_window_probe_recovery(stcb, asoc, tp1);
4826 					break;
4827 				}
4828 			}
4829 		}
4830 		if (net->flight_size) {
4831 			j++;
4832 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4833 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4834 				    stcb->sctp_ep, stcb, net);
4835 			}
4836 			if (net->window_probe) {
4837 				net->window_probe = 0;
4838 			}
4839 		} else {
4840 			if (net->window_probe) {
4841 				/*
4842 				 * For window probes we must ensure that a
4843 				 * timer is still running there.
4844 				 */
4845 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4846 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4847 					    stcb->sctp_ep, stcb, net);
4848 
4849 				}
4850 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4851 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4852 				    stcb, net,
4853 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4854 			}
4855 		}
4856 	}
4857 	if ((j == 0) &&
4858 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4859 	    (asoc->sent_queue_retran_cnt == 0) &&
4860 	    (win_probe_recovered == 0) &&
4861 	    (done_once == 0)) {
4862 		/*
4863 		 * Huh, this should not happen unless all packets are
4864 		 * PR-SCTP and marked to be skipped, of course.
4865 		 */
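		/*
		 * A nonzero return from sctp_fs_audit() indicates the
		 * flight-size bookkeeping no longer matches the sent queue;
		 * in that case the counters are rebuilt from the queue below.
		 */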
4866 		if (sctp_fs_audit(asoc)) {
4867 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4868 				net->flight_size = 0;
4869 			}
4870 			asoc->total_flight = 0;
4871 			asoc->total_flight_count = 0;
4872 			asoc->sent_queue_retran_cnt = 0;
4873 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4874 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4875 					sctp_flight_size_increase(tp1);
4876 					sctp_total_flight_increase(stcb, tp1);
4877 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4878 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4879 				}
4880 			}
4881 		}
4882 		done_once = 1;
4883 		goto again;
4884 	}
4885 	/*********************************************/
4886 	/* Here we perform PR-SCTP procedures        */
4887 	/* (section 4.2)                             */
4888 	/*********************************************/
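	/*
	 * C1-C3 below follow the sender-side rules of PR-SCTP (RFC 3758):
	 * advance the Advanced.Peer.Ack.Point past abandoned chunks and, if
	 * it moved beyond the cumulative ack, tell the peer via FWD-TSN.
	 */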
4889 	/* C1. update advancedPeerAckPoint */
4890 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4891 		asoc->advanced_peer_ack_point = cum_ack;
4892 	}
4893 	/* C2. try to further move advancedPeerAckPoint ahead */
4894 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4895 		struct sctp_tmit_chunk *lchk;
4896 		uint32_t old_adv_peer_ack_point;
4897 
4898 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4899 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4900 		/* C3. See if we need to send a Fwd-TSN */
4901 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4902 			/*
4903 			 * ISSUE with ECN, see FWD-TSN processing.
4904 			 */
4905 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4906 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4907 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4908 				    old_adv_peer_ack_point);
4909 			}
4910 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4911 				send_forward_tsn(stcb, asoc);
4912 			} else if (lchk) {
4913 				/* try to FR fwd-tsn's that get lost too */
4914 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4915 					send_forward_tsn(stcb, asoc);
4916 				}
4917 			}
4918 		}
4919 		if (lchk) {
4920 			/* Assure a timer is up */
4921 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4922 			    stcb->sctp_ep, stcb, lchk->whoTo);
4923 		}
4924 	}
4925 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4926 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4927 		    a_rwnd,
4928 		    stcb->asoc.peers_rwnd,
4929 		    stcb->asoc.total_flight,
4930 		    stcb->asoc.total_output_queue_size);
4931 	}
4932 }
4933 
4934 void
4935 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4936 {
4937 	/* Copy cum-ack */
4938 	uint32_t cum_ack, a_rwnd;
4939 
4940 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4941 	/* Arrange so a_rwnd does NOT change */
4942 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
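	/*
	 * The express handler subtracts total_flight back out when it
	 * recomputes peers_rwnd, so adding it here should leave the window
	 * where it was.
	 */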
4943 
4944 	/* Now call the express sack handling */
4945 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4946 }
4947 
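/*
 * Deliver anything on this stream's reorder queue that has become
 * deliverable: first everything at or before the last delivered sequence,
 * then any in-order run that follows it.
 */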
4948 static void
4949 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4950     struct sctp_stream_in *strmin)
4951 {
4952 	struct sctp_queued_to_read *ctl, *nctl;
4953 	struct sctp_association *asoc;
4954 	uint16_t tt;
4955 
4956 	asoc = &stcb->asoc;
4957 	tt = strmin->last_sequence_delivered;
4958 	/*
4959 	 * First deliver anything prior to, and including, the stream
4960 	 * sequence number that came in.
4961 	 */
4962 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4963 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4964 			/* this is deliverable now */
4965 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4966 			/* subtract pending on streams */
4967 			asoc->size_on_all_streams -= ctl->length;
4968 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4969 			/* deliver it to at least the delivery-q */
4970 			if (stcb->sctp_socket) {
4971 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4972 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4973 				    ctl,
4974 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4975 			}
4976 		} else {
4977 			/* no more delivery now. */
4978 			break;
4979 		}
4980 	}
4981 	/*
4982 	 * Now we must deliver things in the queue the normal way, if any
4983 	 * are now ready.
4984 	 */
4985 	tt = strmin->last_sequence_delivered + 1;
4986 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4987 		if (tt == ctl->sinfo_ssn) {
4988 			/* this is deliverable now */
4989 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4990 			/* subtract pending on streams */
4991 			asoc->size_on_all_streams -= ctl->length;
4992 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4993 			/* deliver it to at least the delivery-q */
4994 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4995 			if (stcb->sctp_socket) {
4996 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4997 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4998 				    ctl,
4999 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5000 
5001 			}
5002 			tt = strmin->last_sequence_delivered + 1;
5003 		} else {
5004 			break;
5005 		}
5006 	}
5007 }
5008 
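/*
 * Toss any reassembly-queue fragments for the given stream/sequence that a
 * peer's FWD-TSN has told us to skip, fixing up the stream's delivery
 * sequence as needed.
 */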
5009 static void
5010 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5011     struct sctp_association *asoc,
5012     uint16_t stream, uint16_t seq)
5013 {
5014 	struct sctp_tmit_chunk *chk, *nchk;
5015 
5016 	/* For each one on here see if we need to toss it */
5017 	/*
5018 	 * For now, large messages held on the reasmqueue that are complete
5019 	 * will be tossed too. In theory we could do more work: spin
5020 	 * through and stop after dumping one message, i.e., on seeing the
5021 	 * start of a new message at the head, and call the delivery
5022 	 * function to see if it can be delivered. But for now we just dump
5023 	 * everything on the queue.
5024 	 */
5025 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5026 		/*
5027 		 * Do not toss it if on a different stream or marked for
5028 		 * unordered delivery, in which case the stream sequence
5029 		 * number has no meaning.
5030 		 */
5031 		if ((chk->rec.data.stream_number != stream) ||
5032 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5033 			continue;
5034 		}
5035 		if (chk->rec.data.stream_seq == seq) {
5036 			/* It needs to be tossed */
5037 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5038 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5039 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5040 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5041 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5042 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5043 			}
5044 			asoc->size_on_reasm_queue -= chk->send_size;
5045 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5046 
5047 			/* Clear up any stream problem */
5048 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5049 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5050 				/*
5051 				 * We must dump forward this stream's
5052 				 * sequence number if the chunk being
5053 				 * skipped is not unordered. There is
5054 				 * a chance that if the peer does not
5055 				 * include the last fragment in its FWD-TSN
5056 				 * we WILL have a problem here, since you
5057 				 * would have a partial chunk in the queue
5058 				 * that may not be deliverable. Also, if a
5059 				 * partial delivery API has started, the user
5060 				 * may get a partial chunk with the next read
5061 				 * returning a new chunk... really ugly, but I
5062 				 * see no way around it! Maybe a notify??
5063 				 */
5064 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5065 			}
5066 			if (chk->data) {
5067 				sctp_m_freem(chk->data);
5068 				chk->data = NULL;
5069 			}
5070 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5071 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5072 			/*
5073 			 * If the stream_seq is greater than the one being
5074 			 * purged, we are done.
5075 			 */
5076 			break;
5077 		}
5078 	}
5079 }
5080 
5081 
5082 void
5083 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5084     struct sctp_forward_tsn_chunk *fwd,
5085     int *abort_flag, struct mbuf *m, int offset)
5086 {
5087 	/* The pr-sctp fwd tsn */
5088 	/*
5089 	 * here we will perform all the data receiver side steps for
5090 	 * processing FwdTSN, as required in by pr-sctp draft:
5091 	 * processing FwdTSN, as required by the PR-SCTP draft:
5092 	 *
5093 	 * Assume we get FwdTSN(x):
5094 	 * 1) update local cumTSN to x
5095 	 * 2) try to further advance cumTSN to x + others we have
5096 	 * 3) examine and update re-ordering queue on pr-in-streams
5097 	 * 4) clean up re-assembly queue
5098 	 * 5) send a SACK to report where we are
5099 	struct sctp_association *asoc;
5100 	uint32_t new_cum_tsn, gap;
5101 	unsigned int i, fwd_sz, m_size;
5102 	uint32_t str_seq;
5103 	struct sctp_stream_in *strm;
5104 	struct sctp_tmit_chunk *chk, *nchk;
5105 	struct sctp_queued_to_read *ctl, *sv;
5106 
5107 	asoc = &stcb->asoc;
5108 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5109 		SCTPDBG(SCTP_DEBUG_INDATA1,
5110 		    "Bad size too small/big fwd-tsn\n");
5111 		return;
5112 	}
5113 	m_size = (stcb->asoc.mapping_array_size << 3);
5114 	/*************************************************************/
5115 	/* 1. Here we update local cumTSN and shift the bitmap array */
5116 	/*************************************************************/
5117 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5118 
5119 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5120 		/* Already got there ... */
5121 		return;
5122 	}
5123 	/*
5124 	 * now we know the new TSN is more advanced, let's find the actual
5125 	 * gap
5126 	 */
5127 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5128 	asoc->cumulative_tsn = new_cum_tsn;
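	/*
	 * gap is the offset of new_cum_tsn from the base of the mapping
	 * array (serial-number arithmetic). A gap at or beyond the map size
	 * means this FWD-TSN jumps past the whole window we are tracking,
	 * so the maps are simply reset below.
	 */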
5129 	if (gap >= m_size) {
5130 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5131 			struct mbuf *op_err;
5132 			char msg[SCTP_DIAG_INFO_LEN];
5133 
5134 			/*
5135 			 * out of range (in terms of the single-byte chunks in
5136 			 * the rwnd we give out). This must be an attacker.
5137 			 */
5138 			*abort_flag = 1;
5139 			snprintf(msg, sizeof(msg),
5140 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5141 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5142 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5143 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5144 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5145 			return;
5146 		}
5147 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5148 
5149 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5150 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5151 		asoc->highest_tsn_inside_map = new_cum_tsn;
5152 
5153 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5154 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5155 
5156 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5157 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5158 		}
5159 	} else {
5160 		SCTP_TCB_LOCK_ASSERT(stcb);
5161 		for (i = 0; i <= gap; i++) {
5162 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5163 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5164 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5165 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5166 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5167 				}
5168 			}
5169 		}
5170 	}
5171 	/*************************************************************/
5172 	/* 2. Clear up re-assembly queue                             */
5173 	/*************************************************************/
5174 	/*
5175 	 * First service it if pd-api is up, just in case we can progress it
5176 	 * forward
5177 	 */
5178 	if (asoc->fragmented_delivery_inprogress) {
5179 		sctp_service_reassembly(stcb, asoc);
5180 	}
5181 	/* For each one on here see if we need to toss it */
5182 	/*
5183 	 * For now, large messages held on the reasmqueue that are complete
5184 	 * will be tossed too. In theory we could do more work: spin
5185 	 * through and stop after dumping one message, i.e., on seeing the
5186 	 * start of a new message at the head, and call the delivery
5187 	 * function to see if it can be delivered. But for now we just dump
5188 	 * everything on the queue.
5189 	 */
5190 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5191 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5192 			/* It needs to be tossed */
5193 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5194 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5195 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5196 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5197 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5198 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5199 			}
5200 			asoc->size_on_reasm_queue -= chk->send_size;
5201 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5202 
5203 			/* Clear up any stream problem */
5204 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5205 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5206 				/*
5207 				 * We must dump forward this stream's
5208 				 * sequence number if the chunk being
5209 				 * skipped is not unordered. There is
5210 				 * a chance that if the peer does not
5211 				 * include the last fragment in its FWD-TSN
5212 				 * we WILL have a problem here, since you
5213 				 * would have a partial chunk in the queue
5214 				 * that may not be deliverable. Also, if a
5215 				 * partial delivery API has started, the user
5216 				 * may get a partial chunk with the next read
5217 				 * returning a new chunk... really ugly, but I
5218 				 * see no way around it! Maybe a notify??
5219 				 */
5220 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5221 			}
5222 			if (chk->data) {
5223 				sctp_m_freem(chk->data);
5224 				chk->data = NULL;
5225 			}
5226 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5227 		} else {
5228 			/*
5229 			 * Ok we have gone beyond the end of the fwd-tsn's
5230 			 * mark.
5231 			 */
5232 			break;
5233 		}
5234 	}
5235 	/*******************************************************/
5236 	/* 3. Update the PR-stream re-ordering queues and fix  */
5237 	/* delivery issues as needed.                          */
5238 	/*******************************************************/
5239 	fwd_sz -= sizeof(*fwd);
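	/*
	 * Whatever follows the fixed chunk header is an array of
	 * (stream, sequence) pairs describing the messages being skipped.
	 */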
5240 	if (m && fwd_sz) {
5241 		/* New method. */
5242 		unsigned int num_str;
5243 		struct sctp_strseq *stseq, strseqbuf;
5244 
5245 		offset += sizeof(*fwd);
5246 
5247 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5248 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5249 		for (i = 0; i < num_str; i++) {
5250 			uint16_t st;
5251 
5252 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5253 			    sizeof(struct sctp_strseq),
5254 			    (uint8_t *) & strseqbuf);
5255 			offset += sizeof(struct sctp_strseq);
5256 			if (stseq == NULL) {
5257 				break;
5258 			}
5259 			/* Convert */
5260 			st = ntohs(stseq->stream);
5261 			stseq->stream = st;
5262 			st = ntohs(stseq->sequence);
5263 			stseq->sequence = st;
5264 
5265 			/* now process */
5266 
5267 			/*
5268 			 * Ok we now look for the stream/seq on the read
5269 			 * queue where it's not all delivered. If we find it,
5270 			 * we transmute the read entry into a PDI_ABORTED.
5271 			 */
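			/*
			 * str_seq packs stream and sequence into one 32-bit
			 * value, stream in the high 16 bits, for the
			 * PDAPI-aborted notification below.
			 */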
5272 			if (stseq->stream >= asoc->streamincnt) {
5273 				/* screwed up streams, stop!  */
5274 				break;
5275 			}
5276 			if ((asoc->str_of_pdapi == stseq->stream) &&
5277 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5278 				/*
5279 				 * If this is the one we were partially
5280 				 * delivering now then we no longer are.
5281 				 * Note this will change with the reassembly
5282 				 * re-write.
5283 				 */
5284 				asoc->fragmented_delivery_inprogress = 0;
5285 			}
5286 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5287 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5288 				if ((ctl->sinfo_stream == stseq->stream) &&
5289 				    (ctl->sinfo_ssn == stseq->sequence)) {
5290 					str_seq = (stseq->stream << 16) | stseq->sequence;
5291 					ctl->end_added = 1;
5292 					ctl->pdapi_aborted = 1;
5293 					sv = stcb->asoc.control_pdapi;
5294 					stcb->asoc.control_pdapi = ctl;
5295 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5296 					    stcb,
5297 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5298 					    (void *)&str_seq,
5299 					    SCTP_SO_NOT_LOCKED);
5300 					stcb->asoc.control_pdapi = sv;
5301 					break;
5302 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5303 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5304 					/* We are past our victim SSN */
5305 					break;
5306 				}
5307 			}
5308 			strm = &asoc->strmin[stseq->stream];
5309 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5310 				/* Update the sequence number */
5311 				strm->last_sequence_delivered = stseq->sequence;
5312 			}
5313 			/* now kick the stream the new way */
5314 			/* sa_ignore NO_NULL_CHK */
5315 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5316 		}
5317 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5318 	}
5319 	/*
5320 	 * Now slide things forward.
5321 	 */
5322 	sctp_slide_mapping_arrays(stcb);
5323 
5324 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5325 		/* now let's kick out and check for more fragmented delivery */
5326 		/* sa_ignore NO_NULL_CHK */
5327 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5328 	}
5329 }
5330