/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * the sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
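
/*
 * An illustrative walk-through of the calculation above (the numbers are
 * invented, not taken from any real trace): with a 64 KB receive buffer,
 * 8 KB already held on the reassembly queue in 4 chunks, and nothing on
 * the stream queues, sctp_calc_rwnd() roughly evaluates
 *
 *	calc = sbspace(so_rcv)		e.g. 56 KB free
 *	calc -= 8 KB + 4 * MSIZE	reassembly data plus mbuf overhead
 *	calc -= my_rwnd_control_len	control-chunk overhead
 *
 * and is then clamped to 1 if it falls below the control overhead, which
 * keeps the peer probing instead of closing the window entirely (silly
 * window syndrome avoidance).
 */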



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
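
/*
 * For reference (a sketch, not normative): with all three receive options
 * enabled, the single mbuf built above carries up to three back-to-back
 * control messages, each padded per CMSG_SPACE():
 *
 *	| cmsghdr | sctp_rcvinfo  |	level IPPROTO_SCTP, type SCTP_RCVINFO
 *	| cmsghdr | sctp_nxtinfo  |	only if a next message is queued
 *	| cmsghdr | sctp_sndrcvinfo or sctp_extrcvinfo |
 *
 * A user process would walk these with CMSG_FIRSTHDR()/CMSG_NXTHDR() on
 * the msghdr returned by recvmsg().
 */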


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
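
/*
 * Illustrative example (hedged, values invented): with
 * mapping_array_base_tsn = 0xfffffffe and tsn = 0x00000001,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 3 because TSN arithmetic is modulo
 * 2^32. The bit for that TSN then moves from mapping_array (revokable,
 * reported in gap-ack blocks) to nr_mapping_array (non-revokable, NR-SACK
 * semantics), and highest_tsn_inside_map is walked backwards if the moved
 * TSN happened to be the highest one in the revokable map.
 */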


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and it is not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue, this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
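
/*
 * Fragmentation flag refresher (a sketch of the rule the code above
 * enforces, per RFC 4960): a user message split across three DATA chunks
 * carries B/E bits as
 *
 *	TSN 100  B=1 E=0  (SCTP_DATA_FIRST_FRAG)
 *	TSN 101  B=0 E=0  (SCTP_DATA_MIDDLE_FRAG)
 *	TSN 102  B=0 E=1  (SCTP_DATA_LAST_FRAG)
 *
 * so delivery walks consecutive TSNs from a FIRST fragment and stops once
 * the LAST fragment (E-bit) has been appended to the read queue. The TSN
 * values here are invented for illustration.
 */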

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * sequence number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
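
/*
 * SSN comparisons above use 16-bit serial-number arithmetic
 * (SCTP_SSN_GT/SCTP_SSN_GE), so ordering survives wrap-around. A hedged
 * illustration: SCTP_SSN_GT(2, 65535) is true because the 16-bit
 * difference (uint16_t)(2 - 65535) == 3 lands in the "ahead" half of the
 * sequence space, i.e. SSN 2 is "after" SSN 65535 once the space wraps,
 * which is why the duplicate/old-SSN checks cannot simply use the C <
 * operator.
 */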

/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and 1 if all of the
 * message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
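
/*
 * Partial-delivery trigger, sketched with invented numbers (the real
 * SCTP_PARTIAL_DELIVERY_SHIFT value may differ): with a 64 KB receive
 * buffer and a shift of, say, 3, pd_point = min(64 KB >> 3,
 * partial_delivery_point). If 10 KB of a still-incomplete message sits at
 * the head of the reassembly queue and pd_point works out to 8 KB,
 * delivery starts anyway (the PD-API), handing the application the
 * leading fragments before the LAST one arrives.
 */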

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				snprintf(msg, sizeof(msg),
				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
				    chk->rec.data.TSN_seq,
				    chk->rec.data.stream_number,
				    chk->rec.data.stream_seq);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->str_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					snprintf(msg, sizeof(msg),
					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    asoc->ssn_of_pdapi,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					snprintf(msg, sizeof(msg),
					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    prev->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					snprintf(msg, sizeof(msg),
					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					snprintf(msg, sizeof(msg),
					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					snprintf(msg, sizeof(msg),
					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					snprintf(msg, sizeof(msg),
					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_number,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
					/*
					 * Huh, need the same ordering here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
					snprintf(msg, sizeof(msg),
					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
					    chk->rec.data.TSN_seq,
					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					snprintf(msg, sizeof(msg),
					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
					    next->rec.data.stream_seq,
					    chk->rec.data.TSN_seq,
					    chk->rec.data.stream_number,
					    chk->rec.data.stream_seq);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
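
/*
 * Worked example of the check above (hedged, the TSNs are invented): if
 * the queue holds TSN 100 marked LAST_FRAG and a new self-contained chunk
 * arrives with TSN 101, that is consistent (a message may start right
 * after a LAST fragment) and 0 is returned. If TSN 100 were a MIDDLE
 * fragment instead, TSN 101 would have to be a middle or last piece of
 * the same message, so a self-contained chunk there returns 1 and is
 * treated as evidence of a bogus or malicious fragment stream.
 */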
1253 
1254 static int
1255 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1256     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1257     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1258     int *break_flag, int last_chunk)
1259 {
1260 	/* Process a data chunk */
1261 	/* struct sctp_tmit_chunk *chk; */
1262 	struct sctp_tmit_chunk *chk;
1263 	uint32_t tsn, gap;
1264 	struct mbuf *dmbuf;
1265 	int the_len;
1266 	int need_reasm_check = 0;
1267 	uint16_t strmno, strmseq;
1268 	struct mbuf *op_err;
1269 	char msg[SCTP_DIAG_INFO_LEN];
1270 	struct sctp_queued_to_read *control;
1271 	int ordered;
1272 	uint32_t protocol_id;
1273 	uint8_t chunk_flags;
1274 	struct sctp_stream_reset_list *liste;
1275 
1276 	chk = NULL;
1277 	tsn = ntohl(ch->dp.tsn);
1278 	chunk_flags = ch->ch.chunk_flags;
1279 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1280 		asoc->send_sack = 1;
1281 	}
1282 	protocol_id = ch->dp.protocol_id;
1283 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1284 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1285 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1286 	}
1287 	if (stcb == NULL) {
1288 		return (0);
1289 	}
1290 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1291 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1292 		/* It is a duplicate */
1293 		SCTP_STAT_INCR(sctps_recvdupdata);
1294 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1295 			/* Record a dup for the next outbound sack */
1296 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1297 			asoc->numduptsns++;
1298 		}
1299 		asoc->send_sack = 1;
1300 		return (0);
1301 	}
1302 	/* Calculate the number of TSN's between the base and this TSN */
1303 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1304 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1305 		/* Can't hold the bit in the mapping at max array, toss it */
1306 		return (0);
1307 	}
1308 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1309 		SCTP_TCB_LOCK_ASSERT(stcb);
1310 		if (sctp_expand_mapping_array(asoc, gap)) {
1311 			/* Can't expand, drop it */
1312 			return (0);
1313 		}
1314 	}
1315 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1316 		*high_tsn = tsn;
1317 	}
1318 	/* See if we have received this one already */
1319 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1320 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1321 		SCTP_STAT_INCR(sctps_recvdupdata);
1322 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1323 			/* Record a dup for the next outbound sack */
1324 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1325 			asoc->numduptsns++;
1326 		}
1327 		asoc->send_sack = 1;
1328 		return (0);
1329 	}
1330 	/*
1331 	 * Check to see about the GONE flag, duplicates would cause a sack
1332 	 * to be sent up above
1333 	 */
1334 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1335 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1336 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1337 		/*
1338 		 * wait a minute, this guy is gone, there is no longer a
1339 		 * receiver. Send peer an ABORT!
1340 		 */
1341 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1342 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1343 		*abort_flag = 1;
1344 		return (0);
1345 	}
1346 	/*
1347 	 * Now before going further we see if there is room. If NOT then we
1348 	 * MAY let one through only IF this TSN is the one we are waiting
1349 	 * for on a partial delivery API.
1350 	 */
1351 
1352 	/* now do the tests */
1353 	if (((asoc->cnt_on_all_streams +
1354 	    asoc->cnt_on_reasm_queue +
1355 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1356 	    (((int)asoc->my_rwnd) <= 0)) {
1357 		/*
1358 		 * When we have NO room in the rwnd we check to make sure
1359 		 * the reader is doing its job...
1360 		 */
1361 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1362 			/* some to read, wake-up */
1363 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1364 			struct socket *so;
1365 
1366 			so = SCTP_INP_SO(stcb->sctp_ep);
1367 			atomic_add_int(&stcb->asoc.refcnt, 1);
1368 			SCTP_TCB_UNLOCK(stcb);
1369 			SCTP_SOCKET_LOCK(so, 1);
1370 			SCTP_TCB_LOCK(stcb);
1371 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1372 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1373 				/* assoc was freed while we were unlocked */
1374 				SCTP_SOCKET_UNLOCK(so, 1);
1375 				return (0);
1376 			}
1377 #endif
1378 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1379 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1380 			SCTP_SOCKET_UNLOCK(so, 1);
1381 #endif
1382 		}
1383 		/* now is it in the mapping array of what we have accepted? */
1384 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1385 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1386 			/* Nope not in the valid range dump it */
1387 			sctp_set_rwnd(stcb, asoc);
1388 			if ((asoc->cnt_on_all_streams +
1389 			    asoc->cnt_on_reasm_queue +
1390 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1391 				SCTP_STAT_INCR(sctps_datadropchklmt);
1392 			} else {
1393 				SCTP_STAT_INCR(sctps_datadroprwnd);
1394 			}
1395 			*break_flag = 1;
1396 			return (0);
1397 		}
1398 	}
1399 	strmno = ntohs(ch->dp.stream_id);
1400 	if (strmno >= asoc->streamincnt) {
1401 		struct sctp_paramhdr *phdr;
1402 		struct mbuf *mb;
1403 
1404 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1405 		    0, M_NOWAIT, 1, MT_DATA);
1406 		if (mb != NULL) {
1407 			/* add some space up front so prepend will work well */
1408 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1409 			phdr = mtod(mb, struct sctp_paramhdr *);
1410 			/*
1411 			 * Error causes are just param's and this one has
1412 			 * two back to back phdr, one with the error type
1413 			 * and size, the other with the streamid and a rsvd
1414 			 */
1415 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1416 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1417 			phdr->param_length =
1418 			    htons(sizeof(struct sctp_paramhdr) * 2);
1419 			phdr++;
1420 			/* We insert the stream in the type field */
1421 			phdr->param_type = ch->dp.stream_id;
1422 			/* And set the length to 0 for the rsvd field */
1423 			phdr->param_length = 0;
1424 			sctp_queue_op_err(stcb, mb);
1425 		}
1426 		SCTP_STAT_INCR(sctps_badsid);
1427 		SCTP_TCB_LOCK_ASSERT(stcb);
1428 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1429 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1430 			asoc->highest_tsn_inside_nr_map = tsn;
1431 		}
1432 		if (tsn == (asoc->cumulative_tsn + 1)) {
1433 			/* Update cum-ack */
1434 			asoc->cumulative_tsn = tsn;
1435 		}
1436 		return (0);
1437 	}
1438 	/*
1439 	 * Before we continue lets validate that we are not being fooled by
1440 	 * an evil attacker. We can only have 4k chunks based on our TSN
1441 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1442 	 * way our stream sequence numbers could have wrapped. We of course
1443 	 * only validate the FIRST fragment so the bit must be set.
1444 	 */
1445 	strmseq = ntohs(ch->dp.stream_sequence);
1446 #ifdef SCTP_ASOCLOG_OF_TSNS
1447 	SCTP_TCB_LOCK_ASSERT(stcb);
1448 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1449 		asoc->tsn_in_at = 0;
1450 		asoc->tsn_in_wrapped = 1;
1451 	}
1452 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1453 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1454 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1455 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1456 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1457 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1458 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1459 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1460 	asoc->tsn_in_at++;
1461 #endif
1462 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1463 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1464 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1465 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1466 		/* The incoming sseq is behind where we last delivered? */
1467 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1468 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1469 
1470 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1471 		    asoc->strmin[strmno].last_sequence_delivered,
1472 		    tsn, strmno, strmseq);
1473 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1474 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1475 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1476 		*abort_flag = 1;
1477 		return (0);
1478 	}
1479 	/************************************
1480 	 * From here down we may find ch-> invalid,
1481 	 * so it's a good idea NOT to use it.
1482 	 *************************************/
1483 
1484 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1485 	if (last_chunk == 0) {
1486 		dmbuf = SCTP_M_COPYM(*m,
1487 		    (offset + sizeof(struct sctp_data_chunk)),
1488 		    the_len, M_NOWAIT);
1489 #ifdef SCTP_MBUF_LOGGING
1490 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1491 			struct mbuf *mat;
1492 
1493 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1494 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1495 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1496 				}
1497 			}
1498 		}
1499 #endif
1500 	} else {
1501 		/* We can steal the last chunk */
1502 		int l_len;
1503 
1504 		dmbuf = *m;
1505 		/* lop off the top part */
1506 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1507 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1508 			l_len = SCTP_BUF_LEN(dmbuf);
1509 		} else {
1510 			/*
1511 			 * need to count up the size; hopefully we do not
1512 			 * hit this too often :-0
1513 			 */
1514 			struct mbuf *lat;
1515 
1516 			l_len = 0;
1517 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1518 				l_len += SCTP_BUF_LEN(lat);
1519 			}
1520 		}
1521 		if (l_len > the_len) {
1522 			/* Trim the extra rounding bytes off the end too */
1523 			m_adj(dmbuf, -(l_len - the_len));
1524 		}
1525 	}
1526 	if (dmbuf == NULL) {
1527 		SCTP_STAT_INCR(sctps_nomem);
1528 		return (0);
1529 	}
1530 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1531 	    asoc->fragmented_delivery_inprogress == 0 &&
1532 	    TAILQ_EMPTY(&asoc->resetHead) &&
1533 	    ((ordered == 0) ||
1534 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1535 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1536 		/* Candidate for express delivery */
1537 		/*
1538 		 * It's not fragmented, no PD-API is up, nothing is in the
1539 		 * delivery queue, it's un-ordered OR ordered and the next to
1540 		 * deliver AND nothing else is stuck on the stream queue,
1541 		 * and there is room for it in the socket buffer. Let's just
1542 		 * stuff it up the buffer....
1543 		 */
1544 
1545 		/* It would be nice to avoid this copy if we could :< */
1546 		sctp_alloc_a_readq(stcb, control);
1547 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1548 		    protocol_id,
1549 		    strmno, strmseq,
1550 		    chunk_flags,
1551 		    dmbuf);
1552 		if (control == NULL) {
1553 			goto failed_express_del;
1554 		}
1555 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1556 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1557 			asoc->highest_tsn_inside_nr_map = tsn;
1558 		}
1559 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1560 		    control, &stcb->sctp_socket->so_rcv,
1561 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1562 
1563 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1564 			/* for ordered, bump what we delivered */
1565 			asoc->strmin[strmno].last_sequence_delivered++;
1566 		}
1567 		SCTP_STAT_INCR(sctps_recvexpress);
1568 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1569 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1570 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1571 		}
1572 		control = NULL;
1573 
1574 		goto finish_express_del;
1575 	}
1576 failed_express_del:
1577 	/* If we reach here this is a new chunk */
1578 	chk = NULL;
1579 	control = NULL;
1580 	/* Express for fragmented delivery? */
1581 	if ((asoc->fragmented_delivery_inprogress) &&
1582 	    (stcb->asoc.control_pdapi) &&
1583 	    (asoc->str_of_pdapi == strmno) &&
1584 	    (asoc->ssn_of_pdapi == strmseq)
1585 	    ) {
1586 		control = stcb->asoc.control_pdapi;
1587 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1588 			/* Can't be another first? */
1589 			goto failed_pdapi_express_del;
1590 		}
1591 		if (tsn == (control->sinfo_tsn + 1)) {
1592 			/* Yep, we can add it on */
1593 			int end = 0;
1594 
1595 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1596 				end = 1;
1597 			}
1598 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1599 			    tsn,
1600 			    &stcb->sctp_socket->so_rcv)) {
1601 				SCTP_PRINTF("Append fails end:%d\n", end);
1602 				goto failed_pdapi_express_del;
1603 			}
1604 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1605 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1606 				asoc->highest_tsn_inside_nr_map = tsn;
1607 			}
1608 			SCTP_STAT_INCR(sctps_recvexpressm);
1609 			asoc->tsn_last_delivered = tsn;
1610 			asoc->fragment_flags = chunk_flags;
1611 			asoc->tsn_of_pdapi_last_delivered = tsn;
1612 			asoc->last_flags_delivered = chunk_flags;
1613 			asoc->last_strm_seq_delivered = strmseq;
1614 			asoc->last_strm_no_delivered = strmno;
1615 			if (end) {
1616 				/* clean up the flags and such */
1617 				asoc->fragmented_delivery_inprogress = 0;
1618 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1619 					asoc->strmin[strmno].last_sequence_delivered++;
1620 				}
1621 				stcb->asoc.control_pdapi = NULL;
1622 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1623 					/*
1624 					 * There could be another message
1625 					 * ready
1626 					 */
1627 					need_reasm_check = 1;
1628 				}
1629 			}
1630 			control = NULL;
1631 			goto finish_express_del;
1632 		}
1633 	}
1634 failed_pdapi_express_del:
1635 	control = NULL;
1636 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1637 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1638 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1639 			asoc->highest_tsn_inside_nr_map = tsn;
1640 		}
1641 	} else {
1642 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1643 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1644 			asoc->highest_tsn_inside_map = tsn;
1645 		}
1646 	}
1647 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1648 		sctp_alloc_a_chunk(stcb, chk);
1649 		if (chk == NULL) {
1650 			/* No memory so we drop the chunk */
1651 			SCTP_STAT_INCR(sctps_nomem);
1652 			if (last_chunk == 0) {
1653 				/* we copied it, free the copy */
1654 				sctp_m_freem(dmbuf);
1655 			}
1656 			return (0);
1657 		}
1658 		chk->rec.data.TSN_seq = tsn;
1659 		chk->no_fr_allowed = 0;
1660 		chk->rec.data.stream_seq = strmseq;
1661 		chk->rec.data.stream_number = strmno;
1662 		chk->rec.data.payloadtype = protocol_id;
1663 		chk->rec.data.context = stcb->asoc.context;
1664 		chk->rec.data.doing_fast_retransmit = 0;
1665 		chk->rec.data.rcv_flags = chunk_flags;
1666 		chk->asoc = asoc;
1667 		chk->send_size = the_len;
1668 		chk->whoTo = net;
1669 		atomic_add_int(&net->ref_count, 1);
1670 		chk->data = dmbuf;
1671 	} else {
1672 		sctp_alloc_a_readq(stcb, control);
1673 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1674 		    protocol_id,
1675 		    strmno, strmseq,
1676 		    chunk_flags,
1677 		    dmbuf);
1678 		if (control == NULL) {
1679 			/* No memory so we drop the chunk */
1680 			SCTP_STAT_INCR(sctps_nomem);
1681 			if (last_chunk == 0) {
1682 				/* we copied it, free the copy */
1683 				sctp_m_freem(dmbuf);
1684 			}
1685 			return (0);
1686 		}
1687 		control->length = the_len;
1688 	}
1689 
1690 	/* Mark it as received */
1691 	/* Now queue it where it belongs */
1692 	if (control != NULL) {
1693 		/* First a sanity check */
1694 		if (asoc->fragmented_delivery_inprogress) {
1695 			/*
1696 			 * Ok, we have a fragmented delivery in progress. If
1697 			 * this chunk is next to deliver OR belongs, in our
1698 			 * view, to the reassembly queue, the peer is evil or
1699 			 * broken.
1700 			 */
1701 			uint32_t estimate_tsn;
1702 
1703 			estimate_tsn = asoc->tsn_last_delivered + 1;
1704 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1705 			    (estimate_tsn == control->sinfo_tsn)) {
1706 				/* Evil/Broke peer */
1707 				sctp_m_freem(control->data);
1708 				control->data = NULL;
1709 				if (control->whoFrom) {
1710 					sctp_free_remote_addr(control->whoFrom);
1711 					control->whoFrom = NULL;
1712 				}
1713 				sctp_free_a_readq(stcb, control);
1714 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1715 				    tsn, strmno, strmseq);
1716 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1717 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1718 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1719 				*abort_flag = 1;
1720 				if (last_chunk) {
1721 					*m = NULL;
1722 				}
1723 				return (0);
1724 			} else {
1725 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1726 					sctp_m_freem(control->data);
1727 					control->data = NULL;
1728 					if (control->whoFrom) {
1729 						sctp_free_remote_addr(control->whoFrom);
1730 						control->whoFrom = NULL;
1731 					}
1732 					sctp_free_a_readq(stcb, control);
1733 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1734 					    tsn, strmno, strmseq);
1735 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1736 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1737 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1738 					*abort_flag = 1;
1739 					if (last_chunk) {
1740 						*m = NULL;
1741 					}
1742 					return (0);
1743 				}
1744 			}
1745 		} else {
1746 			/* No PDAPI running */
1747 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1748 				/*
1749 				 * Reassembly queue is NOT empty. Validate
1750 				 * that this tsn does not need to be in the
1751 				 * reassembly queue. If it does, then our peer
1752 				 * is broken or evil.
1753 				 */
1754 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1755 					sctp_m_freem(control->data);
1756 					control->data = NULL;
1757 					if (control->whoFrom) {
1758 						sctp_free_remote_addr(control->whoFrom);
1759 						control->whoFrom = NULL;
1760 					}
1761 					sctp_free_a_readq(stcb, control);
1762 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1763 					    tsn, strmno, strmseq);
1764 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1765 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1766 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1767 					*abort_flag = 1;
1768 					if (last_chunk) {
1769 						*m = NULL;
1770 					}
1771 					return (0);
1772 				}
1773 			}
1774 		}
1775 		/* ok, if we reach here we have passed the sanity checks */
1776 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1777 			/* queue directly into socket buffer */
1778 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1779 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1780 			    control,
1781 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1782 		} else {
1783 			/*
1784 			 * Special check for when streams are resetting. We
1785 			 * could be smarter about this and check the actual
1786 			 * stream to see if it is not being reset.. that
1787 			 * way we would not create a HOLB when amongst
1788 			 * streams being reset and those not being reset.
1789 			 *
1790 			 * We take complete messages that have a stream reset
1791 			 * intervening (aka the TSN is after where our
1792 			 * cum-ack needs to be) off and put them on a
1793 			 * pending_reply_queue. The reassembly ones we do
1794 			 * not have to worry about since they are all sorted
1795 			 * and processed by TSN order. It is only the
1796 			 * singletons I must worry about.
1797 			 */
1798 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1799 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1800 				/*
1801 				 * yep its past where we need to reset... go
1802 				 * ahead and queue it.
1803 				 */
1804 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1805 					/* first one on */
1806 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1807 				} else {
1808 					struct sctp_queued_to_read *ctlOn,
1809 					                   *nctlOn;
1810 					unsigned char inserted = 0;
1811 
1812 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1813 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1814 							continue;
1815 						} else {
1816 							/* found it */
1817 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1818 							inserted = 1;
1819 							break;
1820 						}
1821 					}
1822 					if (inserted == 0) {
1823 						/*
1824 						 * not inserted before any
1825 						 * existing entry, so it
1826 						 * must go at the end.
1827 						 */
1828 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1829 					}
1830 				}
1831 			} else {
1832 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1833 				if (*abort_flag) {
1834 					if (last_chunk) {
1835 						*m = NULL;
1836 					}
1837 					return (0);
1838 				}
1839 			}
1840 		}
1841 	} else {
1842 		/* Into the re-assembly queue */
1843 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1844 		if (*abort_flag) {
1845 			/*
1846 			 * the assoc is now gone and chk was put onto the
1847 			 * reasm queue, which has all been freed.
1848 			 */
1849 			if (last_chunk) {
1850 				*m = NULL;
1851 			}
1852 			return (0);
1853 		}
1854 	}
1855 finish_express_del:
1856 	if (tsn == (asoc->cumulative_tsn + 1)) {
1857 		/* Update cum-ack */
1858 		asoc->cumulative_tsn = tsn;
1859 	}
1860 	if (last_chunk) {
1861 		*m = NULL;
1862 	}
1863 	if (ordered) {
1864 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1865 	} else {
1866 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1867 	}
1868 	SCTP_STAT_INCR(sctps_recvdata);
1869 	/* Set it present please */
1870 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1871 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1872 	}
1873 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1874 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1875 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1876 	}
1877 	/* check the special flag for stream resets */
1878 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1879 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1880 		/*
1881 		 * we have finished working through the backlogged TSNs; now
1882 		 * it is time to reset streams. 1: call the reset function,
1883 		 * 2: free pending_reply space, 3: distribute any chunks in
1884 		 * the pending_reply_queue.
1885 		 */
1886 		struct sctp_queued_to_read *ctl, *nctl;
1887 
1888 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1889 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1890 		SCTP_FREE(liste, SCTP_M_STRESET);
1891 		/* sa_ignore FREED_MEMORY */
1892 		liste = TAILQ_FIRST(&asoc->resetHead);
1893 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1894 			/* All can be removed */
1895 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1896 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1897 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1898 				if (*abort_flag) {
1899 					return (0);
1900 				}
1901 			}
1902 		} else {
1903 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1904 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1905 					break;
1906 				}
1907 				/*
1908 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1909 				 * process it, which is the negation of
1910 				 * ctl->sinfo_tsn > liste->tsn
1911 				 */
1912 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1913 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1914 				if (*abort_flag) {
1915 					return (0);
1916 				}
1917 			}
1918 		}
1919 		/*
1920 		 * Now service re-assembly to pick up anything that has been
1921 		 * held on the reassembly queue.
1922 		 */
1923 		sctp_deliver_reasm_check(stcb, asoc);
1924 		need_reasm_check = 0;
1925 	}
1926 	if (need_reasm_check) {
1927 		/* Another one waits? */
1928 		sctp_deliver_reasm_check(stcb, asoc);
1929 	}
1930 	return (1);
1931 }
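
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: the
 * express-delivery test above, restated as a single predicate.  The
 * function name is made up for the example; the condition mirrors the
 * code: the chunk is unfragmented, no PD-API is active, no stream
 * reset is pending, and the chunk is unordered OR is exactly the next
 * in-order SSN with an empty stream inqueue.
 */
#if 0
static int
express_delivery_ok(struct sctp_association *a, int chunk_flags,
    uint16_t strmno, uint16_t strmseq, int ordered)
{
	return (((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) &&
	    (a->fragmented_delivery_inprogress == 0) &&
	    TAILQ_EMPTY(&a->resetHead) &&
	    ((ordered == 0) ||
	    (((uint16_t)(a->strmin[strmno].last_sequence_delivered + 1) == strmseq) &&
	    TAILQ_EMPTY(&a->strmin[strmno].inqueue))));
}
#endif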
1932 
1933 int8_t sctp_map_lookup_tab[256] = {
1934 	0, 1, 0, 2, 0, 1, 0, 3,
1935 	0, 1, 0, 2, 0, 1, 0, 4,
1936 	0, 1, 0, 2, 0, 1, 0, 3,
1937 	0, 1, 0, 2, 0, 1, 0, 5,
1938 	0, 1, 0, 2, 0, 1, 0, 3,
1939 	0, 1, 0, 2, 0, 1, 0, 4,
1940 	0, 1, 0, 2, 0, 1, 0, 3,
1941 	0, 1, 0, 2, 0, 1, 0, 6,
1942 	0, 1, 0, 2, 0, 1, 0, 3,
1943 	0, 1, 0, 2, 0, 1, 0, 4,
1944 	0, 1, 0, 2, 0, 1, 0, 3,
1945 	0, 1, 0, 2, 0, 1, 0, 5,
1946 	0, 1, 0, 2, 0, 1, 0, 3,
1947 	0, 1, 0, 2, 0, 1, 0, 4,
1948 	0, 1, 0, 2, 0, 1, 0, 3,
1949 	0, 1, 0, 2, 0, 1, 0, 7,
1950 	0, 1, 0, 2, 0, 1, 0, 3,
1951 	0, 1, 0, 2, 0, 1, 0, 4,
1952 	0, 1, 0, 2, 0, 1, 0, 3,
1953 	0, 1, 0, 2, 0, 1, 0, 5,
1954 	0, 1, 0, 2, 0, 1, 0, 3,
1955 	0, 1, 0, 2, 0, 1, 0, 4,
1956 	0, 1, 0, 2, 0, 1, 0, 3,
1957 	0, 1, 0, 2, 0, 1, 0, 6,
1958 	0, 1, 0, 2, 0, 1, 0, 3,
1959 	0, 1, 0, 2, 0, 1, 0, 4,
1960 	0, 1, 0, 2, 0, 1, 0, 3,
1961 	0, 1, 0, 2, 0, 1, 0, 5,
1962 	0, 1, 0, 2, 0, 1, 0, 3,
1963 	0, 1, 0, 2, 0, 1, 0, 4,
1964 	0, 1, 0, 2, 0, 1, 0, 3,
1965 	0, 1, 0, 2, 0, 1, 0, 8
1966 };
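
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled:
 * sctp_map_lookup_tab[v] is the number of consecutive 1-bits in v
 * counting up from bit 0, i.e. how far the cum-ack can advance within
 * one byte of the OR'd mapping arrays before the first missing TSN.
 * The helper names below are made up for the example.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int
trailing_ones(uint8_t v)
{
	int n = 0;

	while (v & 1) {
		n++;
		v >>= 1;
	}
	return (n);
}

static void
check_map_lookup_tab(void)
{
	int v;

	for (v = 0; v < 256; v++) {
		/* 0xff is short-circuited by the caller (at += 8). */
		assert(sctp_map_lookup_tab[v] == trailing_ones((uint8_t)v));
	}
}
#endif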
1967 
1968 
1969 void
1970 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1971 {
1972 	/*
1973 	 * Now we also need to check the mapping array in a couple of ways.
1974 	 * 1) Did we move the cum-ack point?
1975 	 *
1976 	 * When you first glance at this you might think that all entries that
1977 	 * make up the position of the cum-ack would be in the nr-mapping
1978 	 * array only.. i.e. things up to the cum-ack are always
1979 	 * deliverable. That's true with one exception: when it's a fragmented
1980 	 * message, we may not deliver the data until some threshold (or all
1981 	 * of it) is in place. So we must OR the nr_mapping_array and
1982 	 * mapping_array to get a true picture of the cum-ack.
1983 	 */
1984 	struct sctp_association *asoc;
1985 	int at;
1986 	uint8_t val;
1987 	int slide_from, slide_end, lgap, distance;
1988 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1989 
1990 	asoc = &stcb->asoc;
1991 
1992 	old_cumack = asoc->cumulative_tsn;
1993 	old_base = asoc->mapping_array_base_tsn;
1994 	old_highest = asoc->highest_tsn_inside_map;
1995 	/*
1996 	 * We could probably improve this a small bit by calculating the
1997 	 * offset of the current cum-ack as the starting point.
1998 	 */
1999 	at = 0;
2000 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2001 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2002 		if (val == 0xff) {
2003 			at += 8;
2004 		} else {
2005 			/* there is a 0 bit */
2006 			at += sctp_map_lookup_tab[val];
2007 			break;
2008 		}
2009 	}
2010 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2011 
2012 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2013 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2014 #ifdef INVARIANTS
2015 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2016 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2017 #else
2018 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2019 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2020 		sctp_print_mapping_array(asoc);
2021 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2022 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2023 		}
2024 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2025 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2026 #endif
2027 	}
2028 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2029 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2030 	} else {
2031 		highest_tsn = asoc->highest_tsn_inside_map;
2032 	}
2033 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2034 		/* The complete array was covered by received TSNs, */
2035 		/* so the highest becomes the cum-ack */
2036 		int clr;
2037 
2038 #ifdef INVARIANTS
2039 		unsigned int i;
2040 
2041 #endif
2042 
2043 		/* clear the array */
2044 		clr = ((at + 7) >> 3);
2045 		if (clr > asoc->mapping_array_size) {
2046 			clr = asoc->mapping_array_size;
2047 		}
2048 		memset(asoc->mapping_array, 0, clr);
2049 		memset(asoc->nr_mapping_array, 0, clr);
2050 #ifdef INVARIANTS
2051 		for (i = 0; i < asoc->mapping_array_size; i++) {
2052 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2053 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2054 				sctp_print_mapping_array(asoc);
2055 			}
2056 		}
2057 #endif
2058 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2059 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2060 	} else if (at >= 8) {
2061 		/* we can slide the mapping array down */
2062 		/* slide_from holds where we hit the first NON 0xff byte */
2063 
2064 		/*
2065 		 * now calculate the ceiling of the move using our highest
2066 		 * TSN value
2067 		 */
2068 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2069 		slide_end = (lgap >> 3);
2070 		if (slide_end < slide_from) {
2071 			sctp_print_mapping_array(asoc);
2072 #ifdef INVARIANTS
2073 			panic("impossible slide");
2074 #else
2075 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2076 			    lgap, slide_end, slide_from, at);
2077 			return;
2078 #endif
2079 		}
2080 		if (slide_end > asoc->mapping_array_size) {
2081 #ifdef INVARIANTS
2082 			panic("would overrun buffer");
2083 #else
2084 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2085 			    asoc->mapping_array_size, slide_end);
2086 			slide_end = asoc->mapping_array_size;
2087 #endif
2088 		}
2089 		distance = (slide_end - slide_from) + 1;
2090 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2091 			sctp_log_map(old_base, old_cumack, old_highest,
2092 			    SCTP_MAP_PREPARE_SLIDE);
2093 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2094 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2095 		}
2096 		if (distance + slide_from > asoc->mapping_array_size ||
2097 		    distance < 0) {
2098 			/*
2099 			 * Here we do NOT slide forward the array so that
2100 			 * hopefully when more data comes in to fill it up
2101 			 * we will be able to slide it forward. Really I
2102 			 * don't think this should happen :-0
2103 			 */
2104 
2105 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2106 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2107 				    (uint32_t) asoc->mapping_array_size,
2108 				    SCTP_MAP_SLIDE_NONE);
2109 			}
2110 		} else {
2111 			int ii;
2112 
2113 			for (ii = 0; ii < distance; ii++) {
2114 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2115 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2116 
2117 			}
2118 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2119 				asoc->mapping_array[ii] = 0;
2120 				asoc->nr_mapping_array[ii] = 0;
2121 			}
2122 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2123 				asoc->highest_tsn_inside_map += (slide_from << 3);
2124 			}
2125 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2126 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2127 			}
2128 			asoc->mapping_array_base_tsn += (slide_from << 3);
2129 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2130 				sctp_log_map(asoc->mapping_array_base_tsn,
2131 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2132 				    SCTP_MAP_SLIDE_RESULT);
2133 			}
2134 		}
2135 	}
2136 }
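
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: the
 * gap values used to index the mapping arrays above are distances from
 * mapping_array_base_tsn, computed modulo 2^32 so that TSN wrap-around
 * works out with plain unsigned arithmetic.  The helper name is made
 * up for the example; SCTP_CALC_TSN_TO_GAP is what the real code uses.
 */
#if 0
#include <stdint.h>

static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
	/* Unsigned subtraction is already modulo 2^32. */
	return (tsn - base_tsn);
}

/*
 * Example: base_tsn = 0xfffffffe and tsn = 0x00000001 give gap = 3,
 * so even across the wrap the TSN lands in byte (3 >> 3) = 0,
 * bit (3 & 7) = 3 of the mapping array.
 */
#endif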
2137 
2138 void
2139 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2140 {
2141 	struct sctp_association *asoc;
2142 	uint32_t highest_tsn;
2143 
2144 	asoc = &stcb->asoc;
2145 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2146 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2147 	} else {
2148 		highest_tsn = asoc->highest_tsn_inside_map;
2149 	}
2150 
2151 	/*
2152 	 * Now we need to see if we need to queue a sack or just start the
2153 	 * timer (if allowed).
2154 	 */
2155 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2156 		/*
2157 		 * Ok, special case for the SHUTDOWN-SENT state. Here we make
2158 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2159 		 * a SACK.
2160 		 */
2161 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2162 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2163 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2164 		}
2165 		sctp_send_shutdown(stcb,
2166 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2167 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2168 	} else {
2169 		int is_a_gap;
2170 
2171 		/* is there a gap now? */
2172 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2173 
2174 		/*
2175 		 * CMT DAC algorithm: increase number of packets received
2176 		 * since last ack
2177 		 */
2178 		stcb->asoc.cmt_dac_pkts_rcvd++;
2179 
2180 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2181 							 * SACK */
2182 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2183 							 * longer is one */
2184 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2185 		    (is_a_gap) ||	/* is still a gap */
2186 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2187 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2188 		    ) {
2189 
2190 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2191 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2192 			    (stcb->asoc.send_sack == 0) &&
2193 			    (stcb->asoc.numduptsns == 0) &&
2194 			    (stcb->asoc.delayed_ack) &&
2195 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2196 
2197 				/*
2198 				 * CMT DAC algorithm: With CMT, delay acks
2199 				 * even in the face of reordering.
2200 				 *
2201 				 * Therefore, acks that do not have to be
2202 				 * sent because of the above reasons will be
2203 				 * delayed. That is, acks that would have
2204 				 * been sent due to gap reports will be
2205 				 * delayed with DAC. Start the delayed ack
2206 				 * timer.
2207 				 */
2208 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2209 				    stcb->sctp_ep, stcb, NULL);
2210 			} else {
2211 				/*
2212 				 * Ok we must build a SACK since the timer
2213 				 * is pending, we got our first packet OR
2214 				 * there are gaps or duplicates.
2215 				 */
2216 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2217 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2218 			}
2219 		} else {
2220 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2221 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2222 				    stcb->sctp_ep, stcb, NULL);
2223 			}
2224 		}
2225 	}
2226 }
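
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: the
 * immediate-SACK conditions tested above, pulled out as a predicate
 * for readability.  The function name is made up for the example; the
 * fields are the sctp_association fields used by sctp_sack_check().
 */
#if 0
static int
must_send_sack_now(struct sctp_association *a, int was_a_gap, int is_a_gap)
{
	return ((a->send_sack == 1) ||		  /* SACK explicitly requested */
	    (was_a_gap && (is_a_gap == 0)) ||	  /* a gap just closed */
	    (a->numduptsns != 0) ||		  /* duplicates to report */
	    is_a_gap ||				  /* still a gap */
	    (a->delayed_ack == 0) ||		  /* delayed SACK disabled */
	    (a->data_pkts_seen >= a->sack_freq)); /* packet limit reached */
}
#endif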
2227 
2228 void
2229 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2230 {
2231 	struct sctp_tmit_chunk *chk;
2232 	uint32_t tsize, pd_point;
2233 	uint16_t nxt_todel;
2234 
2235 	if (asoc->fragmented_delivery_inprogress) {
2236 		sctp_service_reassembly(stcb, asoc);
2237 	}
2238 	/* Can we proceed further, i.e. the PD-API is complete */
2239 	if (asoc->fragmented_delivery_inprogress) {
2240 		/* no */
2241 		return;
2242 	}
2243 	/*
2244 	 * Now, is there some other chunk I can deliver from the reassembly
2245 	 * queue?
2246 	 */
2247 doit_again:
2248 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2249 	if (chk == NULL) {
2250 		asoc->size_on_reasm_queue = 0;
2251 		asoc->cnt_on_reasm_queue = 0;
2252 		return;
2253 	}
2254 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2255 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2256 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2257 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2258 		/*
2259 		 * Yep, the first one is here. We set up to start reception
2260 		 * by backing down the TSN just in case we can't deliver.
2261 		 */
2262 
2263 		/*
2264 		 * Before we start, though, either all of the message should
2265 		 * be here, or at least pd_point bytes of it, before we
2266 		 * deliver anything.
2267 		 */
2268 		if (stcb->sctp_socket) {
2269 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2270 			    stcb->sctp_ep->partial_delivery_point);
2271 		} else {
2272 			pd_point = stcb->sctp_ep->partial_delivery_point;
2273 		}
2274 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2275 			asoc->fragmented_delivery_inprogress = 1;
2276 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2277 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2278 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2279 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2280 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2281 			sctp_service_reassembly(stcb, asoc);
2282 			if (asoc->fragmented_delivery_inprogress == 0) {
2283 				goto doit_again;
2284 			}
2285 		}
2286 	}
2287 }
2288 
2289 int
2290 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2291     struct sockaddr *src, struct sockaddr *dst,
2292     struct sctphdr *sh, struct sctp_inpcb *inp,
2293     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2294     uint8_t use_mflowid, uint32_t mflowid,
2295     uint32_t vrf_id, uint16_t port)
2296 {
2297 	struct sctp_data_chunk *ch, chunk_buf;
2298 	struct sctp_association *asoc;
2299 	int num_chunks = 0;	/* number of data chunks processed */
2300 	int stop_proc = 0;
2301 	int chk_length, break_flag, last_chunk;
2302 	int abort_flag = 0, was_a_gap;
2303 	struct mbuf *m;
2304 	uint32_t highest_tsn;
2305 
2306 	/* set the rwnd */
2307 	sctp_set_rwnd(stcb, &stcb->asoc);
2308 
2309 	m = *mm;
2310 	SCTP_TCB_LOCK_ASSERT(stcb);
2311 	asoc = &stcb->asoc;
2312 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2313 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2314 	} else {
2315 		highest_tsn = asoc->highest_tsn_inside_map;
2316 	}
2317 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2318 	/*
2319 	 * set up where we got the last DATA packet from for any SACK that
2320 	 * may need to go out. Don't bump the net. This is done ONLY when a
2321 	 * chunk is assigned.
2322 	 */
2323 	asoc->last_data_chunk_from = net;
2324 
2325 	/*-
2326 	 * Now before we proceed we must figure out if this is a wasted
2327 	 * cluster... i.e. it is a small packet sent in and yet the driver
2328 	 * underneath allocated a full cluster for it. If so we must copy it
2329 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2330 	 * with cluster starvation. Note for __Panda__ we don't do this
2331 	 * since it has clusters all the way down to 64 bytes.
2332 	 */
2333 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2334 		/* we only handle mbufs that are singletons.. not chains */
2335 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2336 		if (m) {
2337 			/* ok, let's see if we can copy the data up */
2338 			caddr_t *from, *to;
2339 
2340 			/* get the pointers and copy */
2341 			to = mtod(m, caddr_t *);
2342 			from = mtod((*mm), caddr_t *);
2343 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2344 			/* copy the length and free up the old */
2345 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2346 			sctp_m_freem(*mm);
2347 			/* success, copy back */
2348 			*mm = m;
2349 		} else {
2350 			/* We are in trouble in the mbuf world .. yikes */
2351 			m = *mm;
2352 		}
2353 	}
2354 	/* get pointer to the first chunk header */
2355 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2356 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2357 	if (ch == NULL) {
2358 		return (1);
2359 	}
2360 	/*
2361 	 * process all DATA chunks...
2362 	 */
2363 	*high_tsn = asoc->cumulative_tsn;
2364 	break_flag = 0;
2365 	asoc->data_pkts_seen++;
2366 	while (stop_proc == 0) {
2367 		/* validate chunk length */
2368 		chk_length = ntohs(ch->ch.chunk_length);
2369 		if (length - *offset < chk_length) {
2370 			/* all done, mutulated chunk */
2371 			/* all done, mutilated chunk */
2372 			continue;
2373 		}
2374 		if (ch->ch.chunk_type == SCTP_DATA) {
2375 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2376 				/*
2377 				 * Need to send an abort since we had an
2378 				 * invalid data chunk.
2379 				 */
2380 				struct mbuf *op_err;
2381 				char msg[SCTP_DIAG_INFO_LEN];
2382 
2383 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2384 				    chk_length);
2385 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2386 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2387 				sctp_abort_association(inp, stcb, m, iphlen,
2388 				    src, dst, sh, op_err,
2389 				    use_mflowid, mflowid,
2390 				    vrf_id, port);
2391 				return (2);
2392 			}
2393 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2394 				/*
2395 				 * Need to send an abort since we had an
2396 				 * empty data chunk.
2397 				 */
2398 				struct mbuf *op_err;
2399 
2400 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2401 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2402 				sctp_abort_association(inp, stcb, m, iphlen,
2403 				    src, dst, sh, op_err,
2404 				    use_mflowid, mflowid,
2405 				    vrf_id, port);
2406 				return (2);
2407 			}
2408 #ifdef SCTP_AUDITING_ENABLED
2409 			sctp_audit_log(0xB1, 0);
2410 #endif
2411 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2412 				last_chunk = 1;
2413 			} else {
2414 				last_chunk = 0;
2415 			}
2416 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2417 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2418 			    last_chunk)) {
2419 				num_chunks++;
2420 			}
2421 			if (abort_flag)
2422 				return (2);
2423 
2424 			if (break_flag) {
2425 				/*
2426 				 * Set because we are out of rwnd space and
2427 				 * have no drop report space left.
2428 				 */
2429 				stop_proc = 1;
2430 				continue;
2431 			}
2432 		} else {
2433 			/* not a data chunk in the data region */
2434 			switch (ch->ch.chunk_type) {
2435 			case SCTP_INITIATION:
2436 			case SCTP_INITIATION_ACK:
2437 			case SCTP_SELECTIVE_ACK:
2438 			case SCTP_NR_SELECTIVE_ACK:
2439 			case SCTP_HEARTBEAT_REQUEST:
2440 			case SCTP_HEARTBEAT_ACK:
2441 			case SCTP_ABORT_ASSOCIATION:
2442 			case SCTP_SHUTDOWN:
2443 			case SCTP_SHUTDOWN_ACK:
2444 			case SCTP_OPERATION_ERROR:
2445 			case SCTP_COOKIE_ECHO:
2446 			case SCTP_COOKIE_ACK:
2447 			case SCTP_ECN_ECHO:
2448 			case SCTP_ECN_CWR:
2449 			case SCTP_SHUTDOWN_COMPLETE:
2450 			case SCTP_AUTHENTICATION:
2451 			case SCTP_ASCONF_ACK:
2452 			case SCTP_PACKET_DROPPED:
2453 			case SCTP_STREAM_RESET:
2454 			case SCTP_FORWARD_CUM_TSN:
2455 			case SCTP_ASCONF:
2456 				/*
2457 				 * Now, what do we do with KNOWN chunks that
2458 				 * are NOT in the right place?
2459 				 *
2460 				 * For now, I do nothing but ignore them. We
2461 				 * may later want to add sysctl stuff to
2462 				 * switch out and do either an ABORT() or
2463 				 * possibly process them.
2464 				 */
2465 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2466 					struct mbuf *op_err;
2467 
2468 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2469 					sctp_abort_association(inp, stcb,
2470 					    m, iphlen,
2471 					    src, dst,
2472 					    sh, op_err,
2473 					    use_mflowid, mflowid,
2474 					    vrf_id, port);
2475 					return (2);
2476 				}
2477 				break;
2478 			default:
2479 				/* unknown chunk type, use bit rules */
2480 				if (ch->ch.chunk_type & 0x40) {
2481 					/* Add a error report to the queue */
2482 					struct mbuf *merr;
2483 					struct sctp_paramhdr *phd;
2484 
2485 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2486 					if (merr) {
2487 						phd = mtod(merr, struct sctp_paramhdr *);
2488 						/*
2489 						 * We cheat and use param
2490 						 * type since we did not
2491 						 * bother to define an error
2492 						 * cause struct. They are
2493 						 * the same basic format
2494 						 * with different names.
2495 						 */
2496 						phd->param_type =
2497 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2498 						phd->param_length =
2499 						    htons(chk_length + sizeof(*phd));
2500 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2501 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2502 						if (SCTP_BUF_NEXT(merr)) {
2503 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2504 								sctp_m_freem(merr);
2505 							} else {
2506 								sctp_queue_op_err(stcb, merr);
2507 							}
2508 						} else {
2509 							sctp_m_freem(merr);
2510 						}
2511 					}
2512 				}
2513 				if ((ch->ch.chunk_type & 0x80) == 0) {
2514 					/* discard the rest of this packet */
2515 					stop_proc = 1;
2516 				}	/* else skip this bad chunk and
2517 					 * continue... */
2518 				break;
2519 			}	/* switch of chunk type */
2520 		}
2521 		*offset += SCTP_SIZE32(chk_length);
2522 		if ((*offset >= length) || stop_proc) {
2523 			/* no more data left in the mbuf chain */
2524 			stop_proc = 1;
2525 			continue;
2526 		}
2527 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2528 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2529 		if (ch == NULL) {
2530 			*offset = length;
2531 			stop_proc = 1;
2532 			continue;
2533 		}
2534 	}
2535 	if (break_flag) {
2536 		/*
2537 		 * we need to report rwnd overrun drops.
2538 		 */
2539 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2540 	}
2541 	if (num_chunks) {
2542 		/*
2543 		 * Did we get data? If so, update the time for auto-close and
2544 		 * give the peer credit for being alive.
2545 		 */
2546 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2547 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2548 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2549 			    stcb->asoc.overall_error_count,
2550 			    0,
2551 			    SCTP_FROM_SCTP_INDATA,
2552 			    __LINE__);
2553 		}
2554 		stcb->asoc.overall_error_count = 0;
2555 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2556 	}
2557 	/* now service all of the reassm queue if needed */
2558 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2559 		sctp_service_queues(stcb, asoc);
2560 
2561 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2562 		/* Assure that we ack right away */
2563 		stcb->asoc.send_sack = 1;
2564 	}
2565 	/* Start a sack timer or QUEUE a SACK for sending */
2566 	sctp_sack_check(stcb, was_a_gap);
2567 	return (0);
2568 }
2569 
2570 static int
2571 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2572     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2573     int *num_frs,
2574     uint32_t * biggest_newly_acked_tsn,
2575     uint32_t * this_sack_lowest_newack,
2576     int *rto_ok)
2577 {
2578 	struct sctp_tmit_chunk *tp1;
2579 	unsigned int theTSN;
2580 	int j, wake_him = 0, circled = 0;
2581 
2582 	/* Recover the tp1 we last saw */
2583 	tp1 = *p_tp1;
2584 	if (tp1 == NULL) {
2585 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2586 	}
2587 	for (j = frag_strt; j <= frag_end; j++) {
2588 		theTSN = j + last_tsn;
2589 		while (tp1) {
2590 			if (tp1->rec.data.doing_fast_retransmit)
2591 				(*num_frs) += 1;
2592 
2593 			/*-
2594 			 * CMT: CUCv2 algorithm. For each TSN being
2595 			 * processed from the sent queue, track the
2596 			 * next expected pseudo-cumack, or
2597 			 * rtx_pseudo_cumack, if required. Separate
2598 			 * cumack trackers for first transmissions,
2599 			 * and retransmissions.
2600 			 */
2601 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2602 			    (tp1->snd_count == 1)) {
2603 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2604 				tp1->whoTo->find_pseudo_cumack = 0;
2605 			}
2606 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2607 			    (tp1->snd_count > 1)) {
2608 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2609 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2610 			}
2611 			if (tp1->rec.data.TSN_seq == theTSN) {
2612 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2613 					/*-
2614 					 * must be held until
2615 					 * cum-ack passes
2616 					 */
2617 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2618 						/*-
2619 						 * If it is less than RESEND, it is
2620 						 * now no-longer in flight.
2621 						 * Higher values may already be set
2622 						 * via previous Gap Ack Blocks...
2623 						 * i.e. ACKED or RESEND.
2624 						 */
2625 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2626 						    *biggest_newly_acked_tsn)) {
2627 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2628 						}
2629 						/*-
2630 						 * CMT: SFR algo (and HTNA) - set
2631 						 * saw_newack to 1 for dest being
2632 						 * newly acked. update
2633 						 * this_sack_highest_newack if
2634 						 * appropriate.
2635 						 */
2636 						if (tp1->rec.data.chunk_was_revoked == 0)
2637 							tp1->whoTo->saw_newack = 1;
2638 
2639 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2640 						    tp1->whoTo->this_sack_highest_newack)) {
2641 							tp1->whoTo->this_sack_highest_newack =
2642 							    tp1->rec.data.TSN_seq;
2643 						}
2644 						/*-
2645 						 * CMT DAC algo: also update
2646 						 * this_sack_lowest_newack
2647 						 */
2648 						if (*this_sack_lowest_newack == 0) {
2649 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2650 								sctp_log_sack(*this_sack_lowest_newack,
2651 								    last_tsn,
2652 								    tp1->rec.data.TSN_seq,
2653 								    0,
2654 								    0,
2655 								    SCTP_LOG_TSN_ACKED);
2656 							}
2657 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2658 						}
2659 						/*-
2660 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2661 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2662 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2663 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2664 						 * Separate pseudo_cumack trackers for first transmissions and
2665 						 * retransmissions.
2666 						 */
2667 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2668 							if (tp1->rec.data.chunk_was_revoked == 0) {
2669 								tp1->whoTo->new_pseudo_cumack = 1;
2670 							}
2671 							tp1->whoTo->find_pseudo_cumack = 1;
2672 						}
2673 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2674 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2675 						}
2676 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2677 							if (tp1->rec.data.chunk_was_revoked == 0) {
2678 								tp1->whoTo->new_pseudo_cumack = 1;
2679 							}
2680 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2681 						}
2682 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2683 							sctp_log_sack(*biggest_newly_acked_tsn,
2684 							    last_tsn,
2685 							    tp1->rec.data.TSN_seq,
2686 							    frag_strt,
2687 							    frag_end,
2688 							    SCTP_LOG_TSN_ACKED);
2689 						}
2690 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2691 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2692 							    tp1->whoTo->flight_size,
2693 							    tp1->book_size,
2694 							    (uintptr_t) tp1->whoTo,
2695 							    tp1->rec.data.TSN_seq);
2696 						}
2697 						sctp_flight_size_decrease(tp1);
2698 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2699 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2700 							    tp1);
2701 						}
2702 						sctp_total_flight_decrease(stcb, tp1);
2703 
2704 						tp1->whoTo->net_ack += tp1->send_size;
2705 						if (tp1->snd_count < 2) {
2706 							/*-
2707 							 * True non-retransmitted chunk
2708 							 */
2709 							tp1->whoTo->net_ack2 += tp1->send_size;
2710 
2711 							/*-
2712 							 * update RTO too?
2713 							 */
2714 							if (tp1->do_rtt) {
2715 								if (*rto_ok) {
2716 									tp1->whoTo->RTO =
2717 									    sctp_calculate_rto(stcb,
2718 									    &stcb->asoc,
2719 									    tp1->whoTo,
2720 									    &tp1->sent_rcv_time,
2721 									    sctp_align_safe_nocopy,
2722 									    SCTP_RTT_FROM_DATA);
2723 									*rto_ok = 0;
2724 								}
2725 								if (tp1->whoTo->rto_needed == 0) {
2726 									tp1->whoTo->rto_needed = 1;
2727 								}
2728 								tp1->do_rtt = 0;
2729 							}
2730 						}
2731 					}
2732 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2733 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2734 						    stcb->asoc.this_sack_highest_gap)) {
2735 							stcb->asoc.this_sack_highest_gap =
2736 							    tp1->rec.data.TSN_seq;
2737 						}
2738 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2739 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2740 #ifdef SCTP_AUDITING_ENABLED
2741 							sctp_audit_log(0xB2,
2742 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2743 #endif
2744 						}
2745 					}
2746 					/*-
2747 					 * All chunks NOT UNSENT fall through here and are marked
2748 					 * (leave PR-SCTP ones that are to skip alone though)
2749 					 */
2750 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2751 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2752 						tp1->sent = SCTP_DATAGRAM_MARKED;
2753 					}
2754 					if (tp1->rec.data.chunk_was_revoked) {
2755 						/* deflate the cwnd */
2756 						tp1->whoTo->cwnd -= tp1->book_size;
2757 						tp1->rec.data.chunk_was_revoked = 0;
2758 					}
2759 					/* NR Sack code here */
2760 					if (nr_sacking &&
2761 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2762 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2763 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2764 #ifdef INVARIANTS
2765 						} else {
2766 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2767 #endif
2768 						}
2769 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2770 						if (tp1->data) {
2771 							/*
2772 							 * sa_ignore
2773 							 * NO_NULL_CHK
2774 							 */
2775 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2776 							sctp_m_freem(tp1->data);
2777 							tp1->data = NULL;
2778 						}
2779 						wake_him++;
2780 					}
2781 				}
2782 				break;
2783 			}	/* if (tp1->TSN_seq == theTSN) */
2784 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2785 				break;
2786 			}
2787 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2788 			if ((tp1 == NULL) && (circled == 0)) {
2789 				circled++;
2790 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2791 			}
2792 		}		/* end while (tp1) */
2793 		if (tp1 == NULL) {
2794 			circled = 0;
2795 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2796 		}
2797 		/* In case the fragments were not in order we must reset */
2798 	}			/* end for (j = fragStart */
2799 	*p_tp1 = tp1;
2800 	return (wake_him);	/* Return value only used for nr-sack */
2801 }
2802 
2803 
2804 static int
2805 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2806     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2807     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2808     int num_seg, int num_nr_seg, int *rto_ok)
2809 {
2810 	struct sctp_gap_ack_block *frag, block;
2811 	struct sctp_tmit_chunk *tp1;
2812 	int i;
2813 	int num_frs = 0;
2814 	int chunk_freed;
2815 	int non_revocable;
2816 	uint16_t frag_strt, frag_end, prev_frag_end;
2817 
2818 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2819 	prev_frag_end = 0;
2820 	chunk_freed = 0;
2821 
2822 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2823 		if (i == num_seg) {
2824 			prev_frag_end = 0;
2825 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2826 		}
2827 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2828 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2829 		*offset += sizeof(block);
2830 		if (frag == NULL) {
2831 			return (chunk_freed);
2832 		}
2833 		frag_strt = ntohs(frag->start);
2834 		frag_end = ntohs(frag->end);
2835 
2836 		if (frag_strt > frag_end) {
2837 			/* This gap report is malformed, skip it. */
2838 			continue;
2839 		}
2840 		if (frag_strt <= prev_frag_end) {
2841 			/* This gap report is not in order, so restart. */
2842 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2843 		}
2844 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2845 			*biggest_tsn_acked = last_tsn + frag_end;
2846 		}
2847 		if (i < num_seg) {
2848 			non_revocable = 0;
2849 		} else {
2850 			non_revocable = 1;
2851 		}
2852 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2853 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2854 		    this_sack_lowest_newack, rto_ok)) {
2855 			chunk_freed = 1;
2856 		}
2857 		prev_frag_end = frag_end;
2858 	}
2859 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2860 		if (num_frs)
2861 			sctp_log_fr(*biggest_tsn_acked,
2862 			    *biggest_newly_acked_tsn,
2863 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2864 	}
2865 	return (chunk_freed);
2866 }
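
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: a gap
 * ack block carries 16-bit offsets relative to the cumulative TSN ack
 * of the SACK, so a block with start s and end e acknowledges TSNs
 * cum_ack + s through cum_ack + e inclusive; that is the
 * theTSN = j + last_tsn walk in sctp_process_segment_range() above.
 * The struct and function names are made up for the example.
 */
#if 0
#include <stdint.h>

struct gap_block {
	uint16_t start;		/* host byte order */
	uint16_t end;
};

static void
gap_block_to_tsns(uint32_t cum_ack, const struct gap_block *gb,
    uint32_t *first_tsn, uint32_t *last_tsn)
{
	/* Additions wrap modulo 2^32, matching TSN serial arithmetic. */
	*first_tsn = cum_ack + gb->start;
	*last_tsn = cum_ack + gb->end;
}
#endif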
2867 
2868 static void
2869 sctp_check_for_revoked(struct sctp_tcb *stcb,
2870     struct sctp_association *asoc, uint32_t cumack,
2871     uint32_t biggest_tsn_acked)
2872 {
2873 	struct sctp_tmit_chunk *tp1;
2874 
2875 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2876 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2877 			/*
2878 			 * ok, this guy is either ACKED or MARKED. If it is
2879 			 * ACKED it has been previously acked but not this
2880 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2881 			 * again.
2882 			 */
2883 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2884 				break;
2885 			}
2886 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2887 				/* it has been revoked */
2888 				tp1->sent = SCTP_DATAGRAM_SENT;
2889 				tp1->rec.data.chunk_was_revoked = 1;
2890 				/*
2891 				 * We must add this stuff back in to assure
2892 				 * timers and such get started.
2893 				 */
2894 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2895 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2896 					    tp1->whoTo->flight_size,
2897 					    tp1->book_size,
2898 					    (uintptr_t) tp1->whoTo,
2899 					    tp1->rec.data.TSN_seq);
2900 				}
2901 				sctp_flight_size_increase(tp1);
2902 				sctp_total_flight_increase(stcb, tp1);
2903 				/*
2904 				 * We inflate the cwnd to compensate for our
2905 				 * artificial inflation of the flight_size.
2906 				 */
2907 				tp1->whoTo->cwnd += tp1->book_size;
2908 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2909 					sctp_log_sack(asoc->last_acked_seq,
2910 					    cumack,
2911 					    tp1->rec.data.TSN_seq,
2912 					    0,
2913 					    0,
2914 					    SCTP_LOG_TSN_REVOKED);
2915 				}
2916 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2917 				/* it has been re-acked in this SACK */
2918 				tp1->sent = SCTP_DATAGRAM_ACKED;
2919 			}
2920 		}
2921 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2922 			break;
2923 	}
2924 }
2925 
2926 
2927 static void
2928 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2929     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2930 {
2931 	struct sctp_tmit_chunk *tp1;
2932 	int strike_flag = 0;
2933 	struct timeval now;
2934 	int tot_retrans = 0;
2935 	uint32_t sending_seq;
2936 	struct sctp_nets *net;
2937 	int num_dests_sacked = 0;
2938 
2939 	/*
2940 	 * select the sending_seq; this is either the next thing ready to be
2941 	 * sent but not yet transmitted, OR the next seq we will assign.
2942 	 */
2943 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2944 	if (tp1 == NULL) {
2945 		sending_seq = asoc->sending_seq;
2946 	} else {
2947 		sending_seq = tp1->rec.data.TSN_seq;
2948 	}
2949 
2950 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2951 	if ((asoc->sctp_cmt_on_off > 0) &&
2952 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2953 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2954 			if (net->saw_newack)
2955 				num_dests_sacked++;
2956 		}
2957 	}
2958 	if (stcb->asoc.peer_supports_prsctp) {
2959 		(void)SCTP_GETTIME_TIMEVAL(&now);
2960 	}
2961 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2962 		strike_flag = 0;
2963 		if (tp1->no_fr_allowed) {
2964 			/* this one had a timeout or something */
2965 			continue;
2966 		}
2967 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2968 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2969 				sctp_log_fr(biggest_tsn_newly_acked,
2970 				    tp1->rec.data.TSN_seq,
2971 				    tp1->sent,
2972 				    SCTP_FR_LOG_CHECK_STRIKE);
2973 		}
2974 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2975 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2976 			/* done */
2977 			break;
2978 		}
2979 		if (stcb->asoc.peer_supports_prsctp) {
2980 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2981 				/* Is it expired? */
2982 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2983 					/* Yes so drop it */
2984 					if (tp1->data != NULL) {
2985 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2986 						    SCTP_SO_NOT_LOCKED);
2987 					}
2988 					continue;
2989 				}
2990 			}
2991 		}
2992 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2993 			/* we are beyond the tsn in the sack  */
2994 			break;
2995 		}
2996 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2997 			/* either a RESEND, ACKED, or MARKED */
2998 			/* skip */
2999 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3000 				/* Continue striking FWD-TSN chunks */
3001 				tp1->rec.data.fwd_tsn_cnt++;
3002 			}
3003 			continue;
3004 		}
3005 		/*
3006 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3007 		 */
3008 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3009 			/*
3010 			 * No new acks were received for data sent to this
3011 			 * dest. Therefore, according to the SFR algo for
3012 			 * CMT, no data sent to this dest can be marked for
3013 			 * FR using this SACK.
3014 			 */
3015 			continue;
3016 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3017 		    tp1->whoTo->this_sack_highest_newack)) {
3018 			/*
3019 			 * CMT: New acks were received for data sent to
3020 			 * this dest. But no new acks were seen for data
3021 			 * sent after tp1. Therefore, according to the SFR
3022 			 * algo for CMT, tp1 cannot be marked for FR using
3023 			 * this SACK. This step covers part of the DAC algo
3024 			 * and the HTNA algo as well.
3025 			 */
3026 			continue;
3027 		}
3028 		/*
3029 		 * Here we check to see if we have already done a FR
3030 		 * and if so we see if the biggest TSN we saw in the sack is
3031 		 * smaller than the recovery point. If so we don't strike
3032 		 * the tsn... otherwise we CAN strike the TSN.
3033 		 */
3034 		/*
3035 		 * @@@ JRI: Check for CMT if (accum_moved &&
3036 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3037 		 * 0)) {
3038 		 */
3039 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3040 			/*
3041 			 * Strike the TSN if in fast-recovery and cum-ack
3042 			 * moved.
3043 			 */
3044 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3045 				sctp_log_fr(biggest_tsn_newly_acked,
3046 				    tp1->rec.data.TSN_seq,
3047 				    tp1->sent,
3048 				    SCTP_FR_LOG_STRIKE_CHUNK);
3049 			}
3050 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3051 				tp1->sent++;
3052 			}
3053 			if ((asoc->sctp_cmt_on_off > 0) &&
3054 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3055 				/*
3056 				 * CMT DAC algorithm: If SACK flag is set to
3057 				 * 0, then lowest_newack test will not pass
3058 				 * because it would have been set to the
3059 				 * cumack earlier. If not already marked for
3060 				 * retransmission, if this is not a mixed
3061 				 * sack, and if tp1 is not between two sacked
3062 				 * TSNs, then mark it by one more. NOTE that
3063 				 * we mark one additional time since the SACK
3064 				 * DAC flag indicates that two packets have
3065 				 * been received after this missing TSN.
3066 				 */
3067 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3068 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3069 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3070 						sctp_log_fr(16 + num_dests_sacked,
3071 						    tp1->rec.data.TSN_seq,
3072 						    tp1->sent,
3073 						    SCTP_FR_LOG_STRIKE_CHUNK);
3074 					}
3075 					tp1->sent++;
3076 				}
3077 			}
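			/*
			 * Net effect: with the DAC flag a single SACK can
			 * account for two missing reports on tp1 (the
			 * normal strike above plus this one), since the
			 * peer has told us two packets arrived after the
			 * hole.
			 */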
3078 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3079 		    (asoc->sctp_cmt_on_off == 0)) {
3080 			/*
3081 			 * For those that have done a FR we must take
3082 			 * special consideration if we strike. I.e. the
3083 			 * biggest_newly_acked must be higher than the
3084 			 * sending_seq at the time we did the FR.
3085 			 */
3086 			if (
3087 #ifdef SCTP_FR_TO_ALTERNATE
3088 			/*
3089 			 * If FR's go to new networks, then we must only do
3090 			 * this for singly homed asoc's. However if the FR's
3091 			 * go to the same network (Armando's work) then it's
3092 			 * ok to FR multiple times.
3093 			 */
3094 			    (asoc->numnets < 2)
3095 #else
3096 			    (1)
3097 #endif
3098 			    ) {
3099 
3100 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3101 				    tp1->rec.data.fast_retran_tsn)) {
3102 					/*
3103 					 * Strike the TSN, since this ack is
3104 					 * beyond where things were when we
3105 					 * did a FR.
3106 					 */
3107 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3108 						sctp_log_fr(biggest_tsn_newly_acked,
3109 						    tp1->rec.data.TSN_seq,
3110 						    tp1->sent,
3111 						    SCTP_FR_LOG_STRIKE_CHUNK);
3112 					}
3113 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3114 						tp1->sent++;
3115 					}
3116 					strike_flag = 1;
3117 					if ((asoc->sctp_cmt_on_off > 0) &&
3118 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3119 						/*
3120 						 * CMT DAC algorithm: If
3121 						 * SACK flag is set to 0,
3122 						 * then lowest_newack test
3123 						 * will not pass because it
3124 						 * would have been set to
3125 						 * the cumack earlier. If
3126 						 * not already marked for
3127 						 * retransmission, if this is
3128 						 * not a mixed sack, and if
3129 						 * tp1 is not between two
3130 						 * sacked TSNs, then mark it
3131 						 * by one more. NOTE that we
3132 						 * mark one additional time
3133 						 * since the SACK DAC flag
3134 						 * indicates that two packets
3135 						 * have been received after
3136 						 * this missing TSN.
3137 						 */
3138 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3139 						    (num_dests_sacked == 1) &&
3140 						    SCTP_TSN_GT(this_sack_lowest_newack,
3141 						    tp1->rec.data.TSN_seq)) {
3142 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3143 								sctp_log_fr(32 + num_dests_sacked,
3144 								    tp1->rec.data.TSN_seq,
3145 								    tp1->sent,
3146 								    SCTP_FR_LOG_STRIKE_CHUNK);
3147 							}
3148 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3149 								tp1->sent++;
3150 							}
3151 						}
3152 					}
3153 				}
3154 			}
3155 			/*
3156 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3157 			 * algo covers HTNA.
3158 			 */
3159 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3160 		    biggest_tsn_newly_acked)) {
3161 			/*
3162 			 * We don't strike these: This is the HTNA
3163 			 * algorithm, i.e. we don't strike if our TSN is
3164 			 * larger than the Highest TSN Newly Acked.
3165 			 */
3166 			;
3167 		} else {
3168 			/* Strike the TSN */
3169 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3170 				sctp_log_fr(biggest_tsn_newly_acked,
3171 				    tp1->rec.data.TSN_seq,
3172 				    tp1->sent,
3173 				    SCTP_FR_LOG_STRIKE_CHUNK);
3174 			}
3175 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3176 				tp1->sent++;
3177 			}
3178 			if ((asoc->sctp_cmt_on_off > 0) &&
3179 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3180 				/*
3181 				 * CMT DAC algorithm: If SACK flag is set to
3182 				 * 0, then lowest_newack test will not pass
3183 				 * because it would have been set to the
3184 				 * cumack earlier. If not already marked for
3185 				 * retransmission, if this is not a mixed
3186 				 * sack, and if tp1 is not between two sacked
3187 				 * TSNs, then mark it by one more. NOTE that
3188 				 * we mark one additional time since the SACK
3189 				 * DAC flag indicates that two packets have
3190 				 * been received after this missing TSN.
3191 				 */
3192 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3193 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3194 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3195 						sctp_log_fr(48 + num_dests_sacked,
3196 						    tp1->rec.data.TSN_seq,
3197 						    tp1->sent,
3198 						    SCTP_FR_LOG_STRIKE_CHUNK);
3199 					}
3200 					tp1->sent++;
3201 				}
3202 			}
3203 		}
3204 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3205 			struct sctp_nets *alt;
3206 
3207 			/* fix counts and things */
3208 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3209 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3210 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3211 				    tp1->book_size,
3212 				    (uintptr_t) tp1->whoTo,
3213 				    tp1->rec.data.TSN_seq);
3214 			}
3215 			if (tp1->whoTo) {
3216 				tp1->whoTo->net_ack++;
3217 				sctp_flight_size_decrease(tp1);
3218 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3219 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3220 					    tp1);
3221 				}
3222 			}
3223 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3224 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3225 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3226 			}
3227 			/* add back to the rwnd */
3228 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3229 
3230 			/* remove from the total flight */
3231 			sctp_total_flight_decrease(stcb, tp1);
3232 
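			/*
			 * Note: a chunk marked for retransmit is, in
			 * effect, handed back to the peer's rwnd and taken
			 * out of our flight; it is charged again when it is
			 * actually re-sent.
			 */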
3233 			if ((stcb->asoc.peer_supports_prsctp) &&
3234 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3235 				/*
3236 				 * Has it been retransmitted tv_sec times? -
3237 				 * we store the retran count there.
3238 				 */
3239 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3240 					/* Yes, so drop it */
3241 					if (tp1->data != NULL) {
3242 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3243 						    SCTP_SO_NOT_LOCKED);
3244 					}
3245 					/* Make sure to flag we had a FR */
3246 					tp1->whoTo->net_ack++;
3247 					continue;
3248 				}
3249 			}
3250 			/*
3251 			 * SCTP_PRINTF("OK, we are now ready to FR this
3252 			 * guy\n");
3253 			 */
3254 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3255 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3256 				    0, SCTP_FR_MARKED);
3257 			}
3258 			if (strike_flag) {
3259 				/* This is a subsequent FR */
3260 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3261 			}
3262 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3263 			if (asoc->sctp_cmt_on_off > 0) {
3264 				/*
3265 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3266 				 * If CMT is being used, then pick dest with
3267 				 * largest ssthresh for any retransmission.
3268 				 */
3269 				tp1->no_fr_allowed = 1;
3270 				alt = tp1->whoTo;
3271 				/* sa_ignore NO_NULL_CHK */
3272 				if (asoc->sctp_cmt_pf > 0) {
3273 					/*
3274 					 * JRS 5/18/07 - If CMT PF is on,
3275 					 * use the PF version of
3276 					 * find_alt_net()
3277 					 */
3278 					alt = sctp_find_alternate_net(stcb, alt, 2);
3279 				} else {
3280 					/*
3281 					 * JRS 5/18/07 - If only CMT is on,
3282 					 * use the CMT version of
3283 					 * find_alt_net()
3284 					 */
3285 					/* sa_ignore NO_NULL_CHK */
3286 					alt = sctp_find_alternate_net(stcb, alt, 1);
3287 				}
3288 				if (alt == NULL) {
3289 					alt = tp1->whoTo;
3290 				}
3291 				/*
3292 				 * CUCv2: If a different dest is picked for
3293 				 * the retransmission, then new
3294 				 * (rtx-)pseudo_cumack needs to be tracked
3295 				 * for orig dest. Let CUCv2 track new (rtx-)
3296 				 * pseudo-cumack always.
3297 				 */
3298 				if (tp1->whoTo) {
3299 					tp1->whoTo->find_pseudo_cumack = 1;
3300 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3301 				}
3302 			} else {/* CMT is OFF */
3303 
3304 #ifdef SCTP_FR_TO_ALTERNATE
3305 				/* Can we find an alternate? */
3306 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3307 #else
3308 				/*
3309 				 * default behavior is to NOT retransmit
3310 				 * FR's to an alternate. Armando Caro's
3311 				 * paper details why.
3312 				 */
3313 				alt = tp1->whoTo;
3314 #endif
3315 			}
3316 
3317 			tp1->rec.data.doing_fast_retransmit = 1;
3318 			tot_retrans++;
3319 			/* mark the sending seq for possible subsequent FR's */
3320 			/*
3321 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3322 			 * (uint32_t)tp1->rec.data.TSN_seq);
3323 			 */
3324 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3325 				/*
3326 				 * If the send queue is empty then this is
3327 				 * the next sequence number that will be
3328 				 * assigned, so we subtract one from it to
3329 				 * get the one we last sent.
3330 				 */
3331 				tp1->rec.data.fast_retran_tsn = sending_seq;
3332 			} else {
3333 				/*
3334 				 * If there are chunks on the send queue
3335 				 * (unsent data that has made it from the
3336 				 * stream queues but not out the door), we
3337 				 * take the first one (which will have the
3338 				 * lowest TSN) and subtract one to get the
3339 				 * one we last sent.
3340 				 */
3341 				struct sctp_tmit_chunk *ttt;
3342 
3343 				ttt = TAILQ_FIRST(&asoc->send_queue);
3344 				tp1->rec.data.fast_retran_tsn =
3345 				    ttt->rec.data.TSN_seq;
3346 			}
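			/*
			 * Either way, fast_retran_tsn records (roughly) the
			 * highest TSN handed out when this FR happened; a
			 * later SACK must newly ack at or beyond it before
			 * this chunk can be struck again (see the
			 * doing_fast_retransmit branch above).
			 */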
3347 
3348 			if (tp1->do_rtt) {
3349 				/*
3350 			 * this guy had an RTO calculation pending on
3351 				 * it, cancel it
3352 				 */
3353 				if ((tp1->whoTo != NULL) &&
3354 				    (tp1->whoTo->rto_needed == 0)) {
3355 					tp1->whoTo->rto_needed = 1;
3356 				}
3357 				tp1->do_rtt = 0;
3358 			}
3359 			if (alt != tp1->whoTo) {
3360 				/* yes, there is an alternate. */
3361 				sctp_free_remote_addr(tp1->whoTo);
3362 				/* sa_ignore FREED_MEMORY */
3363 				tp1->whoTo = alt;
3364 				atomic_add_int(&alt->ref_count, 1);
3365 			}
3366 		}
3367 	}
3368 }
3369 
3370 struct sctp_tmit_chunk *
3371 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3372     struct sctp_association *asoc)
3373 {
3374 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3375 	struct timeval now;
3376 	int now_filled = 0;
3377 
3378 	if (asoc->peer_supports_prsctp == 0) {
3379 		return (NULL);
3380 	}
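	/*
	 * Walk the sent queue looking for chunks the peer can be told to
	 * skip via a FORWARD-TSN (RFC 3758).  We may only advance over
	 * chunks that are already abandoned (FORWARD_TSN_SKIP), NR-acked,
	 * or expired resends; the first chunk still requiring reliable
	 * delivery stops the advance.
	 */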
3381 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3382 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3383 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3384 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3385 			/* no chance to advance, out of here */
3386 			break;
3387 		}
3388 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3389 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3390 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3391 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3392 				    asoc->advanced_peer_ack_point,
3393 				    tp1->rec.data.TSN_seq, 0, 0);
3394 			}
3395 		}
3396 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3397 			/*
3398 			 * We can't fwd-tsn past any that are reliable, i.e.
3399 			 * retransmitted until the asoc fails.
3400 			 */
3401 			break;
3402 		}
3403 		if (!now_filled) {
3404 			(void)SCTP_GETTIME_TIMEVAL(&now);
3405 			now_filled = 1;
3406 		}
3407 		/*
3408 		 * Now we have a chunk which is marked for another
3409 		 * retransmission to a PR-stream but has maybe run out of
3410 		 * its chances already OR has been marked to skip now. Can
3411 		 * we skip it if it's a resend?
3412 		 */
3413 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3414 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3415 			/*
3416 			 * Now is this one marked for resend and its time is
3417 			 * now up?
3418 			 */
3419 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3420 				/* Yes so drop it */
3421 				if (tp1->data) {
3422 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3423 					    1, SCTP_SO_NOT_LOCKED);
3424 				}
3425 			} else {
3426 				/*
3427 				 * No, we are done when we hit one marked for
3428 				 * resend whose time has not expired.
3429 				 */
3430 				break;
3431 			}
3432 		}
3433 		/*
3434 		 * Ok, now if this chunk is marked to be dropped, we can clean
3435 		 * up the chunk, advance our peer ack point and check
3436 		 * the next chunk.
3437 		 */
3438 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3439 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3440 			/* the advanced PeerAckPoint goes forward */
3441 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3442 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3443 				a_adv = tp1;
3444 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3445 				/* No update but we do save the chk */
3446 				a_adv = tp1;
3447 			}
3448 		} else {
3449 			/*
3450 			 * If it is still in RESEND we can advance no
3451 			 * further
3452 			 */
3453 			break;
3454 		}
3455 	}
3456 	return (a_adv);
3457 }
3458 
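/*
 * Sanity check: recount the sent queue by chunk state and compare against
 * the running flight-size bookkeeping.  Anything still counted below
 * RESEND (in flight) or sitting between RESEND and ACKED when the caller
 * expects an empty flight means the cached counters have drifted.
 */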
3459 static int
3460 sctp_fs_audit(struct sctp_association *asoc)
3461 {
3462 	struct sctp_tmit_chunk *chk;
3463 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3464 	int entry_flight, entry_cnt, ret;
3465 
3466 	entry_flight = asoc->total_flight;
3467 	entry_cnt = asoc->total_flight_count;
3468 	ret = 0;
3469 
3470 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3471 		return (0);
3472 
3473 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3474 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3475 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3476 			    chk->rec.data.TSN_seq,
3477 			    chk->send_size,
3478 			    chk->snd_count);
3479 			inflight++;
3480 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3481 			resend++;
3482 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3483 			inbetween++;
3484 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3485 			above++;
3486 		} else {
3487 			acked++;
3488 		}
3489 	}
3490 
3491 	if ((inflight > 0) || (inbetween > 0)) {
3492 #ifdef INVARIANTS
3493 		panic("Flight size-express incorrect?\n");
3494 #else
3495 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3496 		    entry_flight, entry_cnt);
3497 
3498 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3499 		    inflight, inbetween, resend, above, acked);
3500 		ret = 1;
3501 #endif
3502 	}
3503 	return (ret);
3504 }
3505 
3506 
3507 static void
3508 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3509     struct sctp_association *asoc,
3510     struct sctp_tmit_chunk *tp1)
3511 {
3512 	tp1->window_probe = 0;
3513 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3514 		/* TSNs skipped; we do NOT move back. */
3515 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3516 		    tp1->whoTo->flight_size,
3517 		    tp1->book_size,
3518 		    (uintptr_t) tp1->whoTo,
3519 		    tp1->rec.data.TSN_seq);
3520 		return;
3521 	}
3522 	/* First setup this by shrinking flight */
3523 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3524 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3525 		    tp1);
3526 	}
3527 	sctp_flight_size_decrease(tp1);
3528 	sctp_total_flight_decrease(stcb, tp1);
3529 	/* Now mark for resend */
3530 	tp1->sent = SCTP_DATAGRAM_RESEND;
3531 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3532 
3533 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3534 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3535 		    tp1->whoTo->flight_size,
3536 		    tp1->book_size,
3537 		    (uintptr_t) tp1->whoTo,
3538 		    tp1->rec.data.TSN_seq);
3539 	}
3540 }
3541 
3542 void
3543 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3544     uint32_t rwnd, int *abort_now, int ecne_seen)
3545 {
3546 	struct sctp_nets *net;
3547 	struct sctp_association *asoc;
3548 	struct sctp_tmit_chunk *tp1, *tp2;
3549 	uint32_t old_rwnd;
3550 	int win_probe_recovery = 0;
3551 	int win_probe_recovered = 0;
3552 	int j, done_once = 0;
3553 	int rto_ok = 1;
3554 
3555 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3556 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3557 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3558 	}
3559 	SCTP_TCB_LOCK_ASSERT(stcb);
3560 #ifdef SCTP_ASOCLOG_OF_TSNS
3561 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3562 	stcb->asoc.cumack_log_at++;
3563 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3564 		stcb->asoc.cumack_log_at = 0;
3565 	}
3566 #endif
3567 	asoc = &stcb->asoc;
3568 	old_rwnd = asoc->peers_rwnd;
3569 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3570 		/* old ack */
3571 		return;
3572 	} else if (asoc->last_acked_seq == cumack) {
3573 		/* Window update sack */
3574 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3575 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3576 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3577 			/* SWS sender side engages */
3578 			asoc->peers_rwnd = 0;
3579 		}
3580 		if (asoc->peers_rwnd > old_rwnd) {
3581 			goto again;
3582 		}
3583 		return;
3584 	}
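	/*
	 * Note on the rwnd math above: the peer's advertised window is
	 * reduced by everything we still count in flight plus a per-chunk
	 * overhead allowance (sctp_peer_chunk_oh), and is clamped to zero
	 * below the SWS threshold so we do not dribble tiny packets.
	 */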
3585 	/* First setup for CC stuff */
3586 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3587 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3588 			/* Drag along the window_tsn for cwr's */
3589 			net->cwr_window_tsn = cumack;
3590 		}
3591 		net->prev_cwnd = net->cwnd;
3592 		net->net_ack = 0;
3593 		net->net_ack2 = 0;
3594 
3595 		/*
3596 		 * CMT: Reset CUC and Fast recovery algo variables before
3597 		 * SACK processing
3598 		 */
3599 		net->new_pseudo_cumack = 0;
3600 		net->will_exit_fast_recovery = 0;
3601 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3602 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3603 		}
3604 	}
3605 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3606 		uint32_t send_s;
3607 
3608 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3609 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3610 			    sctpchunk_listhead);
3611 			send_s = tp1->rec.data.TSN_seq + 1;
3612 		} else {
3613 			send_s = asoc->sending_seq;
3614 		}
3615 		if (SCTP_TSN_GE(cumack, send_s)) {
3616 #ifndef INVARIANTS
3617 			struct mbuf *op_err;
3618 			char msg[SCTP_DIAG_INFO_LEN];
3619 
3620 #endif
3621 #ifdef INVARIANTS
3622 			panic("Impossible sack 1");
3623 #else
3624 
3625 			*abort_now = 1;
3626 			/* XXX */
3627 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
3628 			    cumack, send_s);
3629 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3630 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3631 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3632 			return;
3633 #endif
3634 		}
3635 	}
3636 	asoc->this_sack_highest_gap = cumack;
3637 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3638 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3639 		    stcb->asoc.overall_error_count,
3640 		    0,
3641 		    SCTP_FROM_SCTP_INDATA,
3642 		    __LINE__);
3643 	}
3644 	stcb->asoc.overall_error_count = 0;
3645 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3646 		/* process the new consecutive TSN first */
3647 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3648 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3649 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3650 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3651 				}
3652 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3653 					/*
3654 					 * If it is less than ACKED, it is
3655 					 * now no-longer in flight. Higher
3656 					 * values may occur during marking
3657 					 */
3658 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3659 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3660 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3661 							    tp1->whoTo->flight_size,
3662 							    tp1->book_size,
3663 							    (uintptr_t) tp1->whoTo,
3664 							    tp1->rec.data.TSN_seq);
3665 						}
3666 						sctp_flight_size_decrease(tp1);
3667 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3668 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3669 							    tp1);
3670 						}
3671 						/* sa_ignore NO_NULL_CHK */
3672 						sctp_total_flight_decrease(stcb, tp1);
3673 					}
3674 					tp1->whoTo->net_ack += tp1->send_size;
3675 					if (tp1->snd_count < 2) {
3676 						/*
3677 						 * True non-retransmitted
3678 						 * chunk
3679 						 */
3680 						tp1->whoTo->net_ack2 +=
3681 						    tp1->send_size;
3682 
3683 						/* update RTO too? */
3684 						if (tp1->do_rtt) {
3685 							if (rto_ok) {
3686 								/* sa_ignore NO_NULL_CHK */
3687 								tp1->whoTo->RTO =
3688 								    sctp_calculate_rto(stcb,
3689 								    asoc, tp1->whoTo,
3690 								    &tp1->sent_rcv_time,
3691 								    sctp_align_safe_nocopy,
3692 								    SCTP_RTT_FROM_DATA);
3697 								rto_ok = 0;
3698 							}
3699 							if (tp1->whoTo->rto_needed == 0) {
3700 								tp1->whoTo->rto_needed = 1;
3701 							}
3702 							tp1->do_rtt = 0;
3703 						}
3704 					}
3705 					/*
3706 					 * CMT: CUCv2 algorithm. From the
3707 					 * cumack'd TSNs, for each TSN being
3708 					 * acked for the first time, set the
3709 					 * following variables for the
3710 					 * corresp destination.
3711 					 * new_pseudo_cumack will trigger a
3712 					 * cwnd update.
3713 					 * find_(rtx_)pseudo_cumack will
3714 					 * trigger search for the next
3715 					 * expected (rtx-)pseudo-cumack.
3716 					 */
3717 					tp1->whoTo->new_pseudo_cumack = 1;
3718 					tp1->whoTo->find_pseudo_cumack = 1;
3719 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3720 
3721 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3722 						/* sa_ignore NO_NULL_CHK */
3723 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3724 					}
3725 				}
3726 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3727 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3728 				}
3729 				if (tp1->rec.data.chunk_was_revoked) {
3730 					/* deflate the cwnd */
3731 					tp1->whoTo->cwnd -= tp1->book_size;
3732 					tp1->rec.data.chunk_was_revoked = 0;
3733 				}
3734 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3735 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3736 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3737 #ifdef INVARIANTS
3738 					} else {
3739 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3740 #endif
3741 					}
3742 				}
3743 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3744 				if (tp1->data) {
3745 					/* sa_ignore NO_NULL_CHK */
3746 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3747 					sctp_m_freem(tp1->data);
3748 					tp1->data = NULL;
3749 				}
3750 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3751 					sctp_log_sack(asoc->last_acked_seq,
3752 					    cumack,
3753 					    tp1->rec.data.TSN_seq,
3754 					    0,
3755 					    0,
3756 					    SCTP_LOG_FREE_SENT);
3757 				}
3758 				asoc->sent_queue_cnt--;
3759 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3760 			} else {
3761 				break;
3762 			}
3763 		}
3764 
3765 	}
3766 	/* sa_ignore NO_NULL_CHK */
3767 	if (stcb->sctp_socket) {
3768 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3769 		struct socket *so;
3770 
3771 #endif
3772 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3773 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3774 			/* sa_ignore NO_NULL_CHK */
3775 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3776 		}
3777 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3778 		so = SCTP_INP_SO(stcb->sctp_ep);
3779 		atomic_add_int(&stcb->asoc.refcnt, 1);
3780 		SCTP_TCB_UNLOCK(stcb);
3781 		SCTP_SOCKET_LOCK(so, 1);
3782 		SCTP_TCB_LOCK(stcb);
3783 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3784 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3785 			/* assoc was freed while we were unlocked */
3786 			SCTP_SOCKET_UNLOCK(so, 1);
3787 			return;
3788 		}
3789 #endif
3790 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3791 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3792 		SCTP_SOCKET_UNLOCK(so, 1);
3793 #endif
3794 	} else {
3795 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3796 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3797 		}
3798 	}
3799 
3800 	/* JRS - Use the congestion control given in the CC module */
3801 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3802 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3803 			if (net->net_ack2 > 0) {
3804 				/*
3805 				 * Karn's rule applies to clearing error
3806 				 * count, this is optional.
3807 				 */
3808 				net->error_count = 0;
3809 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3810 					/* addr came good */
3811 					net->dest_state |= SCTP_ADDR_REACHABLE;
3812 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3813 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3814 				}
3815 				if (net == stcb->asoc.primary_destination) {
3816 					if (stcb->asoc.alternate) {
3817 						/*
3818 						 * release the alternate,
3819 						 * primary is good
3820 						 */
3821 						sctp_free_remote_addr(stcb->asoc.alternate);
3822 						stcb->asoc.alternate = NULL;
3823 					}
3824 				}
3825 				if (net->dest_state & SCTP_ADDR_PF) {
3826 					net->dest_state &= ~SCTP_ADDR_PF;
3827 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3828 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3829 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3830 					/* Done with this net */
3831 					net->net_ack = 0;
3832 				}
3833 				/* restore any doubled timers */
3834 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3835 				if (net->RTO < stcb->asoc.minrto) {
3836 					net->RTO = stcb->asoc.minrto;
3837 				}
3838 				if (net->RTO > stcb->asoc.maxrto) {
3839 					net->RTO = stcb->asoc.maxrto;
3840 				}
3841 			}
3842 		}
3843 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3844 	}
3845 	asoc->last_acked_seq = cumack;
3846 
3847 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3848 		/* nothing left in-flight */
3849 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3850 			net->flight_size = 0;
3851 			net->partial_bytes_acked = 0;
3852 		}
3853 		asoc->total_flight = 0;
3854 		asoc->total_flight_count = 0;
3855 	}
3856 	/* RWND update */
3857 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3858 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3859 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3860 		/* SWS sender side engages */
3861 		asoc->peers_rwnd = 0;
3862 	}
3863 	if (asoc->peers_rwnd > old_rwnd) {
3864 		win_probe_recovery = 1;
3865 	}
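	/*
	 * If the peer's window just reopened (rwnd grew), any chunk that
	 * was used as a window probe is pulled back onto the normal
	 * retransmit path in the loop below; otherwise probes are left
	 * alone so we keep probing the closed window.
	 */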
3866 	/* Now assure a timer where data is queued at */
3867 again:
3868 	j = 0;
3869 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3870 		int to_ticks;
3871 
3872 		if (win_probe_recovery && (net->window_probe)) {
3873 			win_probe_recovered = 1;
3874 			/*
3875 			 * Find first chunk that was used with window probe
3876 			 * and clear the sent
3877 			 */
3878 			/* sa_ignore FREED_MEMORY */
3879 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3880 				if (tp1->window_probe) {
3881 					/* move back to data send queue */
3882 					sctp_window_probe_recovery(stcb, asoc, tp1);
3883 					break;
3884 				}
3885 			}
3886 		}
3887 		if (net->RTO == 0) {
3888 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3889 		} else {
3890 			to_ticks = MSEC_TO_TICKS(net->RTO);
3891 		}
3892 		if (net->flight_size) {
3893 			j++;
3894 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3895 			    sctp_timeout_handler, &net->rxt_timer);
3896 			if (net->window_probe) {
3897 				net->window_probe = 0;
3898 			}
3899 		} else {
3900 			if (net->window_probe) {
3901 				/*
3902 				 * In window probes we must assure a timer
3903 				 * is still running there
3904 				 */
3905 				net->window_probe = 0;
3906 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3907 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3908 					    sctp_timeout_handler, &net->rxt_timer);
3909 				}
3910 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3911 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3912 				    stcb, net,
3913 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
3914 			}
3915 		}
3916 	}
3917 	if ((j == 0) &&
3918 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3919 	    (asoc->sent_queue_retran_cnt == 0) &&
3920 	    (win_probe_recovered == 0) &&
3921 	    (done_once == 0)) {
3922 		/*
3923 		 * huh, this should not happen unless all packets are
3924 		 * PR-SCTP and marked to skip, of course.
3925 		 */
3926 		if (sctp_fs_audit(asoc)) {
3927 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3928 				net->flight_size = 0;
3929 			}
3930 			asoc->total_flight = 0;
3931 			asoc->total_flight_count = 0;
3932 			asoc->sent_queue_retran_cnt = 0;
3933 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3934 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3935 					sctp_flight_size_increase(tp1);
3936 					sctp_total_flight_increase(stcb, tp1);
3937 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3938 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3939 				}
3940 			}
3941 		}
3942 		done_once = 1;
3943 		goto again;
3944 	}
3945 	/**********************************/
3946 	/* Now what about shutdown issues */
3947 	/**********************************/
3948 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3949 		/* nothing left on sendqueue.. consider done */
3950 		/* clean up */
3951 		if ((asoc->stream_queue_cnt == 1) &&
3952 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3953 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3954 		    (asoc->locked_on_sending)
3955 		    ) {
3956 			struct sctp_stream_queue_pending *sp;
3957 
3958 			/*
3959 			 * I may be in a state where we got all across.. but
3960 			 * cannot write more due to a shutdown... we abort
3961 			 * since the user did not indicate EOR in this case.
3962 			 * The sp will be cleaned during free of the asoc.
3963 			 */
3964 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3965 			    sctp_streamhead);
3966 			if ((sp) && (sp->length == 0)) {
3967 				/* Let cleanup code purge it */
3968 				if (sp->msg_is_complete) {
3969 					asoc->stream_queue_cnt--;
3970 				} else {
3971 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3972 					asoc->locked_on_sending = NULL;
3973 					asoc->stream_queue_cnt--;
3974 				}
3975 			}
3976 		}
3977 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3978 		    (asoc->stream_queue_cnt == 0)) {
3979 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3980 				/* Need to abort here */
3981 				struct mbuf *op_err;
3982 
3983 		abort_out_now:
3984 				*abort_now = 1;
3985 				/* XXX */
3986 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3987 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3988 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3989 			} else {
3990 				struct sctp_nets *netp;
3991 
3992 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3993 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3994 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3995 				}
3996 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3997 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3998 				sctp_stop_timers_for_shutdown(stcb);
3999 				if (asoc->alternate) {
4000 					netp = asoc->alternate;
4001 				} else {
4002 					netp = asoc->primary_destination;
4003 				}
4004 				sctp_send_shutdown(stcb, netp);
4005 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4006 				    stcb->sctp_ep, stcb, netp);
4007 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4008 				    stcb->sctp_ep, stcb, netp);
4009 			}
4010 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4011 		    (asoc->stream_queue_cnt == 0)) {
4012 			struct sctp_nets *netp;
4013 
4014 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4015 				goto abort_out_now;
4016 			}
4017 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4018 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4019 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4020 			sctp_stop_timers_for_shutdown(stcb);
4021 			if (asoc->alternate) {
4022 				netp = asoc->alternate;
4023 			} else {
4024 				netp = asoc->primary_destination;
4025 			}
4026 			sctp_send_shutdown_ack(stcb, netp);
4027 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4028 			    stcb->sctp_ep, stcb, netp);
4029 		}
4030 	}
4031 	/*********************************************/
4032 	/* Here we perform PR-SCTP procedures        */
4033 	/* (section 4.2)                             */
4034 	/*********************************************/
4035 	/* C1. update advancedPeerAckPoint */
4036 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4037 		asoc->advanced_peer_ack_point = cumack;
4038 	}
4039 	/* PR-Sctp issues need to be addressed too */
4040 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4041 		struct sctp_tmit_chunk *lchk;
4042 		uint32_t old_adv_peer_ack_point;
4043 
4044 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4045 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4046 		/* C3. See if we need to send a Fwd-TSN */
4047 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4048 			/*
4049 			 * ISSUE with ECN, see FWD-TSN processing.
4050 			 */
4051 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4052 				send_forward_tsn(stcb, asoc);
4053 			} else if (lchk) {
4054 				/* try to FR fwd-tsn's that get lost too */
4055 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4056 					send_forward_tsn(stcb, asoc);
4057 				}
4058 			}
4059 		}
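		/*
		 * Note: the fwd_tsn_cnt strike count (bumped each time a
		 * SACK's gap acks pass over a chunk already marked
		 * FORWARD_TSN_SKIP) is what triggers re-sending a
		 * FORWARD-TSN that was itself lost.
		 */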
4060 		if (lchk) {
4061 			/* Assure a timer is up */
4062 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4063 			    stcb->sctp_ep, stcb, lchk->whoTo);
4064 		}
4065 	}
4066 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4067 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4068 		    rwnd,
4069 		    stcb->asoc.peers_rwnd,
4070 		    stcb->asoc.total_flight,
4071 		    stcb->asoc.total_output_queue_size);
4072 	}
4073 }
4074 
4075 void
4076 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4077     struct sctp_tcb *stcb,
4078     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4079     int *abort_now, uint8_t flags,
4080     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4081 {
4082 	struct sctp_association *asoc;
4083 	struct sctp_tmit_chunk *tp1, *tp2;
4084 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4085 	uint16_t wake_him = 0;
4086 	uint32_t send_s = 0;
4087 	long j;
4088 	int accum_moved = 0;
4089 	int will_exit_fast_recovery = 0;
4090 	uint32_t a_rwnd, old_rwnd;
4091 	int win_probe_recovery = 0;
4092 	int win_probe_recovered = 0;
4093 	struct sctp_nets *net = NULL;
4094 	int done_once;
4095 	int rto_ok = 1;
4096 	uint8_t reneged_all = 0;
4097 	uint8_t cmt_dac_flag;
4098 
4099 	/*
4100 	 * we take any chance we can to service our queues since we cannot
4101 	 * get awoken when the socket is read from :<
4102 	 */
4103 	/*
4104 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4105 	 * old sack, if so discard. 2) If there is nothing left in the send
4106 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4107 	 * too, update any rwnd change and verify no timers are running.
4108 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4109 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4110 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4111 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4112 	 * sync up flightsizes and things, stop all timers and also check
4113 	 * for shutdown_pending state. If so then go ahead and send off the
4114 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4115 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4116 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4117 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4118 	 * if in shutdown_recv state.
4119 	 */
4120 	SCTP_TCB_LOCK_ASSERT(stcb);
4121 	/* CMT DAC algo */
4122 	this_sack_lowest_newack = 0;
4123 	SCTP_STAT_INCR(sctps_slowpath_sack);
4124 	last_tsn = cum_ack;
4125 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4126 #ifdef SCTP_ASOCLOG_OF_TSNS
4127 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4128 	stcb->asoc.cumack_log_at++;
4129 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4130 		stcb->asoc.cumack_log_at = 0;
4131 	}
4132 #endif
4133 	a_rwnd = rwnd;
4134 
4135 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4136 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4137 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4138 	}
4139 	old_rwnd = stcb->asoc.peers_rwnd;
4140 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4141 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4142 		    stcb->asoc.overall_error_count,
4143 		    0,
4144 		    SCTP_FROM_SCTP_INDATA,
4145 		    __LINE__);
4146 	}
4147 	stcb->asoc.overall_error_count = 0;
4148 	asoc = &stcb->asoc;
4149 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4150 		sctp_log_sack(asoc->last_acked_seq,
4151 		    cum_ack,
4152 		    0,
4153 		    num_seg,
4154 		    num_dup,
4155 		    SCTP_LOG_NEW_SACK);
4156 	}
4157 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4158 		uint16_t i;
4159 		uint32_t *dupdata, dblock;
4160 
4161 		for (i = 0; i < num_dup; i++) {
4162 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4163 			    sizeof(uint32_t), (uint8_t *) & dblock);
4164 			if (dupdata == NULL) {
4165 				break;
4166 			}
4167 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4168 		}
4169 	}
4170 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4171 		/* reality check */
4172 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4173 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4174 			    sctpchunk_listhead);
4175 			send_s = tp1->rec.data.TSN_seq + 1;
4176 		} else {
4177 			tp1 = NULL;
4178 			send_s = asoc->sending_seq;
4179 		}
4180 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4181 			struct mbuf *op_err;
4182 			char msg[SCTP_DIAG_INFO_LEN];
4183 
4184 			/*
4185 			 * no way, we have not even sent this TSN out yet.
4186 			 * Peer is hopelessly messed up with us.
4187 			 */
4188 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4189 			    cum_ack, send_s);
4190 			if (tp1) {
4191 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4192 				    tp1->rec.data.TSN_seq, (void *)tp1);
4193 			}
4194 	hopeless_peer:
4195 			*abort_now = 1;
4196 			/* XXX */
4197 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4198 			    cum_ack, send_s);
4199 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4200 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4201 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4202 			return;
4203 		}
4204 	}
4205 	/**********************/
4206 	/* 1) check the range */
4207 	/**********************/
4208 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4209 		/* acking something behind */
4210 		return;
4211 	}
4212 	/* update the Rwnd of the peer */
4213 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4214 	    TAILQ_EMPTY(&asoc->send_queue) &&
4215 	    (asoc->stream_queue_cnt == 0)) {
4216 		/* nothing left on send/sent and strmq */
4217 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4218 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4219 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4220 		}
4221 		asoc->peers_rwnd = a_rwnd;
4222 		if (asoc->sent_queue_retran_cnt) {
4223 			asoc->sent_queue_retran_cnt = 0;
4224 		}
4225 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4226 			/* SWS sender side engages */
4227 			asoc->peers_rwnd = 0;
4228 		}
4229 		/* stop any timers */
4230 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4231 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4232 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4233 			net->partial_bytes_acked = 0;
4234 			net->flight_size = 0;
4235 		}
4236 		asoc->total_flight = 0;
4237 		asoc->total_flight_count = 0;
4238 		return;
4239 	}
4240 	/*
4241 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4242 	 * things. The total byte count acked is tracked in net_ack AND
4243 	 * net_ack2 is used to track the total bytes acked that are
4244 	 * unambiguous and were never retransmitted. We track these on a per
4245 	 * destination address basis.
4246 	 */
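	/*
	 * Roughly: net_ack feeds the per-destination cwnd update in the CC
	 * module, while net_ack2 (bytes never retransmitted) additionally
	 * gates RTT sampling and, via Karn's rule further down, the
	 * clearing of error counts.
	 */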
4247 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4248 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4249 			/* Drag along the window_tsn for cwr's */
4250 			net->cwr_window_tsn = cum_ack;
4251 		}
4252 		net->prev_cwnd = net->cwnd;
4253 		net->net_ack = 0;
4254 		net->net_ack2 = 0;
4255 
4256 		/*
4257 		 * CMT: Reset CUC and Fast recovery algo variables before
4258 		 * SACK processing
4259 		 */
4260 		net->new_pseudo_cumack = 0;
4261 		net->will_exit_fast_recovery = 0;
4262 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4263 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4264 		}
4265 	}
4266 	/* process the new consecutive TSN first */
4267 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4268 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4269 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4270 				accum_moved = 1;
4271 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4272 					/*
4273 					 * If it is less than ACKED, it is
4274 					 * now no-longer in flight. Higher
4275 					 * values may occur during marking
4276 					 */
4277 					if ((tp1->whoTo->dest_state &
4278 					    SCTP_ADDR_UNCONFIRMED) &&
4279 					    (tp1->snd_count < 2)) {
4280 						/*
4281 						 * If there was no retran
4282 						 * and the address is
4283 						 * un-confirmed and we sent
4284 						 * there and are now
4285 						 * sacked... it's confirmed,
4286 						 * mark it so.
4287 						 */
4288 						tp1->whoTo->dest_state &=
4289 						    ~SCTP_ADDR_UNCONFIRMED;
4290 					}
4291 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4292 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4293 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4294 							    tp1->whoTo->flight_size,
4295 							    tp1->book_size,
4296 							    (uintptr_t) tp1->whoTo,
4297 							    tp1->rec.data.TSN_seq);
4298 						}
4299 						sctp_flight_size_decrease(tp1);
4300 						sctp_total_flight_decrease(stcb, tp1);
4301 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4302 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4303 							    tp1);
4304 						}
4305 					}
4306 					tp1->whoTo->net_ack += tp1->send_size;
4307 
4308 					/* CMT SFR and DAC algos */
4309 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4310 					tp1->whoTo->saw_newack = 1;
4311 
4312 					if (tp1->snd_count < 2) {
4313 						/*
4314 						 * True non-retransmited
4315 						 * True non-retransmitted
4316 						 */
4317 						tp1->whoTo->net_ack2 +=
4318 						    tp1->send_size;
4319 
4320 						/* update RTO too? */
4321 						if (tp1->do_rtt) {
4322 							if (rto_ok) {
4323 								tp1->whoTo->RTO =
4324 								    sctp_calculate_rto(stcb,
4325 								    asoc, tp1->whoTo,
4326 								    &tp1->sent_rcv_time,
4327 								    sctp_align_safe_nocopy,
4328 								    SCTP_RTT_FROM_DATA);
4329 								rto_ok = 0;
4330 							}
4331 							if (tp1->whoTo->rto_needed == 0) {
4332 								tp1->whoTo->rto_needed = 1;
4333 							}
4334 							tp1->do_rtt = 0;
4335 						}
4336 					}
4337 					/*
4338 					 * CMT: CUCv2 algorithm. From the
4339 					 * cumack'd TSNs, for each TSN being
4340 					 * acked for the first time, set the
4341 					 * following variables for the
4342 					 * corresp destination.
4343 					 * new_pseudo_cumack will trigger a
4344 					 * cwnd update.
4345 					 * find_(rtx_)pseudo_cumack will
4346 					 * trigger search for the next
4347 					 * expected (rtx-)pseudo-cumack.
4348 					 */
4349 					tp1->whoTo->new_pseudo_cumack = 1;
4350 					tp1->whoTo->find_pseudo_cumack = 1;
4351 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4352 
4353 
4354 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4355 						sctp_log_sack(asoc->last_acked_seq,
4356 						    cum_ack,
4357 						    tp1->rec.data.TSN_seq,
4358 						    0,
4359 						    0,
4360 						    SCTP_LOG_TSN_ACKED);
4361 					}
4362 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4363 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4364 					}
4365 				}
4366 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4367 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4368 #ifdef SCTP_AUDITING_ENABLED
4369 					sctp_audit_log(0xB3,
4370 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4371 #endif
4372 				}
4373 				if (tp1->rec.data.chunk_was_revoked) {
4374 					/* deflate the cwnd */
4375 					tp1->whoTo->cwnd -= tp1->book_size;
4376 					tp1->rec.data.chunk_was_revoked = 0;
4377 				}
4378 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4379 					tp1->sent = SCTP_DATAGRAM_ACKED;
4380 				}
4381 			}
4382 		} else {
4383 			break;
4384 		}
4385 	}
4386 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4387 	/* always set this up to cum-ack */
4388 	asoc->this_sack_highest_gap = last_tsn;
4389 
4390 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4391 
4392 		/*
4393 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4394 		 * to be greater than the cumack. Also reset saw_newack to 0
4395 		 * for all dests.
4396 		 */
4397 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4398 			net->saw_newack = 0;
4399 			net->this_sack_highest_newack = last_tsn;
4400 		}
4401 
4402 		/*
4403 		 * this_sack_highest_gap will increase while handling NEW
4404 		 * segments; this_sack_highest_newack will increase while
4405 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4406 		 * used for CMT DAC algo. saw_newack will also change.
4407 		 */
4408 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4409 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4410 		    num_seg, num_nr_seg, &rto_ok)) {
4411 			wake_him++;
4412 		}
4413 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4414 			/*
4415 			 * validate the biggest_tsn_acked in the gap acks if
4416 			 * strict adherence is wanted.
4417 			 */
4418 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4419 				/*
4420 				 * peer is either confused or we are under
4421 				 * attack. We must abort.
4422 				 */
4423 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4424 				    biggest_tsn_acked, send_s);
4425 				goto hopeless_peer;
4426 			}
4427 		}
4428 	}
4429 	/*******************************************/
4430 	/* cancel ALL T3-send timer if accum moved */
4431 	/*******************************************/
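	/*
	 * Under CMT each destination's T3 timer is stopped only when that
	 * destination's own pseudo-cumack advanced; without CMT every T3
	 * timer is stopped whenever the association-wide cum-ack moved.
	 */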
4432 	if (asoc->sctp_cmt_on_off > 0) {
4433 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4434 			if (net->new_pseudo_cumack)
4435 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4436 				    stcb, net,
4437 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4438 
4439 		}
4440 	} else {
4441 		if (accum_moved) {
4442 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4443 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4444 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4445 			}
4446 		}
4447 	}
4448 	/********************************************/
4449 	/* drop the acked chunks from the sentqueue */
4450 	/********************************************/
4451 	asoc->last_acked_seq = cum_ack;
4452 
4453 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4454 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4455 			break;
4456 		}
4457 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4458 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4459 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4460 #ifdef INVARIANTS
4461 			} else {
4462 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4463 #endif
4464 			}
4465 		}
4466 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4467 		if (PR_SCTP_ENABLED(tp1->flags)) {
4468 			if (asoc->pr_sctp_cnt != 0)
4469 				asoc->pr_sctp_cnt--;
4470 		}
4471 		asoc->sent_queue_cnt--;
4472 		if (tp1->data) {
4473 			/* sa_ignore NO_NULL_CHK */
4474 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4475 			sctp_m_freem(tp1->data);
4476 			tp1->data = NULL;
4477 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4478 				asoc->sent_queue_cnt_removeable--;
4479 			}
4480 		}
4481 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4482 			sctp_log_sack(asoc->last_acked_seq,
4483 			    cum_ack,
4484 			    tp1->rec.data.TSN_seq,
4485 			    0,
4486 			    0,
4487 			    SCTP_LOG_FREE_SENT);
4488 		}
4489 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4490 		wake_him++;
4491 	}
4492 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4493 #ifdef INVARIANTS
4494 		panic("Warning flight size is positive and should be 0");
4495 #else
4496 		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4497 		    asoc->total_flight);
4498 #endif
4499 		asoc->total_flight = 0;
4500 	}
4501 	/* sa_ignore NO_NULL_CHK */
4502 	if ((wake_him) && (stcb->sctp_socket)) {
4503 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4504 		struct socket *so;
4505 
4506 #endif
4507 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4508 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4509 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4510 		}
4511 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4512 		so = SCTP_INP_SO(stcb->sctp_ep);
4513 		atomic_add_int(&stcb->asoc.refcnt, 1);
4514 		SCTP_TCB_UNLOCK(stcb);
4515 		SCTP_SOCKET_LOCK(so, 1);
4516 		SCTP_TCB_LOCK(stcb);
4517 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4518 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4519 			/* assoc was freed while we were unlocked */
4520 			SCTP_SOCKET_UNLOCK(so, 1);
4521 			return;
4522 		}
4523 #endif
4524 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4525 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4526 		SCTP_SOCKET_UNLOCK(so, 1);
4527 #endif
4528 	} else {
4529 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4530 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4531 		}
4532 	}
4533 
4534 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4535 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4536 			/* Setup so we will exit RFC2582 fast recovery */
4537 			will_exit_fast_recovery = 1;
4538 		}
4539 	}
4540 	/*
4541 	 * Check for revoked fragments:
4542 	 *
4543 	 * if the previous sack had no frags then we can't have any revoked.
4544 	 * if the previous sack had frags then: if we now have frags (aka
4545 	 * num_seg > 0) call sctp_check_for_revoked() to tell if the peer
4546 	 * revoked some of them; else the peer revoked all ACKED fragments,
4547 	 * since we had some before and now we have NONE.
4548 	 */
4549 
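	/*
	 * Example: TSN 20 was covered by a gap block in the previous SACK
	 * and marked ACKED; this SACK carries no gap blocks at all, so the
	 * receiver has reneged and every such chunk is put back in flight
	 * below (with an artificial cwnd credit so the refill does not
	 * stall the sender).
	 */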
4550 	if (num_seg) {
4551 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4552 		asoc->saw_sack_with_frags = 1;
4553 	} else if (asoc->saw_sack_with_frags) {
4554 		int cnt_revoked = 0;
4555 
4556 		/* Peer revoked all dg's marked or acked */
4557 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4558 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4559 				tp1->sent = SCTP_DATAGRAM_SENT;
4560 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4561 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4562 					    tp1->whoTo->flight_size,
4563 					    tp1->book_size,
4564 					    (uintptr_t) tp1->whoTo,
4565 					    tp1->rec.data.TSN_seq);
4566 				}
4567 				sctp_flight_size_increase(tp1);
4568 				sctp_total_flight_increase(stcb, tp1);
4569 				tp1->rec.data.chunk_was_revoked = 1;
4570 				/*
4571 				 * To ensure that this increase in
4572 				 * flightsize, which is artificial, does not
4573 				 * throttle the sender, we also increase the
4574 				 * cwnd artificially.
4575 				 */
4576 				tp1->whoTo->cwnd += tp1->book_size;
4577 				cnt_revoked++;
4578 			}
4579 		}
4580 		if (cnt_revoked) {
4581 			reneged_all = 1;
4582 		}
4583 		asoc->saw_sack_with_frags = 0;
4584 	}
4585 	if (num_nr_seg > 0)
4586 		asoc->saw_sack_with_nr_frags = 1;
4587 	else
4588 		asoc->saw_sack_with_nr_frags = 0;
4589 
4590 	/* JRS - Use the congestion control given in the CC module */
4591 	if (ecne_seen == 0) {
4592 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4593 			if (net->net_ack2 > 0) {
4594 				/*
4595 				 * Karn's rule applies to clearing error
4596 				 * count, this is optional.
4597 				 */
4598 				net->error_count = 0;
4599 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4600 					/* addr came good */
4601 					net->dest_state |= SCTP_ADDR_REACHABLE;
4602 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4603 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4604 				}
4605 				if (net == stcb->asoc.primary_destination) {
4606 					if (stcb->asoc.alternate) {
4607 						/*
4608 						 * release the alternate,
4609 						 * primary is good
4610 						 */
4611 						sctp_free_remote_addr(stcb->asoc.alternate);
4612 						stcb->asoc.alternate = NULL;
4613 					}
4614 				}
4615 				if (net->dest_state & SCTP_ADDR_PF) {
4616 					net->dest_state &= ~SCTP_ADDR_PF;
4617 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4618 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4619 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4620 					/* Done with this net */
4621 					net->net_ack = 0;
4622 				}
4623 				/* restore any doubled timers */
4624 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4625 				if (net->RTO < stcb->asoc.minrto) {
4626 					net->RTO = stcb->asoc.minrto;
4627 				}
4628 				if (net->RTO > stcb->asoc.maxrto) {
4629 					net->RTO = stcb->asoc.maxrto;
4630 				}
4631 			}
4632 		}
4633 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4634 	}
4635 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4636 		/* nothing left in-flight */
4637 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 			/* stop all timers */
4639 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4640 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4641 			net->flight_size = 0;
4642 			net->partial_bytes_acked = 0;
4643 		}
4644 		asoc->total_flight = 0;
4645 		asoc->total_flight_count = 0;
4646 	}
4647 	/**********************************/
4648 	/* Now what about shutdown issues */
4649 	/**********************************/
4650 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4651 		/* nothing left on sendqueue.. consider done */
4652 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4653 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4654 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4655 		}
4656 		asoc->peers_rwnd = a_rwnd;
4657 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4658 			/* SWS sender side engages */
4659 			asoc->peers_rwnd = 0;
4660 		}
4661 		/* clean up */
4662 		if ((asoc->stream_queue_cnt == 1) &&
4663 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4664 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4665 		    (asoc->locked_on_sending)
4666 		    ) {
4667 			struct sctp_stream_queue_pending *sp;
4668 
4669 			/*
4670 			 * We may be in a state where everything got across but
4671 			 * no more can be written due to a shutdown. We abort
4672 			 * since the user did not indicate EOR in this case.
4673 			 */
4674 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4675 			    sctp_streamhead);
4676 			if ((sp) && (sp->length == 0)) {
4677 				asoc->locked_on_sending = NULL;
4678 				if (sp->msg_is_complete) {
4679 					asoc->stream_queue_cnt--;
4680 				} else {
4681 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4682 					asoc->stream_queue_cnt--;
4683 				}
4684 			}
4685 		}
4686 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4687 		    (asoc->stream_queue_cnt == 0)) {
4688 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4689 				/* Need to abort here */
4690 				struct mbuf *op_err;
4691 
4692 		abort_out_now:
4693 				*abort_now = 1;
4694 				/* XXX */
4695 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4696 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4697 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4698 				return;
4699 			} else {
4700 				struct sctp_nets *netp;
4701 
4702 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4703 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4704 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4705 				}
4706 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4707 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4708 				sctp_stop_timers_for_shutdown(stcb);
4709 				if (asoc->alternate) {
4710 					netp = asoc->alternate;
4711 				} else {
4712 					netp = asoc->primary_destination;
4713 				}
4714 				sctp_send_shutdown(stcb, netp);
4715 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4716 				    stcb->sctp_ep, stcb, netp);
4717 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4718 				    stcb->sctp_ep, stcb, netp);
4719 			}
4720 			return;
4721 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4722 		    (asoc->stream_queue_cnt == 0)) {
4723 			struct sctp_nets *netp;
4724 
4725 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4726 				goto abort_out_now;
4727 			}
4728 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4729 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4730 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4731 			sctp_stop_timers_for_shutdown(stcb);
4732 			if (asoc->alternate) {
4733 				netp = asoc->alternate;
4734 			} else {
4735 				netp = asoc->primary_destination;
4736 			}
4737 			sctp_send_shutdown_ack(stcb, netp);
4738 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4739 			    stcb->sctp_ep, stcb, netp);
4740 			return;
4741 		}
4742 	}
4743 	/*
4744 	 * Now here we are going to recycle net_ack for a different use...
4745 	 * HEADS UP.
4746 	 */
4747 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4748 		net->net_ack = 0;
4749 	}
4750 
4751 	/*
4752 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4753 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4754 	 * automatically ensure that.
4755 	 */
4756 	if ((asoc->sctp_cmt_on_off > 0) &&
4757 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4758 	    (cmt_dac_flag == 0)) {
4759 		this_sack_lowest_newack = cum_ack;
4760 	}
4761 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4762 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4763 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4764 	}
4765 	/* JRS - Use the congestion control given in the CC module */
4766 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4767 
4768 	/* Now are we exiting loss recovery ? */
4769 	if (will_exit_fast_recovery) {
4770 		/* Ok, we must exit fast recovery */
4771 		asoc->fast_retran_loss_recovery = 0;
4772 	}
4773 	if ((asoc->sat_t3_loss_recovery) &&
4774 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4775 		/* end satellite t3 loss recovery */
4776 		asoc->sat_t3_loss_recovery = 0;
4777 	}
4778 	/*
4779 	 * CMT Fast recovery
4780 	 */
4781 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4782 		if (net->will_exit_fast_recovery) {
4783 			/* Ok, we must exit fast recovery */
4784 			net->fast_retran_loss_recovery = 0;
4785 		}
4786 	}
4787 
4788 	/* Adjust and set the new rwnd value */
4789 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4790 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4791 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4792 	}
4793 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4794 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4795 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4796 		/* SWS sender side engages */
4797 		asoc->peers_rwnd = 0;
4798 	}
4799 	if (asoc->peers_rwnd > old_rwnd) {
4800 		win_probe_recovery = 1;
4801 	}
4802 	/*
4803 	 * Now we must setup so we have a timer up for anyone with
4804 	 * outstanding data.
4805 	 */
4806 	done_once = 0;
4807 again:
4808 	j = 0;
4809 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4810 		if (win_probe_recovery && (net->window_probe)) {
4811 			win_probe_recovered = 1;
4812 			/*-
4813 			 * Find first chunk that was used with
4814 			 * window probe and clear the event. Put
4815 			 * it back into the send queue as if it had
4816 			 * not been sent.
4817 			 */
4818 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4819 				if (tp1->window_probe) {
4820 					sctp_window_probe_recovery(stcb, asoc, tp1);
4821 					break;
4822 				}
4823 			}
4824 		}
4825 		if (net->flight_size) {
4826 			j++;
4827 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4828 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4829 				    stcb->sctp_ep, stcb, net);
4830 			}
4831 			if (net->window_probe) {
4832 				net->window_probe = 0;
4833 			}
4834 		} else {
4835 			if (net->window_probe) {
4836 				/*
4837 				 * For window probes we must ensure a timer
4838 				 * is still running there.
4839 				 */
4840 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4841 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4842 					    stcb->sctp_ep, stcb, net);
4843 
4844 				}
4845 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4846 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4847 				    stcb, net,
4848 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4849 			}
4850 		}
4851 	}
4852 	if ((j == 0) &&
4853 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4854 	    (asoc->sent_queue_retran_cnt == 0) &&
4855 	    (win_probe_recovered == 0) &&
4856 	    (done_once == 0)) {
4857 		/*
4858 		 * This should not happen unless all packets are
4859 		 * PR-SCTP and marked to skip, of course.
4860 		 */
4861 		if (sctp_fs_audit(asoc)) {
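			/*
			 * The audit found an inconsistency: rebuild the
			 * flight-size accounting from the sent queue.
			 */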
4862 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4863 				net->flight_size = 0;
4864 			}
4865 			asoc->total_flight = 0;
4866 			asoc->total_flight_count = 0;
4867 			asoc->sent_queue_retran_cnt = 0;
4868 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4869 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4870 					sctp_flight_size_increase(tp1);
4871 					sctp_total_flight_increase(stcb, tp1);
4872 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4873 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4874 				}
4875 			}
4876 		}
4877 		done_once = 1;
4878 		goto again;
4879 	}
4880 	/*********************************************/
4881 	/* Here we perform PR-SCTP procedures        */
4882 	/* (section 4.2)                             */
4883 	/*********************************************/
4884 	/* C1. update advancedPeerAckPoint */
4885 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4886 		asoc->advanced_peer_ack_point = cum_ack;
4887 	}
4888 	/* C2. try to further move advancedPeerAckPoint ahead */
4889 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4890 		struct sctp_tmit_chunk *lchk;
4891 		uint32_t old_adv_peer_ack_point;
4892 
4893 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4894 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4895 		/* C3. See if we need to send a Fwd-TSN */
4896 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4897 			/*
4898 			 * ISSUE with ECN, see FWD-TSN processing.
4899 			 */
4900 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4901 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4902 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4903 				    old_adv_peer_ack_point);
4904 			}
4905 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4906 				send_forward_tsn(stcb, asoc);
4907 			} else if (lchk) {
4908 				/* try to FR fwd-tsn's that get lost too */
4909 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4910 					send_forward_tsn(stcb, asoc);
4911 				}
4912 			}
4913 		}
4914 		if (lchk) {
4915 			/* Assure a timer is up */
4916 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4917 			    stcb->sctp_ep, stcb, lchk->whoTo);
4918 		}
4919 	}
4920 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4921 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4922 		    a_rwnd,
4923 		    stcb->asoc.peers_rwnd,
4924 		    stcb->asoc.total_flight,
4925 		    stcb->asoc.total_output_queue_size);
4926 	}
4927 }
4928 
4929 void
4930 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4931 {
4932 	/* Copy cum-ack */
4933 	uint32_t cum_ack, a_rwnd;
4934 
4935 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4936 	/* Arrange so a_rwnd does NOT change */
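	/*
	 * A SHUTDOWN chunk carries only a cumulative TSN ack and no window
	 * advertisement, so synthesize an a_rwnd that leaves our estimate
	 * of the peer's window unchanged.
	 */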
4937 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4938 
4939 	/* Now call the express sack handling */
4940 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4941 }
4942 
4943 static void
4944 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4945     struct sctp_stream_in *strmin)
4946 {
4947 	struct sctp_queued_to_read *ctl, *nctl;
4948 	struct sctp_association *asoc;
4949 	uint16_t tt;
4950 
4951 	asoc = &stcb->asoc;
4952 	tt = strmin->last_sequence_delivered;
4953 	/*
4954 	 * First deliver anything prior to and including the stream
4955 	 * sequence number that came in.
4956 	 */
4957 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4958 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4959 			/* this is deliverable now */
4960 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4961 			/* subtract pending on streams */
4962 			asoc->size_on_all_streams -= ctl->length;
4963 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4964 			/* deliver it to at least the delivery-q */
4965 			if (stcb->sctp_socket) {
4966 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4967 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4968 				    ctl,
4969 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4970 			}
4971 		} else {
4972 			/* no more delivery now. */
4973 			break;
4974 		}
4975 	}
4976 	/*
4977 	 * Now we must deliver things in the queue the normal way, if
4978 	 * any are now ready.
4979 	 */
4980 	tt = strmin->last_sequence_delivered + 1;
4981 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4982 		if (tt == ctl->sinfo_ssn) {
4983 			/* this is deliverable now */
4984 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4985 			/* subtract pending on streams */
4986 			asoc->size_on_all_streams -= ctl->length;
4987 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4988 			/* deliver it to at least the delivery-q */
4989 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4990 			if (stcb->sctp_socket) {
4991 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4992 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4993 				    ctl,
4994 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4995 
4996 			}
4997 			tt = strmin->last_sequence_delivered + 1;
4998 		} else {
4999 			break;
5000 		}
5001 	}
5002 }
5003 
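/*
 * Toss everything on the reassembly queue that belongs to the given
 * stream/sequence pair, which a FWD-TSN has told us to skip.
 */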
5004 static void
5005 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5006     struct sctp_association *asoc,
5007     uint16_t stream, uint16_t seq)
5008 {
5009 	struct sctp_tmit_chunk *chk, *nchk;
5010 
5011 	/* For each one on here see if we need to toss it */
5012 	/*
5013 	 * For now, large messages held on the reasmqueue that are complete
5014 	 * will be tossed too. We could in theory do more work: spin
5015 	 * through, stop after dumping one message (i.e. on seeing the start
5016 	 * of a new message at the head), and call the delivery function to
5017 	 * see if it can be delivered. But for now we just dump everything
5018 	 * on the queue.
5019 	 */
5020 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5021 		/*
5022 		 * Do not toss it if on a different stream or marked for
5023 		 * unordered delivery in which case the stream sequence
5024 		 * number has no meaning.
5025 		 */
5026 		if ((chk->rec.data.stream_number != stream) ||
5027 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5028 			continue;
5029 		}
5030 		if (chk->rec.data.stream_seq == seq) {
5031 			/* It needs to be tossed */
5032 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5033 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5034 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5035 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5036 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5037 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5038 			}
5039 			asoc->size_on_reasm_queue -= chk->send_size;
5040 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5041 
5042 			/* Clear up any stream problem */
5043 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5044 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5045 				/*
5046 				 * We must move this stream's sequence
5047 				 * number forward if the chunk being
5048 				 * skipped is not unordered. There is a
5049 				 * chance that if the peer does not
5050 				 * include the last fragment in its FWD-TSN
5051 				 * we WILL have a problem here, since we
5052 				 * would have a partial chunk in queue that
5053 				 * may not be deliverable. Also, if a partial
5054 				 * delivery API has started, the user may get
5055 				 * a partial chunk, with the next read
5056 				 * returning a new chunk... really ugly, but I
5057 				 * see no way around it! Maybe a notify??
5058 				 */
5059 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5060 			}
5061 			if (chk->data) {
5062 				sctp_m_freem(chk->data);
5063 				chk->data = NULL;
5064 			}
5065 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5066 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5067 			/*
5068 			 * If the stream_seq is > than the purging one, we
5069 			 * are done
5070 			 */
5071 			break;
5072 		}
5073 	}
5074 }
5075 
5076 
5077 void
5078 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5079     struct sctp_forward_tsn_chunk *fwd,
5080     int *abort_flag, struct mbuf *m, int offset)
5081 {
5082 	/* The pr-sctp fwd tsn */
5083 	/*
5084 	 * Here we will perform all the data receiver side steps for
5085 	 * processing FwdTSN, as required by the pr-sctp draft.
5086 	 *
5087 	 * Assume we get FwdTSN(x):
5088 	 * 1) update local cumTSN to x
5089 	 * 2) try to further advance cumTSN to x + others we have
5090 	 * 3) examine and update the re-ordering queue on pr-in-streams
5091 	 * 4) clean up the re-assembly queue
5092 	 * 5) send a SACK to report where we are
5093 	 */
5094 	struct sctp_association *asoc;
5095 	uint32_t new_cum_tsn, gap;
5096 	unsigned int i, fwd_sz, m_size;
5097 	uint32_t str_seq;
5098 	struct sctp_stream_in *strm;
5099 	struct sctp_tmit_chunk *chk, *nchk;
5100 	struct sctp_queued_to_read *ctl, *sv;
5101 
5102 	asoc = &stcb->asoc;
5103 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5104 		SCTPDBG(SCTP_DEBUG_INDATA1,
5105 		    "Bad size, fwd-tsn chunk too small\n");
5106 		return;
5107 	}
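	/* The mapping array is sized in bytes; m_size is its capacity in TSNs (bits). */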
5108 	m_size = (stcb->asoc.mapping_array_size << 3);
5109 	/*************************************************************/
5110 	/* 1. Here we update local cumTSN and shift the bitmap array */
5111 	/*************************************************************/
5112 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5113 
5114 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5115 		/* Already got there ... */
5116 		return;
5117 	}
5118 	/*
5119 	 * now we know the new TSN is more advanced, let's find the actual
5120 	 * gap
5121 	 */
5122 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5123 	asoc->cumulative_tsn = new_cum_tsn;
5124 	if (gap >= m_size) {
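		/*
		 * The new cumulative TSN lies beyond the current mapping
		 * window; sanity-check it and, if plausible, reset both maps.
		 */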
5125 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5126 			struct mbuf *op_err;
5127 			char msg[SCTP_DIAG_INFO_LEN];
5128 
5129 			/*
5130 			 * Out of range (of single-byte chunks in the rwnd we
5131 			 * give out). This must be an attacker.
5132 			 */
5133 			*abort_flag = 1;
5134 			snprintf(msg, sizeof(msg),
5135 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5136 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5137 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5138 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5139 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5140 			return;
5141 		}
5142 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5143 
5144 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5145 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5146 		asoc->highest_tsn_inside_map = new_cum_tsn;
5147 
5148 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5149 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5150 
5151 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5152 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5153 		}
5154 	} else {
5155 		SCTP_TCB_LOCK_ASSERT(stcb);
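		/*
		 * Mark every TSN up to and including the new cumulative TSN
		 * as received and non-renegable.
		 */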
5156 		for (i = 0; i <= gap; i++) {
5157 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5158 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5159 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5160 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5161 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5162 				}
5163 			}
5164 		}
5165 	}
5166 	/*************************************************************/
5167 	/* 2. Clear up re-assembly queue                             */
5168 	/*************************************************************/
5169 	/*
5170 	 * First service it if the pd-api is active, just in case we can
5171 	 * progress it forward.
5172 	 */
5173 	if (asoc->fragmented_delivery_inprogress) {
5174 		sctp_service_reassembly(stcb, asoc);
5175 	}
5176 	/* For each one on here see if we need to toss it */
5177 	/*
5178 	 * For now, large messages held on the reasmqueue that are complete
5179 	 * will be tossed too. We could in theory do more work: spin
5180 	 * through, stop after dumping one message (i.e. on seeing the start
5181 	 * of a new message at the head), and call the delivery function to
5182 	 * see if it can be delivered. But for now we just dump everything
5183 	 * on the queue.
5184 	 */
5185 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5186 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5187 			/* It needs to be tossed */
5188 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5189 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5190 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5191 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5192 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5193 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5194 			}
5195 			asoc->size_on_reasm_queue -= chk->send_size;
5196 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5197 
5198 			/* Clear up any stream problem */
5199 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5200 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5201 				/*
5202 				 * We must move this stream's sequence
5203 				 * number forward if the chunk being
5204 				 * skipped is not unordered. There is a
5205 				 * chance that if the peer does not
5206 				 * include the last fragment in its FWD-TSN
5207 				 * we WILL have a problem here, since we
5208 				 * would have a partial chunk in queue that
5209 				 * may not be deliverable. Also, if a partial
5210 				 * delivery API has started, the user may get
5211 				 * a partial chunk, with the next read
5212 				 * returning a new chunk... really ugly, but I
5213 				 * see no way around it! Maybe a notify??
5214 				 */
5215 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5216 			}
5217 			if (chk->data) {
5218 				sctp_m_freem(chk->data);
5219 				chk->data = NULL;
5220 			}
5221 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5222 		} else {
5223 			/*
5224 			 * Ok we have gone beyond the end of the fwd-tsn's
5225 			 * mark.
5226 			 */
5227 			break;
5228 		}
5229 	}
5230 	/*******************************************************/
5231 	/* 3. Update the PR-stream re-ordering queues and fix  */
5232 	/* delivery issues as needed.                           */
5233 	/*******************************************************/
5234 	fwd_sz -= sizeof(*fwd);
5235 	if (m && fwd_sz) {
5236 		/* New method. */
5237 		unsigned int num_str;
5238 		struct sctp_strseq *stseq, strseqbuf;
5239 
5240 		offset += sizeof(*fwd);
5241 
5242 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5243 		num_str = fwd_sz / sizeof(struct sctp_strseq);
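		/*
		 * Each sctp_strseq entry names a stream and the highest
		 * ordered SSN being skipped in it.
		 */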
5244 		for (i = 0; i < num_str; i++) {
5245 			uint16_t st;
5246 
5247 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5248 			    sizeof(struct sctp_strseq),
5249 			    (uint8_t *) & strseqbuf);
5250 			offset += sizeof(struct sctp_strseq);
5251 			if (stseq == NULL) {
5252 				break;
5253 			}
5254 			/* Convert */
5255 			st = ntohs(stseq->stream);
5256 			stseq->stream = st;
5257 			st = ntohs(stseq->sequence);
5258 			stseq->sequence = st;
5259 
5260 			/* now process */
5261 
5262 			/*
5263 			 * Ok we now look for the stream/seq on the read
5264 			 * queue where it's not all delivered. If we find it,
5265 			 * we transmute the read entry into a PDI_ABORTED.
5266 			 */
5267 			if (stseq->stream >= asoc->streamincnt) {
5268 				/* screwed up streams, stop!  */
5269 				break;
5270 			}
5271 			if ((asoc->str_of_pdapi == stseq->stream) &&
5272 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5273 				/*
5274 				 * If this is the one we were partially
5275 				 * delivering now then we no longer are.
5276 				 * Note this will change with the reassembly
5277 				 * re-write.
5278 				 */
5279 				asoc->fragmented_delivery_inprogress = 0;
5280 			}
5281 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5282 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5283 				if ((ctl->sinfo_stream == stseq->stream) &&
5284 				    (ctl->sinfo_ssn == stseq->sequence)) {
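					/* Pack stream and SSN into one word for the notification. */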
5285 					str_seq = (stseq->stream << 16) | stseq->sequence;
5286 					ctl->end_added = 1;
5287 					ctl->pdapi_aborted = 1;
5288 					sv = stcb->asoc.control_pdapi;
5289 					stcb->asoc.control_pdapi = ctl;
5290 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5291 					    stcb,
5292 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5293 					    (void *)&str_seq,
5294 					    SCTP_SO_NOT_LOCKED);
5295 					stcb->asoc.control_pdapi = sv;
5296 					break;
5297 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5298 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5299 					/* We are past our victim SSN */
5300 					break;
5301 				}
5302 			}
5303 			strm = &asoc->strmin[stseq->stream];
5304 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5305 				/* Update the sequence number */
5306 				strm->last_sequence_delivered = stseq->sequence;
5307 			}
5308 			/* now kick the stream the new way */
5309 			/* sa_ignore NO_NULL_CHK */
5310 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5311 		}
5312 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5313 	}
5314 	/*
5315 	 * Now slide the mapping arrays forward.
5316 	 */
5317 	sctp_slide_mapping_arrays(stcb);
5318 
5319 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5320 		/* now lets kick out and check for more fragmented delivery */
5321 		/* sa_ignore NO_NULL_CHK */
5322 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5323 	}
5324 }
5325