/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if we should generate a SACK into the chunk queue (if we have data to
 * send, that is, so the SACK can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket, since
	 * sb_cc is the count that everyone has put up. When we rewrite
	 * sctp_soreceive we will fix this so that ONLY this association's
	 * data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue but that we
	 * still hold for delivery.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* account for the overhead of all the rwnd control data */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, clamp it to
	 * 1 even if it computed to 0, so SWS avoidance stays engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
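/*
 * Illustrative example (hypothetical numbers): with a 64000 byte receive
 * buffer, 4 chunks of 1000 bytes sitting on the reassembly queue, and
 * nothing on the stream queues, sctp_sbspace() might report roughly 60000;
 * we then subtract 4 * 1000 + 4 * MSIZE for the held-but-undelivered data
 * and its mbuf overhead, and finally charge my_rwnd_control_len, before
 * advertising the result as our rwnd.
 */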



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry from a queued tmit chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}
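	/*
	 * Illustrative example (assuming only RECVRCVINFO and plain
	 * RECVDATAIOEVNT are enabled): len = CMSG_SPACE(sizeof(struct
	 * sctp_rcvinfo)) + CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)),
	 * i.e. room for two cmsghdr-prefixed, padded records laid out
	 * back to back in the single mbuf allocated below.
	 */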
	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
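	/*
	 * SCTP_CALC_TSN_TO_GAP yields the bit offset of tsn relative to
	 * mapping_array_base_tsn using serial number arithmetic; e.g. a
	 * base of 0xfffffffe and a tsn of 0x00000001 give gap 3
	 * (illustrative values), so the lookup below stays correct
	 * across TSN wrap.
	 */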
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are currently delivering from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* the socket above us is long gone or going away */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * The chunk is ordered but is not the next sequence
			 * to deliver in its stream.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we run out of in-order chunks.
 * One big question still remains: what do we do when the socket buffer is
 * FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here,
				 * since we can always hold the chunk on the
				 * d-queue, and only a finite number can be
				 * delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue, and a 1 if all of the message
 * is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
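
/*
 * Illustrative example for sctp_is_all_msg_on_reasm() (hypothetical TSNs):
 * with fragments 10, 11 and 12 queued, where 10 carries FIRST_FRAG and 12
 * carries LAST_FRAG, it returns 1 with *t_size the sum of the three
 * send_sizes; if 11 were missing, it would return 0 with *t_size counting
 * only fragment 10.
 */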

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
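			/*
			 * e.g. with a 64000 byte receive buffer limit and a
			 * partial_delivery_point of 4096 (illustrative
			 * values), pd_point is 4096: partial delivery kicks
			 * in once at least that much of the message has
			 * been reassembled, or sooner if the whole message
			 * is already on the queue.
			 */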
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * doit_again label.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< ... more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to sit right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
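
/*
 * Illustrative example for sctp_does_tsn_belong_to_reasm() (hypothetical
 * TSNs): with a MIDDLE fragment of TSN 100 on the queue, a newly arrived
 * chunk with TSN 101 (or 99) must itself be part of that same fragmented
 * message, so the routine returns 1 and the caller can reject the chunk
 * as suspect.
 */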


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
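	/*
	 * SCTP_MAPPING_ARRAY is the maximum mapping array size in bytes, so
	 * << 3 converts it to the number of TSN bits it can track; a gap
	 * beyond that hard limit was dropped above (the peer will
	 * retransmit), while a gap that merely exceeds the current
	 * mapping_array_size is handled below by growing the array.
	 */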
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
1530 		/*
1531 		 * Wait a minute, this guy is gone; there is no longer a
1532 		 * receiver. Send the peer an ABORT!
1533 		 */
1534 		struct mbuf *op_err;
1535 
1536 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1537 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1538 		*abort_flag = 1;
1539 		return (0);
1540 	}
1541 	/*
1542 	 * Now before going further we see if there is room. If NOT then we
1543 	 * MAY let one through only IF this TSN is the one we are waiting
1544 	 * for on a partial delivery API.
1545 	 */
1546 
1547 	/* now do the tests */
1548 	if (((asoc->cnt_on_all_streams +
1549 	    asoc->cnt_on_reasm_queue +
1550 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1551 	    (((int)asoc->my_rwnd) <= 0)) {
1552 		/*
1553 		 * When we have NO room in the rwnd we check to make sure
1554 		 * the reader is doing its job...
1555 		 */
1556 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1557 			/* some to read, wake-up */
1558 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1559 			struct socket *so;
1560 
1561 			so = SCTP_INP_SO(stcb->sctp_ep);
1562 			atomic_add_int(&stcb->asoc.refcnt, 1);
1563 			SCTP_TCB_UNLOCK(stcb);
1564 			SCTP_SOCKET_LOCK(so, 1);
1565 			SCTP_TCB_LOCK(stcb);
1566 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1567 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1568 				/* assoc was freed while we were unlocked */
1569 				SCTP_SOCKET_UNLOCK(so, 1);
1570 				return (0);
1571 			}
1572 #endif
1573 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1574 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1575 			SCTP_SOCKET_UNLOCK(so, 1);
1576 #endif
1577 		}
1578 		/* now is it in the mapping array of what we have accepted? */
1579 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1580 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1581 			/* Nope, not in the valid range; dump it */
1582 			sctp_set_rwnd(stcb, asoc);
1583 			if ((asoc->cnt_on_all_streams +
1584 			    asoc->cnt_on_reasm_queue +
1585 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1586 				SCTP_STAT_INCR(sctps_datadropchklmt);
1587 			} else {
1588 				SCTP_STAT_INCR(sctps_datadroprwnd);
1589 			}
1591 			*break_flag = 1;
1592 			return (0);
1593 		}
1594 	}
1595 	strmno = ntohs(ch->dp.stream_id);
1596 	if (strmno >= asoc->streamincnt) {
1597 		struct sctp_paramhdr *phdr;
1598 		struct mbuf *mb;
1599 
1600 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1601 		    0, M_DONTWAIT, 1, MT_DATA);
1602 		if (mb != NULL) {
1603 			/* add some space up front so prepend will work well */
1604 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1605 			phdr = mtod(mb, struct sctp_paramhdr *);
1606 			/*
1607 			 * Error causes are formatted like parameters. This
1608 			 * one has two back-to-back paramhdrs: one with the
1609 			 * error type and size, one with the stream id and a reserved field.
1610 			 */
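			/*
			 * On the wire the resulting error cause looks
			 * roughly like this (a sketch; total length 8):
			 *
			 *	+----------------------+----------------+
			 *	| cause INVALID_STREAM | length = 8     |
			 *	+----------------------+----------------+
			 *	| stream id            | reserved = 0   |
			 *	+----------------------+----------------+
			 */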
1611 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1612 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1613 			phdr->param_length =
1614 			    htons(sizeof(struct sctp_paramhdr) * 2);
1615 			phdr++;
1616 			/* We insert the stream id in the type field */
1617 			phdr->param_type = ch->dp.stream_id;
1618 			/* And set the length to 0 for the rsvd field */
1619 			phdr->param_length = 0;
1620 			sctp_queue_op_err(stcb, mb);
1621 		}
1622 		SCTP_STAT_INCR(sctps_badsid);
1623 		SCTP_TCB_LOCK_ASSERT(stcb);
1624 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1625 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1626 			asoc->highest_tsn_inside_nr_map = tsn;
1627 		}
1628 		if (tsn == (asoc->cumulative_tsn + 1)) {
1629 			/* Update cum-ack */
1630 			asoc->cumulative_tsn = tsn;
1631 		}
1632 		return (0);
1633 	}
1634 	/*
1635 	 * Before we continue, let's validate that we are not being fooled
1636 	 * by an evil attacker. We can only have 4096 chunks based on the
1637 	 * TSN spread allowed by the mapping array (512 * 8 bits), so there
1638 	 * is no way our stream sequence numbers could have wrapped. We of
1639 	 * course only validate the FIRST fragment, so the bit must be set.
1640 	 */
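	/*
	 * The arithmetic behind that claim, for illustration: the mapping
	 * array spans at most SCTP_MAPPING_ARRAY * 8 = 512 * 8 = 4096
	 * TSNs, and every message consumes at least one TSN, so at most
	 * 4096 stream sequence numbers can be outstanding -- far fewer
	 * than the 65536 needed to wrap a 16-bit SSN.
	 */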
1641 	strmseq = ntohs(ch->dp.stream_sequence);
1642 #ifdef SCTP_ASOCLOG_OF_TSNS
1643 	SCTP_TCB_LOCK_ASSERT(stcb);
1644 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1645 		asoc->tsn_in_at = 0;
1646 		asoc->tsn_in_wrapped = 1;
1647 	}
1648 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1649 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1650 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1651 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1652 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1653 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1654 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1655 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1656 	asoc->tsn_in_at++;
1657 #endif
1658 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1659 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1660 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1661 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1662 		/* The incoming sseq is behind where we last delivered? */
1663 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1664 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1665 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1666 		    0, M_DONTWAIT, 1, MT_DATA);
1667 		if (oper) {
1668 			struct sctp_paramhdr *ph;
1669 			uint32_t *ippp;
1670 
1671 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1672 			    (3 * sizeof(uint32_t));
1673 			ph = mtod(oper, struct sctp_paramhdr *);
1674 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1675 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1676 			ippp = (uint32_t *) (ph + 1);
1677 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1678 			ippp++;
1679 			*ippp = tsn;
1680 			ippp++;
1681 			*ippp = ((strmno << 16) | strmseq);
1682 
1683 		}
1684 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1685 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1686 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1687 		*abort_flag = 1;
1688 		return (0);
1689 	}
1690 	/************************************
1691 	 * From here down we may find ch-> invalid
1692 	 * so it's a good idea NOT to use it.
1693 	 *************************************/
1694 
1695 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1696 	if (last_chunk == 0) {
1697 		dmbuf = SCTP_M_COPYM(*m,
1698 		    (offset + sizeof(struct sctp_data_chunk)),
1699 		    the_len, M_DONTWAIT);
1700 #ifdef SCTP_MBUF_LOGGING
1701 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1702 			struct mbuf *mat;
1703 
1704 			mat = dmbuf;
1705 			while (mat) {
1706 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1707 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1708 				}
1709 				mat = SCTP_BUF_NEXT(mat);
1710 			}
1711 		}
1712 #endif
1713 	} else {
1714 		/* We can steal the last chunk */
1715 		int l_len;
1716 
1717 		dmbuf = *m;
1718 		/* lop off the top part */
1719 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1720 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1721 			l_len = SCTP_BUF_LEN(dmbuf);
1722 		} else {
1723 			/*
1724 			 * Need to count up the size; hopefully we do not
1725 			 * hit this too often :-0
1726 			 */
1727 			struct mbuf *lat;
1728 
1729 			l_len = 0;
1730 			lat = dmbuf;
1731 			while (lat) {
1732 				l_len += SCTP_BUF_LEN(lat);
1733 				lat = SCTP_BUF_NEXT(lat);
1734 			}
1735 		}
1736 		if (l_len > the_len) {
1737 			/* Trim the extra bytes off the end too */
1738 			m_adj(dmbuf, -(l_len - the_len));
1739 		}
1740 	}
1741 	if (dmbuf == NULL) {
1742 		SCTP_STAT_INCR(sctps_nomem);
1743 		return (0);
1744 	}
1745 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1746 	    asoc->fragmented_delivery_inprogress == 0 &&
1747 	    TAILQ_EMPTY(&asoc->resetHead) &&
1748 	    ((ordered == 0) ||
1749 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1750 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1751 		/* Candidate for express delivery */
1752 		/*
1753 		 * It's not fragmented, no PD-API is up, nothing is in the
1754 		 * delivery queue, it's unordered OR ordered and the next to
1755 		 * deliver AND nothing else is stuck on the stream queue,
1756 		 * and there is room for it in the socket buffer. Let's just
1757 		 * stuff it up the buffer....
1758 		 */
1759 
1760 		/* It would be nice to avoid this copy if we could :< */
1761 		sctp_alloc_a_readq(stcb, control);
1762 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1763 		    protocol_id,
1764 		    stcb->asoc.context,
1765 		    strmno, strmseq,
1766 		    chunk_flags,
1767 		    dmbuf);
1768 		if (control == NULL) {
1769 			goto failed_express_del;
1770 		}
1771 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1772 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1773 			asoc->highest_tsn_inside_nr_map = tsn;
1774 		}
1775 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1776 		    control, &stcb->sctp_socket->so_rcv,
1777 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1778 
1779 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1780 			/* for ordered, bump what we delivered */
1781 			asoc->strmin[strmno].last_sequence_delivered++;
1782 		}
1783 		SCTP_STAT_INCR(sctps_recvexpress);
1784 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1785 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1786 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1787 		}
1788 		control = NULL;
1789 
1790 		goto finish_express_del;
1791 	}
1792 failed_express_del:
1793 	/* If we reach here this is a new chunk */
1794 	chk = NULL;
1795 	control = NULL;
1796 	/* Express for fragmented delivery? */
1797 	if ((asoc->fragmented_delivery_inprogress) &&
1798 	    (stcb->asoc.control_pdapi) &&
1799 	    (asoc->str_of_pdapi == strmno) &&
1800 	    (asoc->ssn_of_pdapi == strmseq)
1801 	    ) {
1802 		control = stcb->asoc.control_pdapi;
1803 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1804 			/* Can't be another FIRST fragment */
1805 			goto failed_pdapi_express_del;
1806 		}
1807 		if (tsn == (control->sinfo_tsn + 1)) {
1808 			/* Yep, we can add it on */
1809 			int end = 0;
1810 			uint32_t cumack;
1811 
1812 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1813 				end = 1;
1814 			}
1815 			cumack = asoc->cumulative_tsn;
1816 			if ((cumack + 1) == tsn)
1817 				cumack = tsn;
1818 
1819 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1820 			    tsn,
1821 			    &stcb->sctp_socket->so_rcv)) {
1822 				SCTP_PRINTF("Append fails end:%d\n", end);
1823 				goto failed_pdapi_express_del;
1824 			}
1825 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1826 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1827 				asoc->highest_tsn_inside_nr_map = tsn;
1828 			}
1829 			SCTP_STAT_INCR(sctps_recvexpressm);
1830 			control->sinfo_tsn = tsn;
1831 			asoc->tsn_last_delivered = tsn;
1832 			asoc->fragment_flags = chunk_flags;
1833 			asoc->tsn_of_pdapi_last_delivered = tsn;
1834 			asoc->last_flags_delivered = chunk_flags;
1835 			asoc->last_strm_seq_delivered = strmseq;
1836 			asoc->last_strm_no_delivered = strmno;
1837 			if (end) {
1838 				/* clean up the flags and such */
1839 				asoc->fragmented_delivery_inprogress = 0;
1840 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1841 					asoc->strmin[strmno].last_sequence_delivered++;
1842 				}
1843 				stcb->asoc.control_pdapi = NULL;
1844 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1845 					/*
1846 					 * There could be another message
1847 					 * ready
1848 					 */
1849 					need_reasm_check = 1;
1850 				}
1851 			}
1852 			control = NULL;
1853 			goto finish_express_del;
1854 		}
1855 	}
1856 failed_pdapi_express_del:
1857 	control = NULL;
1858 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1859 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1860 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1861 			asoc->highest_tsn_inside_nr_map = tsn;
1862 		}
1863 	} else {
1864 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1865 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1866 			asoc->highest_tsn_inside_map = tsn;
1867 		}
1868 	}
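	/*
	 * As a reminder of the B/E fragment bits tested below (RFC 4960,
	 * Section 3.3.1): B=1,E=1 is a complete user message
	 * (SCTP_DATA_NOT_FRAG), B=1,E=0 the first fragment, B=0,E=0 a
	 * middle fragment, and B=0,E=1 the last fragment. Only a complete
	 * message may bypass the reassembly path that follows.
	 */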
1869 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1870 		sctp_alloc_a_chunk(stcb, chk);
1871 		if (chk == NULL) {
1872 			/* No memory so we drop the chunk */
1873 			SCTP_STAT_INCR(sctps_nomem);
1874 			if (last_chunk == 0) {
1875 				/* we copied it, free the copy */
1876 				sctp_m_freem(dmbuf);
1877 			}
1878 			return (0);
1879 		}
1880 		chk->rec.data.TSN_seq = tsn;
1881 		chk->no_fr_allowed = 0;
1882 		chk->rec.data.stream_seq = strmseq;
1883 		chk->rec.data.stream_number = strmno;
1884 		chk->rec.data.payloadtype = protocol_id;
1885 		chk->rec.data.context = stcb->asoc.context;
1886 		chk->rec.data.doing_fast_retransmit = 0;
1887 		chk->rec.data.rcv_flags = chunk_flags;
1888 		chk->asoc = asoc;
1889 		chk->send_size = the_len;
1890 		chk->whoTo = net;
1891 		atomic_add_int(&net->ref_count, 1);
1892 		chk->data = dmbuf;
1893 	} else {
1894 		sctp_alloc_a_readq(stcb, control);
1895 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1896 		    protocol_id,
1897 		    stcb->asoc.context,
1898 		    strmno, strmseq,
1899 		    chunk_flags,
1900 		    dmbuf);
1901 		if (control == NULL) {
1902 			/* No memory so we drop the chunk */
1903 			SCTP_STAT_INCR(sctps_nomem);
1904 			if (last_chunk == 0) {
1905 				/* we copied it, free the copy */
1906 				sctp_m_freem(dmbuf);
1907 			}
1908 			return (0);
1909 		}
1910 		control->length = the_len;
1911 	}
1912 
1913 	/* Mark it as received */
1914 	/* Now queue it where it belongs */
1915 	if (control != NULL) {
1916 		/* First a sanity check */
1917 		if (asoc->fragmented_delivery_inprogress) {
1918 			/*
1919 			 * Ok, we have a fragmented delivery in progress. If
1920 			 * this complete chunk is the next to deliver, or it
1921 			 * belongs in our view to the reassembly queue, the
1922 			 * peer is evil or broken.
1923 			 */
1924 			uint32_t estimate_tsn;
1925 
1926 			estimate_tsn = asoc->tsn_last_delivered + 1;
1927 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1928 			    (estimate_tsn == control->sinfo_tsn)) {
1929 				/* Evil/Broken peer */
1930 				sctp_m_freem(control->data);
1931 				control->data = NULL;
1932 				if (control->whoFrom) {
1933 					sctp_free_remote_addr(control->whoFrom);
1934 					control->whoFrom = NULL;
1935 				}
1936 				sctp_free_a_readq(stcb, control);
1937 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1938 				    0, M_DONTWAIT, 1, MT_DATA);
1939 				if (oper) {
1940 					struct sctp_paramhdr *ph;
1941 					uint32_t *ippp;
1942 
1943 					SCTP_BUF_LEN(oper) =
1944 					    sizeof(struct sctp_paramhdr) +
1945 					    (3 * sizeof(uint32_t));
1946 					ph = mtod(oper, struct sctp_paramhdr *);
1947 					ph->param_type =
1948 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1949 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1950 					ippp = (uint32_t *) (ph + 1);
1951 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1952 					ippp++;
1953 					*ippp = tsn;
1954 					ippp++;
1955 					*ippp = ((strmno << 16) | strmseq);
1956 				}
1957 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1958 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1959 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1960 
1961 				*abort_flag = 1;
1962 				return (0);
1963 			} else {
1964 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1965 					sctp_m_freem(control->data);
1966 					control->data = NULL;
1967 					if (control->whoFrom) {
1968 						sctp_free_remote_addr(control->whoFrom);
1969 						control->whoFrom = NULL;
1970 					}
1971 					sctp_free_a_readq(stcb, control);
1972 
1973 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1974 					    0, M_DONTWAIT, 1, MT_DATA);
1975 					if (oper) {
1976 						struct sctp_paramhdr *ph;
1977 						uint32_t *ippp;
1978 
1979 						SCTP_BUF_LEN(oper) =
1980 						    sizeof(struct sctp_paramhdr) +
1981 						    (3 * sizeof(uint32_t));
1982 						ph = mtod(oper,
1983 						    struct sctp_paramhdr *);
1984 						ph->param_type =
1985 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1986 						ph->param_length =
1987 						    htons(SCTP_BUF_LEN(oper));
1988 						ippp = (uint32_t *) (ph + 1);
1989 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1990 						ippp++;
1991 						*ippp = tsn;
1992 						ippp++;
1993 						*ippp = ((strmno << 16) | strmseq);
1994 					}
1995 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1996 					sctp_abort_an_association(stcb->sctp_ep,
1997 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1998 
1999 					*abort_flag = 1;
2000 					return (0);
2001 				}
2002 			}
2003 		} else {
2004 			/* No PDAPI running */
2005 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2006 				/*
2007 				 * Reassembly queue is NOT empty; validate
2008 				 * that this tsn does not need to be in the
2009 				 * reassembly queue. If it does, then our
2010 				 * peer is broken or evil.
2011 				 */
2012 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2013 					sctp_m_freem(control->data);
2014 					control->data = NULL;
2015 					if (control->whoFrom) {
2016 						sctp_free_remote_addr(control->whoFrom);
2017 						control->whoFrom = NULL;
2018 					}
2019 					sctp_free_a_readq(stcb, control);
2020 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2021 					    0, M_DONTWAIT, 1, MT_DATA);
2022 					if (oper) {
2023 						struct sctp_paramhdr *ph;
2024 						uint32_t *ippp;
2025 
2026 						SCTP_BUF_LEN(oper) =
2027 						    sizeof(struct sctp_paramhdr) +
2028 						    (3 * sizeof(uint32_t));
2029 						ph = mtod(oper,
2030 						    struct sctp_paramhdr *);
2031 						ph->param_type =
2032 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2033 						ph->param_length =
2034 						    htons(SCTP_BUF_LEN(oper));
2035 						ippp = (uint32_t *) (ph + 1);
2036 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2037 						ippp++;
2038 						*ippp = tsn;
2039 						ippp++;
2040 						*ippp = ((strmno << 16) | strmseq);
2041 					}
2042 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2043 					sctp_abort_an_association(stcb->sctp_ep,
2044 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2045 
2046 					*abort_flag = 1;
2047 					return (0);
2048 				}
2049 			}
2050 		}
2051 		/* ok, if we reach here we have passed the sanity checks */
2052 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2053 			/* queue directly into socket buffer */
2054 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2055 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2056 			    control,
2057 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2058 		} else {
2059 			/*
2060 			 * Special check for when streams are resetting. We
2061 			 * could be smarter about this and check the actual
2062 			 * stream to see if it is not being reset.. that way
2063 			 * we would not create a HOLB between streams being
2064 			 * reset and those not being reset.
2065 			 *
2066 			 * We take complete messages that have a stream reset
2067 			 * intervening (aka the TSN is after where our
2068 			 * cum-ack needs to be) off and put them on the
2069 			 * pending_reply_queue. The reassembly ones we do
2070 			 * not have to worry about since they are all sorted
2071 			 * and processed by TSN order. It is only the
2072 			 * singletons I must worry about.
2073 			 */
2074 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2075 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2076 				/*
2077 				 * Yep, it's past where we need to reset... go
2078 				 * ahead and queue it.
2079 				 */
2080 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2081 					/* first one on */
2082 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2083 				} else {
2084 					struct sctp_queued_to_read *ctlOn,
2085 					                   *nctlOn;
2086 					unsigned char inserted = 0;
2087 
2088 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2089 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2090 							continue;
2091 						} else {
2092 							/* found it */
2093 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2094 							inserted = 1;
2095 							break;
2096 						}
2097 					}
2098 					if (inserted == 0) {
2099 						/*
2100 						 * Nothing on the queue has
2101 						 * a larger TSN, so it must
2102 						 * be put at the end.
2103 						 */
2104 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2105 					}
2106 				}
2107 			} else {
2108 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2109 				if (*abort_flag) {
2110 					return (0);
2111 				}
2112 			}
2113 		}
2114 	} else {
2115 		/* Into the re-assembly queue */
2116 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2117 		if (*abort_flag) {
2118 			/*
2119 			 * the assoc is now gone; chk was put onto the
2120 			 * reassembly queue, which has all been freed.
2121 			 */
2122 			*m = NULL;
2123 			return (0);
2124 		}
2125 	}
2126 finish_express_del:
2127 	if (tsn == (asoc->cumulative_tsn + 1)) {
2128 		/* Update cum-ack */
2129 		asoc->cumulative_tsn = tsn;
2130 	}
2131 	if (last_chunk) {
2132 		*m = NULL;
2133 	}
2134 	if (ordered) {
2135 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2136 	} else {
2137 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2138 	}
2139 	SCTP_STAT_INCR(sctps_recvdata);
2140 	/* Set it present please */
2141 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2142 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2143 	}
2144 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2145 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2146 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2147 	}
2148 	/* check the special flag for stream resets */
2149 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2150 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2151 		/*
2152 		 * We have finished working through the backlogged TSNs; now
2153 		 * it is time to reset streams. 1: call the reset function.
2154 		 * 2: free the pending_reply space. 3: distribute any chunks
2155 		 * in the pending_reply_queue.
2156 		 */
2157 		struct sctp_queued_to_read *ctl, *nctl;
2158 
2159 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2160 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2161 		SCTP_FREE(liste, SCTP_M_STRESET);
2162 		/* sa_ignore FREED_MEMORY */
2163 		liste = TAILQ_FIRST(&asoc->resetHead);
2164 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2165 			/* All can be removed */
2166 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2167 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2168 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2169 				if (*abort_flag) {
2170 					return (0);
2171 				}
2172 			}
2173 		} else {
2174 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2175 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2176 					break;
2177 				}
2178 				/*
2179 				 * If ctl->sinfo_tsn is <= liste->tsn we can
2180 				 * process it, which is the negation of the
2181 				 * break condition above.
2182 				 */
2183 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2184 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2185 				if (*abort_flag) {
2186 					return (0);
2187 				}
2188 			}
2189 		}
2190 		/*
2191 		 * Now service re-assembly to pick up anything that has been
2192 		 * held on the reassembly queue.
2193 		 */
2194 		sctp_deliver_reasm_check(stcb, asoc);
2195 		need_reasm_check = 0;
2196 	}
2197 	if (need_reasm_check) {
2198 		/* Another one waits ? */
2199 		sctp_deliver_reasm_check(stcb, asoc);
2200 	}
2201 	return (1);
2202 }
2203 
2204 int8_t sctp_map_lookup_tab[256] = {
2205 	0, 1, 0, 2, 0, 1, 0, 3,
2206 	0, 1, 0, 2, 0, 1, 0, 4,
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 5,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 4,
2211 	0, 1, 0, 2, 0, 1, 0, 3,
2212 	0, 1, 0, 2, 0, 1, 0, 6,
2213 	0, 1, 0, 2, 0, 1, 0, 3,
2214 	0, 1, 0, 2, 0, 1, 0, 4,
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 5,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 4,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 7,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 4,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 5,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 4,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 6,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 4,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 5,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 4,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 8
2237 };
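
/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val,
 * counted from the least significant bit up. A slower but equivalent
 * computation, shown here only as an illustrative sketch:
 *
 *	static int
 *	count_trailing_ones(uint8_t val)
 *	{
 *		int n = 0;
 *
 *		while (val & 1) {
 *			n++;
 *			val >>= 1;
 *		}
 *		return (n);
 *	}
 *
 * For example, val = 0x17 (binary 00010111) yields 3, matching the
 * table entry above. sctp_slide_mapping_arrays() uses the table to
 * count how many TSNs past the base have arrived contiguously.
 */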
2238 
2239 
2240 void
2241 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2242 {
2243 	/*
2244 	 * Now we also need to check the mapping array in a couple of ways.
2245 	 * 1) Did we move the cum-ack point?
2246 	 *
2247 	 * When you first glance at this you might think that all entries that
2248 	 * make up the position of the cum-ack would be in the nr-mapping
2249 	 * array only.. i.e. things up to the cum-ack are always
2250 	 * deliverable. That's true with one exception: when it's a fragmented
2251 	 * message we may not deliver the data until some threshold (or all
2252 	 * of it) is in place. So we must OR the nr_mapping_array and
2253 	 * mapping_array to get a true picture of the cum-ack.
2254 	 */
2255 	struct sctp_association *asoc;
2256 	int at;
2257 	uint8_t val;
2258 	int slide_from, slide_end, lgap, distance;
2259 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2260 
2261 	asoc = &stcb->asoc;
2263 
2264 	old_cumack = asoc->cumulative_tsn;
2265 	old_base = asoc->mapping_array_base_tsn;
2266 	old_highest = asoc->highest_tsn_inside_map;
2267 	/*
2268 	 * We could probably improve this a small bit by calculating the
2269 	 * offset of the current cum-ack as the starting point.
2270 	 */
2271 	at = 0;
2272 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2273 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2274 		if (val == 0xff) {
2275 			at += 8;
2276 		} else {
2277 			/* there is a 0 bit */
2278 			at += sctp_map_lookup_tab[val];
2279 			break;
2280 		}
2281 	}
2282 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
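	/*
	 * A worked example of the loop above (values are illustrative):
	 * with mapping_array_base_tsn = 100 and the OR of the two maps
	 * being 0xff 0x07 ..., the first byte adds 8 and the second adds
	 * sctp_map_lookup_tab[0x07] = 3, so at = 11 and the new
	 * cumulative_tsn is 100 + (11 - 1) = 110.
	 */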
2283 
2284 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2285 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2286 #ifdef INVARIANTS
2287 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2288 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2289 #else
2290 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2291 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2292 		sctp_print_mapping_array(asoc);
2293 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2294 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2295 		}
2296 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2297 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2298 #endif
2299 	}
2300 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2301 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2302 	} else {
2303 		highest_tsn = asoc->highest_tsn_inside_map;
2304 	}
2305 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2306 		/* The complete array was completed by a single FR */
2307 		/* highest becomes the cum-ack */
2308 		int clr;
2309 
2310 #ifdef INVARIANTS
2311 		unsigned int i;
2312 
2313 #endif
2314 
2315 		/* clear the array */
2316 		clr = ((at + 7) >> 3);
2317 		if (clr > asoc->mapping_array_size) {
2318 			clr = asoc->mapping_array_size;
2319 		}
2320 		memset(asoc->mapping_array, 0, clr);
2321 		memset(asoc->nr_mapping_array, 0, clr);
2322 #ifdef INVARIANTS
2323 		for (i = 0; i < asoc->mapping_array_size; i++) {
2324 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2325 				printf("Error: mapping arrays not clean at clear\n");
2326 				sctp_print_mapping_array(asoc);
2327 			}
2328 		}
2329 #endif
2330 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2331 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2332 	} else if (at >= 8) {
2333 		/* we can slide the mapping array down */
2334 		/* slide_from holds where we hit the first NON 0xff byte */
2335 
2336 		/*
2337 		 * now calculate the ceiling of the move using our highest
2338 		 * TSN value
2339 		 */
2340 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2341 		slide_end = (lgap >> 3);
2342 		if (slide_end < slide_from) {
2343 			sctp_print_mapping_array(asoc);
2344 #ifdef INVARIANTS
2345 			panic("impossible slide");
2346 #else
2347 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2348 			    lgap, slide_end, slide_from, at);
2349 			return;
2350 #endif
2351 		}
2352 		if (slide_end > asoc->mapping_array_size) {
2353 #ifdef INVARIANTS
2354 			panic("would overrun buffer");
2355 #else
2356 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2357 			    asoc->mapping_array_size, slide_end);
2358 			slide_end = asoc->mapping_array_size;
2359 #endif
2360 		}
2361 		distance = (slide_end - slide_from) + 1;
2362 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2363 			sctp_log_map(old_base, old_cumack, old_highest,
2364 			    SCTP_MAP_PREPARE_SLIDE);
2365 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2366 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2367 		}
2368 		if (distance + slide_from > asoc->mapping_array_size ||
2369 		    distance < 0) {
2370 			/*
2371 			 * Here we do NOT slide forward the array so that
2372 			 * hopefully when more data comes in to fill it up
2373 			 * we will be able to slide it forward. Really I
2374 			 * don't think this should happen :-0
2375 			 */
2376 
2377 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2378 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2379 				    (uint32_t) asoc->mapping_array_size,
2380 				    SCTP_MAP_SLIDE_NONE);
2381 			}
2382 		} else {
2383 			int ii;
2384 
2385 			for (ii = 0; ii < distance; ii++) {
2386 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2387 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2388 
2389 			}
2390 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2391 				asoc->mapping_array[ii] = 0;
2392 				asoc->nr_mapping_array[ii] = 0;
2393 			}
2394 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2395 				asoc->highest_tsn_inside_map += (slide_from << 3);
2396 			}
2397 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2398 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2399 			}
2400 			asoc->mapping_array_base_tsn += (slide_from << 3);
2401 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2402 				sctp_log_map(asoc->mapping_array_base_tsn,
2403 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2404 				    SCTP_MAP_SLIDE_RESULT);
2405 			}
2406 		}
2407 	}
2408 }
2409 
2410 void
2411 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2412 {
2413 	struct sctp_association *asoc;
2414 	uint32_t highest_tsn;
2415 
2416 	asoc = &stcb->asoc;
2417 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2418 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2419 	} else {
2420 		highest_tsn = asoc->highest_tsn_inside_map;
2421 	}
2422 
2423 	/*
2424 	 * Now we need to see if we need to queue a sack or just start the
2425 	 * timer (if allowed).
2426 	 */
2427 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2428 		/*
2429 		 * Ok, special case in the SHUTDOWN-SENT state: here we make
2430 		 * sure the SACK timer is off and instead send a SHUTDOWN
2431 		 * and a SACK.
2432 		 */
2433 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2434 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2435 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2436 		}
2437 		sctp_send_shutdown(stcb,
2438 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2439 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2440 	} else {
2441 		int is_a_gap;
2442 
2443 		/* is there a gap now? */
2444 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2445 
2446 		/*
2447 		 * CMT DAC algorithm: increase number of packets received
2448 		 * since last ack
2449 		 */
2450 		stcb->asoc.cmt_dac_pkts_rcvd++;
2451 
2452 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2453 							 * SACK */
2454 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2455 							 * longer is one */
2456 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2457 		    (is_a_gap) ||	/* is still a gap */
2458 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2459 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2460 		    ) {
2461 
2462 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2463 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2464 			    (stcb->asoc.send_sack == 0) &&
2465 			    (stcb->asoc.numduptsns == 0) &&
2466 			    (stcb->asoc.delayed_ack) &&
2467 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2468 
2469 				/*
2470 				 * CMT DAC algorithm: With CMT, delay acks
2471 				 * even in the face of reordering. Therefore,
2472 				 * acks that do not have to be sent because of
2473 				 * the above reasons will be delayed. That is,
2474 				 * acks that would have been sent due to gap
2475 				 * reports will be delayed with DAC. Start the
2476 				 * delayed ack timer.
2479 				 */
2480 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2481 				    stcb->sctp_ep, stcb, NULL);
2482 			} else {
2483 				/*
2484 				 * Ok, we must build a SACK: the timer is
2485 				 * pending, we got our first packet, OR there
2486 				 * are gaps or duplicates.
2487 				 */
2488 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2489 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2490 			}
2491 		} else {
2492 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2493 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2494 				    stcb->sctp_ep, stcb, NULL);
2495 			}
2496 		}
2497 	}
2498 }
2499 
2500 void
2501 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2502 {
2503 	struct sctp_tmit_chunk *chk;
2504 	uint32_t tsize, pd_point;
2505 	uint16_t nxt_todel;
2506 
2507 	if (asoc->fragmented_delivery_inprogress) {
2508 		sctp_service_reassembly(stcb, asoc);
2509 	}
2510 	/* Can we proceed further, i.e., is the PD-API complete? */
2511 	if (asoc->fragmented_delivery_inprogress) {
2512 		/* no */
2513 		return;
2514 	}
2515 	/*
2516 	 * Now is there some other chunk I can deliver from the reassembly
2517 	 * queue?
2518 	 */
2519 doit_again:
2520 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2521 	if (chk == NULL) {
2522 		asoc->size_on_reasm_queue = 0;
2523 		asoc->cnt_on_reasm_queue = 0;
2524 		return;
2525 	}
2526 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2527 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2528 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2529 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2530 		/*
2531 		 * Yep, the first one is here. We set up to start reception
2532 		 * by backing down the TSN, just in case we can't deliver.
2533 		 */
2534 
2535 		/*
2536 		 * Before we start, though, either all of the message must
2537 		 * be here, or enough of it to reach the partial delivery
2538 		 * point, so that something can actually be delivered.
2539 		 */
2540 		if (stcb->sctp_socket) {
2541 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2542 			    stcb->sctp_ep->partial_delivery_point);
2543 		} else {
2544 			pd_point = stcb->sctp_ep->partial_delivery_point;
2545 		}
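		/*
		 * For example (illustrative numbers): with a 64 KB receive
		 * buffer and a partial_delivery_point of 4096, pd_point is
		 * 4096, so partial delivery starts once the whole message
		 * is queued or at least 4096 bytes of it are.
		 */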
2546 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2547 			asoc->fragmented_delivery_inprogress = 1;
2548 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2549 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2550 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2551 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2552 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2553 			sctp_service_reassembly(stcb, asoc);
2554 			if (asoc->fragmented_delivery_inprogress == 0) {
2555 				goto doit_again;
2556 			}
2557 		}
2558 	}
2559 }
2560 
2561 int
2562 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2563     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2564     struct sctp_nets *net, uint32_t * high_tsn)
2565 {
2566 	struct sctp_data_chunk *ch, chunk_buf;
2567 	struct sctp_association *asoc;
2568 	int num_chunks = 0;	/* number of data chunks processed */
2569 	int stop_proc = 0;
2570 	int chk_length, break_flag, last_chunk;
2571 	int abort_flag = 0, was_a_gap;
2572 	struct mbuf *m;
2573 	uint32_t highest_tsn;
2574 
2575 	/* set the rwnd */
2576 	sctp_set_rwnd(stcb, &stcb->asoc);
2577 
2578 	m = *mm;
2579 	SCTP_TCB_LOCK_ASSERT(stcb);
2580 	asoc = &stcb->asoc;
2581 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2582 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2583 	} else {
2584 		highest_tsn = asoc->highest_tsn_inside_map;
2585 	}
2586 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2587 	/*
2588 	 * Set up where we got the last DATA packet from for any SACK that
2589 	 * may need to go out. Don't bump the net; that is done ONLY when
2590 	 * a chunk is assigned.
2591 	 */
2592 	asoc->last_data_chunk_from = net;
2593 
2594 	/*-
2595 	 * Now before we proceed we must figure out if this is a wasted
2596 	 * cluster... i.e. it is a small packet sent in and yet the driver
2597 	 * underneath allocated a full cluster for it. If so we must copy it
2598 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2599 	 * with cluster starvation. Note for __Panda__ we don't do this
2600 	 * since it has clusters all the way down to 64 bytes.
2601 	 */
2602 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2603 		/* we only handle mbufs that are singletons.. not chains */
2604 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2605 		if (m) {
2606 			/* ok, let's see if we can copy the data up */
2607 			caddr_t *from, *to;
2608 
2609 			/* get the pointers and copy */
2610 			to = mtod(m, caddr_t *);
2611 			from = mtod((*mm), caddr_t *);
2612 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2613 			/* copy the length and free up the old */
2614 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2615 			sctp_m_freem(*mm);
2616 			/* success, back copy */
2617 			*mm = m;
2618 		} else {
2619 			/* We are in trouble in the mbuf world .. yikes */
2620 			m = *mm;
2621 		}
2622 	}
2623 	/* get pointer to the first chunk header */
2624 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2625 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2626 	if (ch == NULL) {
2627 		return (1);
2628 	}
2629 	/*
2630 	 * process all DATA chunks...
2631 	 */
2632 	*high_tsn = asoc->cumulative_tsn;
2633 	break_flag = 0;
2634 	asoc->data_pkts_seen++;
2635 	while (stop_proc == 0) {
2636 		/* validate chunk length */
2637 		chk_length = ntohs(ch->ch.chunk_length);
2638 		if (length - *offset < chk_length) {
2639 			/* all done, mutilated chunk */
2640 			stop_proc = 1;
2641 			break;
2642 		}
2643 		if (ch->ch.chunk_type == SCTP_DATA) {
2644 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2645 				/*
2646 				 * Need to send an abort since we had an
2647 				 * invalid data chunk.
2648 				 */
2649 				struct mbuf *op_err;
2650 
2651 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2652 				    0, M_DONTWAIT, 1, MT_DATA);
2653 
2654 				if (op_err) {
2655 					struct sctp_paramhdr *ph;
2656 					uint32_t *ippp;
2657 
2658 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2659 					    (2 * sizeof(uint32_t));
2660 					ph = mtod(op_err, struct sctp_paramhdr *);
2661 					ph->param_type =
2662 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2663 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2664 					ippp = (uint32_t *) (ph + 1);
2665 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2666 					ippp++;
2667 					*ippp = asoc->cumulative_tsn;
2668 
2669 				}
2670 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2671 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2672 				    op_err, 0, net->port);
2673 				return (2);
2674 			}
2675 #ifdef SCTP_AUDITING_ENABLED
2676 			sctp_audit_log(0xB1, 0);
2677 #endif
2678 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2679 				last_chunk = 1;
2680 			} else {
2681 				last_chunk = 0;
2682 			}
2683 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2684 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2685 			    last_chunk)) {
2686 				num_chunks++;
2687 			}
2688 			if (abort_flag)
2689 				return (2);
2690 
2691 			if (break_flag) {
2692 				/*
2693 				 * Set because we ran out of rwnd space and
2694 				 * have no drop report space left.
2695 				 */
2696 				stop_proc = 1;
2697 				break;
2698 			}
2699 		} else {
2700 			/* not a data chunk in the data region */
2701 			switch (ch->ch.chunk_type) {
2702 			case SCTP_INITIATION:
2703 			case SCTP_INITIATION_ACK:
2704 			case SCTP_SELECTIVE_ACK:
2705 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2706 			case SCTP_HEARTBEAT_REQUEST:
2707 			case SCTP_HEARTBEAT_ACK:
2708 			case SCTP_ABORT_ASSOCIATION:
2709 			case SCTP_SHUTDOWN:
2710 			case SCTP_SHUTDOWN_ACK:
2711 			case SCTP_OPERATION_ERROR:
2712 			case SCTP_COOKIE_ECHO:
2713 			case SCTP_COOKIE_ACK:
2714 			case SCTP_ECN_ECHO:
2715 			case SCTP_ECN_CWR:
2716 			case SCTP_SHUTDOWN_COMPLETE:
2717 			case SCTP_AUTHENTICATION:
2718 			case SCTP_ASCONF_ACK:
2719 			case SCTP_PACKET_DROPPED:
2720 			case SCTP_STREAM_RESET:
2721 			case SCTP_FORWARD_CUM_TSN:
2722 			case SCTP_ASCONF:
2723 				/*
2724 				 * Now, what do we do with KNOWN chunks that
2725 				 * are NOT in the right place?
2726 				 *
2727 				 * For now, I do nothing but ignore them. We
2728 				 * may later want to add sysctl stuff to
2729 				 * switch out and do either an ABORT() or
2730 				 * possibly process them.
2731 				 */
2732 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2733 					struct mbuf *op_err;
2734 
2735 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2736 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2737 					return (2);
2738 				}
2739 				break;
2740 			default:
2741 				/* unknown chunk type, use bit rules */
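				/*
				 * The "bit rules" (RFC 4960, Section 3.2):
				 * the two high-order bits of an unknown
				 * chunk type encode what to do with it. If
				 * 0x40 is set we report the chunk in an
				 * ERROR; if 0x80 is set we skip it and keep
				 * processing, otherwise we stop processing
				 * the rest of the packet.
				 */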
2742 				if (ch->ch.chunk_type & 0x40) {
2743 					/* Add a error report to the queue */
2744 					struct mbuf *merr;
2745 					struct sctp_paramhdr *phd;
2746 
2747 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2748 					if (merr) {
2749 						phd = mtod(merr, struct sctp_paramhdr *);
2750 						/*
2751 						 * We cheat and use param
2752 						 * type since we did not
2753 						 * bother to define an error
2754 						 * cause struct. They are
2755 						 * the same basic format
2756 						 * with different names.
2757 						 */
2758 						phd->param_type =
2759 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2760 						phd->param_length =
2761 						    htons(chk_length + sizeof(*phd));
2762 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2763 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2764 						    SCTP_SIZE32(chk_length),
2765 						    M_DONTWAIT);
2766 						if (SCTP_BUF_NEXT(merr)) {
2767 							sctp_queue_op_err(stcb, merr);
2768 						} else {
2769 							sctp_m_freem(merr);
2770 						}
2771 					}
2772 				}
2773 				if ((ch->ch.chunk_type & 0x80) == 0) {
2774 					/* discard the rest of this packet */
2775 					stop_proc = 1;
2776 				}	/* else skip this bad chunk and
2777 					 * continue... */
2778 				break;
2779 			}	/* switch of chunk type */
2780 		}
2781 		*offset += SCTP_SIZE32(chk_length);
2782 		if ((*offset >= length) || stop_proc) {
2783 			/* no more data left in the mbuf chain */
2784 			stop_proc = 1;
2785 			continue;
2786 		}
2787 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2788 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2789 		if (ch == NULL) {
2790 			*offset = length;
2791 			stop_proc = 1;
2792 			break;
2793 
2794 		}
2795 	}			/* while */
2796 	if (break_flag) {
2797 		/*
2798 		 * we need to report rwnd overrun drops.
2799 		 */
2800 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2801 	}
2802 	if (num_chunks) {
2803 		/*
2804 		 * We got data; update the time for auto-close and give the
2805 		 * peer credit for being alive.
2806 		 */
2807 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2808 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2809 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2810 			    stcb->asoc.overall_error_count,
2811 			    0,
2812 			    SCTP_FROM_SCTP_INDATA,
2813 			    __LINE__);
2814 		}
2815 		stcb->asoc.overall_error_count = 0;
2816 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2817 	}
2818 	/* now service all of the reassembly queue if needed */
2819 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2820 		sctp_service_queues(stcb, asoc);
2821 
2822 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2823 		/* Assure that we ack right away */
2824 		stcb->asoc.send_sack = 1;
2825 	}
2826 	/* Start a sack timer or QUEUE a SACK for sending */
2827 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2828 	if (abort_flag)
2829 		return (2);
2830 
2831 	return (0);
2832 }
2833 
2834 static int
2835 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2836     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2837     int *num_frs,
2838     uint32_t * biggest_newly_acked_tsn,
2839     uint32_t * this_sack_lowest_newack,
2840     int *ecn_seg_sums, int *rto_ok)
2841 {
2842 	struct sctp_tmit_chunk *tp1;
2843 	unsigned int theTSN;
2844 	int j, wake_him = 0, circled = 0;
2845 
2846 	/* Recover the tp1 we last saw */
2847 	tp1 = *p_tp1;
2848 	if (tp1 == NULL) {
2849 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2850 	}
2851 	for (j = frag_strt; j <= frag_end; j++) {
2852 		theTSN = j + last_tsn;
2853 		while (tp1) {
2854 			if (tp1->rec.data.doing_fast_retransmit)
2855 				(*num_frs) += 1;
2856 
2857 			/*-
2858 			 * CMT: CUCv2 algorithm. For each TSN being
2859 			 * processed from the sent queue, track the
2860 			 * next expected pseudo-cumack, or
2861 			 * rtx_pseudo_cumack, if required. Separate
2862 			 * cumack trackers for first transmissions,
2863 			 * and retransmissions.
2864 			 */
2865 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2866 			    (tp1->snd_count == 1)) {
2867 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2868 				tp1->whoTo->find_pseudo_cumack = 0;
2869 			}
2870 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2871 			    (tp1->snd_count > 1)) {
2872 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2873 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2874 			}
2875 			if (tp1->rec.data.TSN_seq == theTSN) {
2876 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2877 					/*-
2878 					 * must be held until
2879 					 * cum-ack passes
2880 					 */
2881 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2882 						/*-
2883 						 * If it is less than RESEND, it is
2884 						 * now no-longer in flight.
2885 						 * Higher values may already be set
2886 						 * via previous Gap Ack Blocks...
2887 						 * i.e. ACKED or RESEND.
2888 						 */
2889 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2890 						    *biggest_newly_acked_tsn)) {
2891 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2892 						}
2893 						/*-
2894 						 * CMT: SFR algo (and HTNA) - set
2895 						 * saw_newack to 1 for dest being
2896 						 * newly acked. update
2897 						 * this_sack_highest_newack if
2898 						 * appropriate.
2899 						 */
2900 						if (tp1->rec.data.chunk_was_revoked == 0)
2901 							tp1->whoTo->saw_newack = 1;
2902 
2903 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2904 						    tp1->whoTo->this_sack_highest_newack)) {
2905 							tp1->whoTo->this_sack_highest_newack =
2906 							    tp1->rec.data.TSN_seq;
2907 						}
2908 						/*-
2909 						 * CMT DAC algo: also update
2910 						 * this_sack_lowest_newack
2911 						 */
2912 						if (*this_sack_lowest_newack == 0) {
2913 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2914 								sctp_log_sack(*this_sack_lowest_newack,
2915 								    last_tsn,
2916 								    tp1->rec.data.TSN_seq,
2917 								    0,
2918 								    0,
2919 								    SCTP_LOG_TSN_ACKED);
2920 							}
2921 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2922 						}
2923 						/*-
2924 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2925 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2926 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2927 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2928 						 * Separate pseudo_cumack trackers for first transmissions and
2929 						 * retransmissions.
2930 						 */
2931 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2932 							if (tp1->rec.data.chunk_was_revoked == 0) {
2933 								tp1->whoTo->new_pseudo_cumack = 1;
2934 							}
2935 							tp1->whoTo->find_pseudo_cumack = 1;
2936 						}
2937 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2938 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2939 						}
2940 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2941 							if (tp1->rec.data.chunk_was_revoked == 0) {
2942 								tp1->whoTo->new_pseudo_cumack = 1;
2943 							}
2944 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2945 						}
2946 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2947 							sctp_log_sack(*biggest_newly_acked_tsn,
2948 							    last_tsn,
2949 							    tp1->rec.data.TSN_seq,
2950 							    frag_strt,
2951 							    frag_end,
2952 							    SCTP_LOG_TSN_ACKED);
2953 						}
2954 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2955 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2956 							    tp1->whoTo->flight_size,
2957 							    tp1->book_size,
2958 							    (uintptr_t) tp1->whoTo,
2959 							    tp1->rec.data.TSN_seq);
2960 						}
2961 						sctp_flight_size_decrease(tp1);
2962 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2963 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2964 							    tp1);
2965 						}
2966 						sctp_total_flight_decrease(stcb, tp1);
2967 
2968 						tp1->whoTo->net_ack += tp1->send_size;
2969 						if (tp1->snd_count < 2) {
2970 							/*-
2971 							 * True non-retransmitted chunk
2972 							 */
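							/*-
							 * Per Karn's rule we only
							 * sample the RTT from chunks
							 * sent exactly once.
							 */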
2973 							tp1->whoTo->net_ack2 += tp1->send_size;
2974 
2975 							/*-
2976 							 * update RTO too ?
2977 							 */
2978 							if (tp1->do_rtt) {
2979 								if (*rto_ok) {
2980 									tp1->whoTo->RTO =
2981 									    sctp_calculate_rto(stcb,
2982 									    &stcb->asoc,
2983 									    tp1->whoTo,
2984 									    &tp1->sent_rcv_time,
2985 									    sctp_align_safe_nocopy,
2986 									    SCTP_RTT_FROM_DATA);
2987 									*rto_ok = 0;
2988 								}
2989 								if (tp1->whoTo->rto_needed == 0) {
2990 									tp1->whoTo->rto_needed = 1;
2991 								}
2992 								tp1->do_rtt = 0;
2993 							}
2994 						}
2995 					}
2996 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2997 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2998 						    stcb->asoc.this_sack_highest_gap)) {
2999 							stcb->asoc.this_sack_highest_gap =
3000 							    tp1->rec.data.TSN_seq;
3001 						}
3002 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3003 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3004 #ifdef SCTP_AUDITING_ENABLED
3005 							sctp_audit_log(0xB2,
3006 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3007 #endif
3008 						}
3009 					}
3010 					/*-
3011 					 * All chunks NOT UNSENT fall through here and are marked
3012 					 * (leave PR-SCTP ones that are to skip alone though)
3013 					 */
3014 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3015 						tp1->sent = SCTP_DATAGRAM_MARKED;
3016 
3017 					if (tp1->rec.data.chunk_was_revoked) {
3018 						/* deflate the cwnd */
3019 						tp1->whoTo->cwnd -= tp1->book_size;
3020 						tp1->rec.data.chunk_was_revoked = 0;
3021 					}
3022 					/* NR Sack code here */
3023 					if (nr_sacking) {
3024 						if (tp1->data) {
3025 							/*
3026 							 * sa_ignore
3027 							 * NO_NULL_CHK
3028 							 */
3029 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3030 							sctp_m_freem(tp1->data);
3031 							tp1->data = NULL;
3032 						}
3033 						wake_him++;
3034 					}
3035 				}
3036 				break;
3037 			}	/* if (tp1->TSN_seq == theTSN) */
3038 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3039 				break;
3040 			}
3041 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3042 			if ((tp1 == NULL) && (circled == 0)) {
3043 				circled++;
3044 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3045 			}
3046 		}		/* end while (tp1) */
3047 		if (tp1 == NULL) {
3048 			circled = 0;
3049 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3050 		}
3051 		/* In case the fragments were not in order we must reset */
3052 	}			/* end for (j = fragStart */
3053 	*p_tp1 = tp1;
3054 	return (wake_him);	/* Return value only used for nr-sack */
3055 }
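
/*
 * A sketch of what sctp_process_segment_range() walks (numbers are
 * illustrative): a SACK with cum-ack 1000 and a gap ack block with
 * start = 2, end = 4 acknowledges TSNs 1002 through 1004, so the loop
 * above visits theTSN = last_tsn + j for each j in [frag_strt, frag_end].
 */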
3056 
3057 
3058 static int
3059 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3060     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3061     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3062     int num_seg, int num_nr_seg, int *ecn_seg_sums,
3063     int *rto_ok)
3064 {
3065 	struct sctp_gap_ack_block *frag, block;
3066 	struct sctp_tmit_chunk *tp1;
3067 	int i;
3068 	int num_frs = 0;
3069 	int chunk_freed;
3070 	int non_revocable;
3071 	uint16_t frag_strt, frag_end, prev_frag_end;
3072 
3073 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3074 	prev_frag_end = 0;
3075 	chunk_freed = 0;
3076 
3077 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3078 		if (i == num_seg) {
3079 			prev_frag_end = 0;
3080 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3081 		}
3082 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3083 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3084 		*offset += sizeof(block);
3085 		if (frag == NULL) {
3086 			return (chunk_freed);
3087 		}
3088 		frag_strt = ntohs(frag->start);
3089 		frag_end = ntohs(frag->end);
3090 
3091 		if (frag_strt > frag_end) {
3092 			/* This gap report is malformed, skip it. */
3093 			continue;
3094 		}
3095 		if (frag_strt <= prev_frag_end) {
3096 			/* This gap report is not in order, so restart. */
3097 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3098 		}
3099 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3100 			*biggest_tsn_acked = last_tsn + frag_end;
3101 		}
3102 		if (i < num_seg) {
3103 			non_revocable = 0;
3104 		} else {
3105 			non_revocable = 1;
3106 		}
3107 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3108 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3109 		    this_sack_lowest_newack, ecn_seg_sums, rto_ok)) {
3110 			chunk_freed = 1;
3111 		}
3112 		prev_frag_end = frag_end;
3113 	}
3114 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3115 		if (num_frs)
3116 			sctp_log_fr(*biggest_tsn_acked,
3117 			    *biggest_newly_acked_tsn,
3118 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3119 	}
3120 	return (chunk_freed);
3121 }
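
/*
 * A minimal sketch of what one iteration above does with a gap-ack
 * block: the 16-bit start/end values are offsets from the cumulative
 * TSN ack, so the block acks the absolute range
 * [cum_ack + start, cum_ack + end].  A block with start > end is
 * malformed and is skipped.  The helper name is illustrative only.
 */
static int
example_gap_block_range(uint32_t cum_ack, uint16_t start, uint16_t end,
    uint32_t *first_tsn, uint32_t *last_tsn)
{
	if (start > end) {
		/* malformed, caller should skip this block */
		return (0);
	}
	*first_tsn = cum_ack + start;
	*last_tsn = cum_ack + end;
	return (1);
}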
3122 
3123 static void
3124 sctp_check_for_revoked(struct sctp_tcb *stcb,
3125     struct sctp_association *asoc, uint32_t cumack,
3126     uint32_t biggest_tsn_acked)
3127 {
3128 	struct sctp_tmit_chunk *tp1;
3129 	int tot_revoked = 0;
3130 
3131 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3132 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3133 			/*
3134 			 * ok this guy is either ACKED or MARKED. If it is
3135 			 * ACKED it has been previously acked but not this
3136 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3137 			 * again.
3138 			 */
3139 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3140 				break;
3141 			}
3142 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3143 				/* it has been revoked */
3144 				tp1->sent = SCTP_DATAGRAM_SENT;
3145 				tp1->rec.data.chunk_was_revoked = 1;
3146 				/*
3147 				 * We must add this stuff back in to assure
3148 				 * timers and such get started.
3149 				 */
3150 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3151 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3152 					    tp1->whoTo->flight_size,
3153 					    tp1->book_size,
3154 					    (uintptr_t) tp1->whoTo,
3155 					    tp1->rec.data.TSN_seq);
3156 				}
3157 				sctp_flight_size_increase(tp1);
3158 				sctp_total_flight_increase(stcb, tp1);
3159 				/*
3160 				 * We inflate the cwnd to compensate for our
3161 				 * artificial inflation of the flight_size.
3162 				 */
3163 				tp1->whoTo->cwnd += tp1->book_size;
3164 				tot_revoked++;
3165 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3166 					sctp_log_sack(asoc->last_acked_seq,
3167 					    cumack,
3168 					    tp1->rec.data.TSN_seq,
3169 					    0,
3170 					    0,
3171 					    SCTP_LOG_TSN_REVOKED);
3172 				}
3173 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3174 				/* it has been re-acked in this SACK */
3175 				tp1->sent = SCTP_DATAGRAM_ACKED;
3176 			}
3177 		}
3178 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3179 			break;
3180 	}
3181 }
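
/*
 * A minimal sketch of the state transitions sctp_check_for_revoked()
 * applies to chunks above the cumulative ack.  The enum values are
 * illustrative stand-ins for the SCTP_DATAGRAM_* constants, not the
 * real ones.
 */
enum example_chunk_state { EX_SENT, EX_ACKED, EX_MARKED };

static enum example_chunk_state
example_revoke_transition(enum example_chunk_state cur)
{
	switch (cur) {
	case EX_ACKED:
		/* acked by an earlier SACK but not by this one: revoked */
		return (EX_SENT);
	case EX_MARKED:
		/* acked again by this SACK */
		return (EX_ACKED);
	default:
		return (cur);
	}
}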
3182 
3183 
3184 static void
3185 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3186     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3187 {
3188 	struct sctp_tmit_chunk *tp1;
3189 	int strike_flag = 0;
3190 	struct timeval now;
3191 	int tot_retrans = 0;
3192 	uint32_t sending_seq;
3193 	struct sctp_nets *net;
3194 	int num_dests_sacked = 0;
3195 
3196 	/*
3197 	 * Select the sending_seq: this is either the next thing ready to be
3198 	 * sent but not yet transmitted, OR the next seq we will assign.
3199 	 */
3200 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3201 	if (tp1 == NULL) {
3202 		sending_seq = asoc->sending_seq;
3203 	} else {
3204 		sending_seq = tp1->rec.data.TSN_seq;
3205 	}
3206 
3207 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
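	/*
	 * A SACK is "mixed" when it newly acks data sent to more than one
	 * destination (num_dests_sacked > 1); the extra DAC strike taken
	 * further below requires num_dests_sacked == 1.
	 */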
3208 	if ((asoc->sctp_cmt_on_off > 0) &&
3209 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3210 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3211 			if (net->saw_newack)
3212 				num_dests_sacked++;
3213 		}
3214 	}
3215 	if (stcb->asoc.peer_supports_prsctp) {
3216 		(void)SCTP_GETTIME_TIMEVAL(&now);
3217 	}
3218 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3219 		strike_flag = 0;
3220 		if (tp1->no_fr_allowed) {
3221 			/* this one had a timeout or something */
3222 			continue;
3223 		}
3224 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3225 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3226 				sctp_log_fr(biggest_tsn_newly_acked,
3227 				    tp1->rec.data.TSN_seq,
3228 				    tp1->sent,
3229 				    SCTP_FR_LOG_CHECK_STRIKE);
3230 		}
3231 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3232 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3233 			/* done */
3234 			break;
3235 		}
3236 		if (stcb->asoc.peer_supports_prsctp) {
3237 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3238 				/* Is it expired? */
3239 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3240 					/* Yes so drop it */
3241 					if (tp1->data != NULL) {
3242 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3243 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3244 						    SCTP_SO_NOT_LOCKED);
3245 					}
3246 					continue;
3247 				}
3248 			}
3249 		}
3250 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3251 			/* we are beyond the tsn in the sack  */
3252 			break;
3253 		}
3254 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3255 			/* either a RESEND, ACKED, or MARKED */
3256 			/* skip */
3257 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3258 				/* Continue striking FWD-TSN chunks */
3259 				tp1->rec.data.fwd_tsn_cnt++;
3260 			}
3261 			continue;
3262 		}
3263 		/*
3264 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3265 		 */
3266 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3267 			/*
3268 			 * No new acks were received for data sent to this
3269 			 * dest. Therefore, according to the SFR algo for
3270 			 * CMT, no data sent to this dest can be marked for
3271 			 * FR using this SACK.
3272 			 */
3273 			continue;
3274 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3275 		    tp1->whoTo->this_sack_highest_newack)) {
3276 			/*
3277 			 * CMT: New acks were received for data sent to
3278 			 * this dest. But no new acks were seen for data
3279 			 * sent after tp1. Therefore, according to the SFR
3280 			 * algo for CMT, tp1 cannot be marked for FR using
3281 			 * this SACK. This step covers part of the DAC algo
3282 			 * and the HTNA algo as well.
3283 			 */
3284 			continue;
3285 		}
3286 		/*
3287 		 * Here we check to see if we have already done a FR
3288 		 * and if so we see if the biggest TSN we saw in the sack is
3289 		 * smaller than the recovery point. If so we don't strike
3290 		 * the tsn... otherwise we CAN strike the TSN.
3291 		 */
3292 		/*
3293 		 * @@@ JRI: Check for CMT if (accum_moved &&
3294 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3295 		 * 0)) {
3296 		 */
3297 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3298 			/*
3299 			 * Strike the TSN if in fast-recovery and cum-ack
3300 			 * moved.
3301 			 */
3302 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3303 				sctp_log_fr(biggest_tsn_newly_acked,
3304 				    tp1->rec.data.TSN_seq,
3305 				    tp1->sent,
3306 				    SCTP_FR_LOG_STRIKE_CHUNK);
3307 			}
3308 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3309 				tp1->sent++;
3310 			}
3311 			if ((asoc->sctp_cmt_on_off > 0) &&
3312 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3313 				/*
3314 				 * CMT DAC algorithm: If the SACK flag is set
3315 				 * to 0, then the lowest_newack test will not
3316 				 * pass because it would have been set to the
3317 				 * cumack earlier. If the chunk is not already
3318 				 * to be rtx'd, the SACK is not mixed, and tp1
3319 				 * is not between two sacked TSNs, then mark
3320 				 * it one more time. NOTE that we strike one
3321 				 * additional time since the SACK DAC flag
3322 				 * indicates that two packets have been
3323 				 * received after this missing TSN.
3324 				 */
3325 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3326 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3327 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3328 						sctp_log_fr(16 + num_dests_sacked,
3329 						    tp1->rec.data.TSN_seq,
3330 						    tp1->sent,
3331 						    SCTP_FR_LOG_STRIKE_CHUNK);
3332 					}
3333 					tp1->sent++;
3334 				}
3335 			}
3336 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3337 		    (asoc->sctp_cmt_on_off == 0)) {
3338 			/*
3339 			 * For those that have done a FR we must take
3340 			 * special consideration if we strike. I.e. the
3341 			 * biggest_newly_acked must be higher than the
3342 			 * sending_seq at the time we did the FR.
3343 			 */
3344 			if (
3345 #ifdef SCTP_FR_TO_ALTERNATE
3346 			/*
3347 			 * If FR's go to new networks, then we must only do
3348 			 * this for singly homed asoc's. However if the FR's
3349 			 * go to the same network (Armando's work) then it's
3350 			 * ok to FR multiple times.
3351 			 */
3352 			    (asoc->numnets < 2)
3353 #else
3354 			    (1)
3355 #endif
3356 			    ) {
3357 
3358 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3359 				    tp1->rec.data.fast_retran_tsn)) {
3360 					/*
3361 					 * Strike the TSN, since this ack is
3362 					 * beyond where things were when we
3363 					 * did a FR.
3364 					 */
3365 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3366 						sctp_log_fr(biggest_tsn_newly_acked,
3367 						    tp1->rec.data.TSN_seq,
3368 						    tp1->sent,
3369 						    SCTP_FR_LOG_STRIKE_CHUNK);
3370 					}
3371 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3372 						tp1->sent++;
3373 					}
3374 					strike_flag = 1;
3375 					if ((asoc->sctp_cmt_on_off > 0) &&
3376 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3377 						/*
3378 						 * CMT DAC algorithm: If the
3379 						 * SACK flag is set to 0,
3380 						 * then the lowest_newack
3381 						 * test will not pass
3382 						 * because it would have
3383 						 * been set to the cumack
3384 						 * earlier. If the chunk is
3385 						 * not already to be rtx'd,
3386 						 * the SACK is not mixed,
3387 						 * and tp1 is not between
3388 						 * two sacked TSNs, then
3389 						 * mark it one more time.
3390 						 * NOTE that we strike one
3391 						 * additional time since the
3392 						 * SACK DAC flag indicates
3393 						 * that two packets have
3394 						 * been received after this missing TSN.
3395 						 */
3396 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3397 						    (num_dests_sacked == 1) &&
3398 						    SCTP_TSN_GT(this_sack_lowest_newack,
3399 						    tp1->rec.data.TSN_seq)) {
3400 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3401 								sctp_log_fr(32 + num_dests_sacked,
3402 								    tp1->rec.data.TSN_seq,
3403 								    tp1->sent,
3404 								    SCTP_FR_LOG_STRIKE_CHUNK);
3405 							}
3406 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3407 								tp1->sent++;
3408 							}
3409 						}
3410 					}
3411 				}
3412 			}
3413 			/*
3414 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3415 			 * algo covers HTNA.
3416 			 */
3417 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3418 		    biggest_tsn_newly_acked)) {
3419 			/*
3420 			 * We don't strike these: this is the HTNA
3421 			 * algorithm, i.e. we don't strike if our TSN is
3422 			 * larger than the Highest TSN Newly Acked.
3423 			 */
3424 			;
3425 		} else {
3426 			/* Strike the TSN */
3427 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3428 				sctp_log_fr(biggest_tsn_newly_acked,
3429 				    tp1->rec.data.TSN_seq,
3430 				    tp1->sent,
3431 				    SCTP_FR_LOG_STRIKE_CHUNK);
3432 			}
3433 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3434 				tp1->sent++;
3435 			}
3436 			if ((asoc->sctp_cmt_on_off > 0) &&
3437 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3438 				/*
3439 				 * CMT DAC algorithm: If the SACK flag is set
3440 				 * to 0, then the lowest_newack test will not
3441 				 * pass because it would have been set to the
3442 				 * cumack earlier. If the chunk is not already
3443 				 * to be rtx'd, the SACK is not mixed, and tp1
3444 				 * is not between two sacked TSNs, then mark
3445 				 * it one more time. NOTE that we strike one
3446 				 * additional time since the SACK DAC flag
3447 				 * indicates that two packets have been
3448 				 * received after this missing TSN.
3449 				 */
3450 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3451 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3452 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3453 						sctp_log_fr(48 + num_dests_sacked,
3454 						    tp1->rec.data.TSN_seq,
3455 						    tp1->sent,
3456 						    SCTP_FR_LOG_STRIKE_CHUNK);
3457 					}
3458 					tp1->sent++;
3459 				}
3460 			}
3461 		}
3462 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3463 			struct sctp_nets *alt;
3464 
3465 			/* fix counts and things */
3466 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3467 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3468 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3469 				    tp1->book_size,
3470 				    (uintptr_t) tp1->whoTo,
3471 				    tp1->rec.data.TSN_seq);
3472 			}
3473 			if (tp1->whoTo) {
3474 				tp1->whoTo->net_ack++;
3475 				sctp_flight_size_decrease(tp1);
3476 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3477 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3478 					    tp1);
3479 				}
3480 			}
3481 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3482 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3483 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3484 			}
3485 			/* add back to the rwnd */
3486 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3487 
3488 			/* remove from the total flight */
3489 			sctp_total_flight_decrease(stcb, tp1);
3490 
3491 			if ((stcb->asoc.peer_supports_prsctp) &&
3492 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3493 				/*
3494 				 * Has it been retransmitted tv_sec times? -
3495 				 * we store the retran count there.
3496 				 */
3497 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3498 					/* Yes, so drop it */
3499 					if (tp1->data != NULL) {
3500 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3501 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3502 						    SCTP_SO_NOT_LOCKED);
3503 					}
3504 					/* Make sure to flag we had a FR */
3505 					tp1->whoTo->net_ack++;
3506 					continue;
3507 				}
3508 			}
3509 			/* printf("OK, we are now ready to FR this guy\n"); */
3510 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3511 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3512 				    0, SCTP_FR_MARKED);
3513 			}
3514 			if (strike_flag) {
3515 				/* This is a subsequent FR */
3516 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3517 			}
3518 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3519 			if (asoc->sctp_cmt_on_off > 0) {
3520 				/*
3521 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3522 				 * If CMT is being used, then pick dest with
3523 				 * largest ssthresh for any retransmission.
3524 				 */
3525 				tp1->no_fr_allowed = 1;
3526 				alt = tp1->whoTo;
3527 				/* sa_ignore NO_NULL_CHK */
3528 				if (asoc->sctp_cmt_pf > 0) {
3529 					/*
3530 					 * JRS 5/18/07 - If CMT PF is on,
3531 					 * use the PF version of
3532 					 * find_alt_net()
3533 					 */
3534 					alt = sctp_find_alternate_net(stcb, alt, 2);
3535 				} else {
3536 					/*
3537 					 * JRS 5/18/07 - If only CMT is on,
3538 					 * use the CMT version of
3539 					 * find_alt_net()
3540 					 */
3541 					/* sa_ignore NO_NULL_CHK */
3542 					alt = sctp_find_alternate_net(stcb, alt, 1);
3543 				}
3544 				if (alt == NULL) {
3545 					alt = tp1->whoTo;
3546 				}
3547 				/*
3548 				 * CUCv2: If a different dest is picked for
3549 				 * the retransmission, then new
3550 				 * (rtx-)pseudo_cumack needs to be tracked
3551 				 * for orig dest. Let CUCv2 track new (rtx-)
3552 				 * pseudo-cumack always.
3553 				 */
3554 				if (tp1->whoTo) {
3555 					tp1->whoTo->find_pseudo_cumack = 1;
3556 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3557 				}
3558 			} else {/* CMT is OFF */
3559 
3560 #ifdef SCTP_FR_TO_ALTERNATE
3561 				/* Can we find an alternate? */
3562 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3563 #else
3564 				/*
3565 				 * default behavior is to NOT retransmit
3566 				 * FR's to an alternate. Armando Caro's
3567 				 * paper details why.
3568 				 */
3569 				alt = tp1->whoTo;
3570 #endif
3571 			}
3572 
3573 			tp1->rec.data.doing_fast_retransmit = 1;
3574 			tot_retrans++;
3575 			/* mark the sending seq for possible subsequent FR's */
3576 			/*
3577 			 * printf("Marking TSN for FR new value %x\n",
3578 			 * (uint32_t)tpi->rec.data.TSN_seq);
3579 			 * (uint32_t)tp1->rec.data.TSN_seq);
3580 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3581 				/*
3582 				 * If the send queue is empty then it's
3583 				 * the next sequence number that will be
3584 				 * assigned, so we subtract one from this to
3585 				 * get the one we last sent.
3586 				 */
3587 				tp1->rec.data.fast_retran_tsn = sending_seq;
3588 			} else {
3589 				/*
3590 				 * If there are chunks on the send queue
3591 				 * (unsent data that has made it from the
3592 				 * stream queues but not out the door), we
3593 				 * take the first one (which will have the
3594 				 * lowest TSN) and subtract one to get the
3595 				 * one we last sent.
3596 				 */
3597 				struct sctp_tmit_chunk *ttt;
3598 
3599 				ttt = TAILQ_FIRST(&asoc->send_queue);
3600 				tp1->rec.data.fast_retran_tsn =
3601 				    ttt->rec.data.TSN_seq;
3602 			}
3603 
3604 			if (tp1->do_rtt) {
3605 				/*
3606 				 * this guy had an RTO calculation pending on
3607 				 * it, cancel it
3608 				 */
3609 				if (tp1->whoTo->rto_needed == 0) {
3610 					tp1->whoTo->rto_needed = 1;
3611 				}
3612 				tp1->do_rtt = 0;
3613 			}
3614 			if (alt != tp1->whoTo) {
3615 				/* yes, there is an alternate. */
3616 				sctp_free_remote_addr(tp1->whoTo);
3617 				/* sa_ignore FREED_MEMORY */
3618 				tp1->whoTo = alt;
3619 				atomic_add_int(&alt->ref_count, 1);
3620 			}
3621 		}
3622 	}
3623 }
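
/*
 * A minimal sketch of the strike accounting performed above: each
 * qualifying SACK increments tp1->sent until it reaches
 * SCTP_DATAGRAM_RESEND, and the CMT DAC rule may add one extra strike
 * for the same SACK.  "threshold" stands in for SCTP_DATAGRAM_RESEND;
 * the helper name is illustrative only.
 */
static int
example_strike(int sent, int threshold, int dac_extra_strike)
{
	if (sent < threshold)
		sent++;
	if (dac_extra_strike && (sent < threshold)) {
		/* DAC flag: two packets arrived beyond the missing TSN */
		sent++;
	}
	return (sent);
}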
3624 
3625 struct sctp_tmit_chunk *
3626 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3627     struct sctp_association *asoc)
3628 {
3629 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3630 	struct timeval now;
3631 	int now_filled = 0;
3632 
3633 	if (asoc->peer_supports_prsctp == 0) {
3634 		return (NULL);
3635 	}
3636 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3637 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3638 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3639 			/* no chance to advance, out of here */
3640 			break;
3641 		}
3642 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3643 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3644 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3645 				    asoc->advanced_peer_ack_point,
3646 				    tp1->rec.data.TSN_seq, 0, 0);
3647 			}
3648 		}
3649 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3650 			/*
3651 			 * We can't fwd-tsn past any that are reliable, i.e.
3652 			 * retransmitted until the asoc fails.
3653 			 */
3654 			break;
3655 		}
3656 		if (!now_filled) {
3657 			(void)SCTP_GETTIME_TIMEVAL(&now);
3658 			now_filled = 1;
3659 		}
3660 		/*
3661 		 * Now we have a chunk which is marked for another
3662 		 * retransmission to a PR-stream but may have run out of its
3663 		 * chances already OR has been marked to skip now. Can we skip
3664 		 * it if it's a resend?
3665 		 */
3666 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3667 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3668 			/*
3669 			 * Now is this one marked for resend and its time is
3670 			 * now up?
3671 			 */
3672 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3673 				/* Yes so drop it */
3674 				if (tp1->data) {
3675 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3676 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3677 					    SCTP_SO_NOT_LOCKED);
3678 				}
3679 			} else {
3680 				/*
3681 				 * No, we are done when we hit one marked for
3682 				 * resend whose time has not expired.
3683 				 */
3684 				break;
3685 			}
3686 		}
3687 		/*
3688 		 * Ok now if this chunk is marked to drop, we can clean up
3689 		 * the chunk, advance our peer ack point and we can check
3690 		 * the next chunk.
3691 		 */
3692 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3693 			/* the advanced PeerAckPoint goes forward */
3694 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3695 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3696 				a_adv = tp1;
3697 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3698 				/* No update but we do save the chk */
3699 				a_adv = tp1;
3700 			}
3701 		} else {
3702 			/*
3703 			 * If it is still in RESEND we can advance no
3704 			 * further
3705 			 */
3706 			break;
3707 		}
3708 	}
3709 	return (a_adv);
3710 }
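
/*
 * A minimal sketch of how sctp_try_advance_peer_ack_point() moves the
 * PR-SCTP Advanced.Peer.Ack.Point: walk the sent queue in TSN order,
 * advance over chunks marked to be skipped, and stop at the first one
 * that must still be delivered.  Serial-number wrap is ignored here
 * for brevity; the helper and its arrays are illustrative only.
 */
static uint32_t
example_advance_ack_point(uint32_t adv_point, const uint32_t *tsn,
    const int *skippable, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (!skippable[i]) {
			/* reliable or still pending: no further advance */
			break;
		}
		if (tsn[i] > adv_point)
			adv_point = tsn[i];
	}
	return (adv_point);
}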
3711 
3712 static int
3713 sctp_fs_audit(struct sctp_association *asoc)
3714 {
3715 	struct sctp_tmit_chunk *chk;
3716 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3717 	int entry_flight, entry_cnt, ret;
3718 
3719 	entry_flight = asoc->total_flight;
3720 	entry_cnt = asoc->total_flight_count;
3721 	ret = 0;
3722 
3723 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3724 		return (0);
3725 
3726 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3727 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3728 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3729 			    chk->rec.data.TSN_seq,
3730 			    chk->send_size,
3731 			    chk->snd_count
3732 			    );
3733 			inflight++;
3734 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3735 			resend++;
3736 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3737 			inbetween++;
3738 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3739 			above++;
3740 		} else {
3741 			acked++;
3742 		}
3743 	}
3744 
3745 	if ((inflight > 0) || (inbetween > 0)) {
3746 #ifdef INVARIANTS
3747 		panic("Flight size-express incorrect?\n");
3748 #else
3749 		printf("asoc->total_flight:%d cnt:%d\n",
3750 		    entry_flight, entry_cnt);
3751 
3752 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3753 		    inflight, inbetween, resend, above, acked);
3754 		ret = 1;
3755 #endif
3756 	}
3757 	return (ret);
3758 }
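
/*
 * A minimal sketch of the accounting invariant sctp_fs_audit() guards:
 * the association's total_flight should equal the sum of the send
 * sizes of sent-queue chunks still below SCTP_DATAGRAM_RESEND.  The
 * arrays and the helper name are illustrative stand-ins, not part of
 * the original code.
 */
static int
example_flight_consistent(const int *sent, const uint32_t *send_size,
    int cnt, int resend_threshold, uint32_t total_flight)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < cnt; i++) {
		if (sent[i] < resend_threshold)
			sum += send_size[i];
	}
	return (sum == total_flight);
}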
3759 
3760 
3761 static void
3762 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3763     struct sctp_association *asoc,
3764     struct sctp_nets *net,
3765     struct sctp_tmit_chunk *tp1)
3766 {
3767 	tp1->window_probe = 0;
3768 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3769 		/* TSNs skipped; we do NOT move back. */
3770 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3771 		    tp1->whoTo->flight_size,
3772 		    tp1->book_size,
3773 		    (uintptr_t) tp1->whoTo,
3774 		    tp1->rec.data.TSN_seq);
3775 		return;
3776 	}
3777 	/* First set this up by shrinking the flight */
3778 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3779 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3780 		    tp1);
3781 	}
3782 	sctp_flight_size_decrease(tp1);
3783 	sctp_total_flight_decrease(stcb, tp1);
3784 	/* Now mark for resend */
3785 	tp1->sent = SCTP_DATAGRAM_RESEND;
3786 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3787 
3788 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3789 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3790 		    tp1->whoTo->flight_size,
3791 		    tp1->book_size,
3792 		    (uintptr_t) tp1->whoTo,
3793 		    tp1->rec.data.TSN_seq);
3794 	}
3795 }
3796 
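/*
 * Express SACK processing: this path handles a SACK that carries only
 * a new cumulative ack (no gap-ack blocks to walk), so the sent queue
 * can be drained from the front without any per-fragment marking.
 */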
3797 void
3798 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3799     uint32_t rwnd, int *abort_now, int ecne_seen)
3800 {
3801 	struct sctp_nets *net;
3802 	struct sctp_association *asoc;
3803 	struct sctp_tmit_chunk *tp1, *tp2;
3804 	uint32_t old_rwnd;
3805 	int win_probe_recovery = 0;
3806 	int win_probe_recovered = 0;
3807 	int j, done_once = 0;
3808 	int rto_ok = 1;
3809 
3810 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3811 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3812 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3813 	}
3814 	SCTP_TCB_LOCK_ASSERT(stcb);
3815 #ifdef SCTP_ASOCLOG_OF_TSNS
3816 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3817 	stcb->asoc.cumack_log_at++;
3818 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3819 		stcb->asoc.cumack_log_at = 0;
3820 	}
3821 #endif
3822 	asoc = &stcb->asoc;
3823 	old_rwnd = asoc->peers_rwnd;
3824 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3825 		/* old ack */
3826 		return;
3827 	} else if (asoc->last_acked_seq == cumack) {
3828 		/* Window update sack */
3829 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3830 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3831 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3832 			/* SWS sender side engages */
3833 			asoc->peers_rwnd = 0;
3834 		}
3835 		if (asoc->peers_rwnd > old_rwnd) {
3836 			goto again;
3837 		}
3838 		return;
3839 	}
3840 	/* First setup for CC stuff */
3841 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3842 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3843 			/* Drag along the window_tsn for cwr's */
3844 			net->cwr_window_tsn = cumack;
3845 		}
3846 		net->prev_cwnd = net->cwnd;
3847 		net->net_ack = 0;
3848 		net->net_ack2 = 0;
3849 
3850 		/*
3851 		 * CMT: Reset CUC and Fast recovery algo variables before
3852 		 * SACK processing
3853 		 */
3854 		net->new_pseudo_cumack = 0;
3855 		net->will_exit_fast_recovery = 0;
3856 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3857 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3858 		}
3859 	}
3860 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3861 		uint32_t send_s;
3862 
3863 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3864 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3865 			    sctpchunk_listhead);
3866 			send_s = tp1->rec.data.TSN_seq + 1;
3867 		} else {
3868 			send_s = asoc->sending_seq;
3869 		}
3870 		if (SCTP_TSN_GE(cumack, send_s)) {
3871 #ifndef INVARIANTS
3872 			struct mbuf *oper;
3873 
3874 #endif
3875 #ifdef INVARIANTS
3876 			panic("Impossible sack 1");
3877 #else
3878 
3879 			*abort_now = 1;
3880 			/* XXX */
3881 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3882 			    0, M_DONTWAIT, 1, MT_DATA);
3883 			if (oper) {
3884 				struct sctp_paramhdr *ph;
3885 				uint32_t *ippp;
3886 
3887 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3888 				    sizeof(uint32_t);
3889 				ph = mtod(oper, struct sctp_paramhdr *);
3890 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3891 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3892 				ippp = (uint32_t *) (ph + 1);
3893 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3894 			}
3895 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3896 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3897 			return;
3898 #endif
3899 		}
3900 	}
3901 	asoc->this_sack_highest_gap = cumack;
3902 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3903 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3904 		    stcb->asoc.overall_error_count,
3905 		    0,
3906 		    SCTP_FROM_SCTP_INDATA,
3907 		    __LINE__);
3908 	}
3909 	stcb->asoc.overall_error_count = 0;
3910 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3911 		/* process the new consecutive TSN first */
3912 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3913 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3914 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3915 					printf("Warning, an unsent is now acked?\n");
3916 				}
3917 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3918 					/*
3919 					 * If it is less than ACKED, it is
3920 					 * now no longer in flight. Higher
3921 					 * values may occur during marking
3922 					 */
3923 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3924 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3925 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3926 							    tp1->whoTo->flight_size,
3927 							    tp1->book_size,
3928 							    (uintptr_t) tp1->whoTo,
3929 							    tp1->rec.data.TSN_seq);
3930 						}
3931 						sctp_flight_size_decrease(tp1);
3932 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3933 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3934 							    tp1);
3935 						}
3936 						/* sa_ignore NO_NULL_CHK */
3937 						sctp_total_flight_decrease(stcb, tp1);
3938 					}
3939 					tp1->whoTo->net_ack += tp1->send_size;
3940 					if (tp1->snd_count < 2) {
3941 						/*
3942 						 * True non-retransmitted
3943 						 * chunk
3944 						 */
3945 						tp1->whoTo->net_ack2 +=
3946 						    tp1->send_size;
3947 
3948 						/* update RTO too? */
3949 						if (tp1->do_rtt) {
3950 							if (rto_ok) {
3951 								tp1->whoTo->RTO =
3952 								/* sa_ignore NO_NULL_CHK */
3957 								    sctp_calculate_rto(stcb,
3958 								    asoc, tp1->whoTo,
3959 								    &tp1->sent_rcv_time,
3960 								    sctp_align_safe_nocopy,
3961 								    SCTP_RTT_FROM_DATA);
3962 								rto_ok = 0;
3963 							}
3964 							if (tp1->whoTo->rto_needed == 0) {
3965 								tp1->whoTo->rto_needed = 1;
3966 							}
3967 							tp1->do_rtt = 0;
3968 						}
3969 					}
3970 					/*
3971 					 * CMT: CUCv2 algorithm. From the
3972 					 * cumack'd TSNs, for each TSN being
3973 					 * acked for the first time, set the
3974 					 * following variables for the
3975 					 * corresponding destination.
3976 					 * new_pseudo_cumack will trigger a
3977 					 * cwnd update.
3978 					 * find_(rtx_)pseudo_cumack will
3979 					 * trigger search for the next
3980 					 * expected (rtx-)pseudo-cumack.
3981 					 */
3982 					tp1->whoTo->new_pseudo_cumack = 1;
3983 					tp1->whoTo->find_pseudo_cumack = 1;
3984 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3985 
3986 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3987 						/* sa_ignore NO_NULL_CHK */
3988 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3989 					}
3990 				}
3991 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3992 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3993 				}
3994 				if (tp1->rec.data.chunk_was_revoked) {
3995 					/* deflate the cwnd */
3996 					tp1->whoTo->cwnd -= tp1->book_size;
3997 					tp1->rec.data.chunk_was_revoked = 0;
3998 				}
3999 				tp1->sent = SCTP_DATAGRAM_ACKED;
4000 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4001 				if (tp1->data) {
4002 					/* sa_ignore NO_NULL_CHK */
4003 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4004 					sctp_m_freem(tp1->data);
4005 					tp1->data = NULL;
4006 				}
4007 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4008 					sctp_log_sack(asoc->last_acked_seq,
4009 					    cumack,
4010 					    tp1->rec.data.TSN_seq,
4011 					    0,
4012 					    0,
4013 					    SCTP_LOG_FREE_SENT);
4014 				}
4015 				asoc->sent_queue_cnt--;
4016 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4017 			} else {
4018 				break;
4019 			}
4020 		}
4021 
4022 	}
4023 	/* sa_ignore NO_NULL_CHK */
4024 	if (stcb->sctp_socket) {
4025 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4026 		struct socket *so;
4027 
4028 #endif
4029 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4030 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4031 			/* sa_ignore NO_NULL_CHK */
4032 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4033 		}
4034 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4035 		so = SCTP_INP_SO(stcb->sctp_ep);
4036 		atomic_add_int(&stcb->asoc.refcnt, 1);
4037 		SCTP_TCB_UNLOCK(stcb);
4038 		SCTP_SOCKET_LOCK(so, 1);
4039 		SCTP_TCB_LOCK(stcb);
4040 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4041 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4042 			/* assoc was freed while we were unlocked */
4043 			SCTP_SOCKET_UNLOCK(so, 1);
4044 			return;
4045 		}
4046 #endif
4047 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4048 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4049 		SCTP_SOCKET_UNLOCK(so, 1);
4050 #endif
4051 	} else {
4052 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4053 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4054 		}
4055 	}
4056 
4057 	/* JRS - Use the congestion control given in the CC module */
4058 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4059 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4060 			if (net->net_ack2 > 0) {
4061 				/*
4062 				 * Karn's rule applies to clearing the error
4063 				 * count; this is optional.
4064 				 */
4065 				net->error_count = 0;
4066 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4067 					/* addr came good */
4068 					net->dest_state |= SCTP_ADDR_REACHABLE;
4069 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4070 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4071 				}
4072 				if (net == stcb->asoc.primary_destination) {
4073 					if (stcb->asoc.alternate) {
4074 						/*
4075 						 * release the alternate,
4076 						 * primary is good
4077 						 */
4078 						sctp_free_remote_addr(stcb->asoc.alternate);
4079 						stcb->asoc.alternate = NULL;
4080 					}
4081 				}
4082 				if (net->dest_state & SCTP_ADDR_PF) {
4083 					net->dest_state &= ~SCTP_ADDR_PF;
4084 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4085 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4086 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4087 					/* Done with this net */
4088 					net->net_ack = 0;
4089 				}
4090 				/* restore any doubled timers */
4091 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4092 				if (net->RTO < stcb->asoc.minrto) {
4093 					net->RTO = stcb->asoc.minrto;
4094 				}
4095 				if (net->RTO > stcb->asoc.maxrto) {
4096 					net->RTO = stcb->asoc.maxrto;
4097 				}
4098 			}
4099 		}
4100 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4101 	}
4102 	asoc->last_acked_seq = cumack;
4103 
4104 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4105 		/* nothing left in-flight */
4106 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4107 			net->flight_size = 0;
4108 			net->partial_bytes_acked = 0;
4109 		}
4110 		asoc->total_flight = 0;
4111 		asoc->total_flight_count = 0;
4112 	}
4113 	/* RWND update */
4114 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4115 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4116 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4117 		/* SWS sender side engages */
4118 		asoc->peers_rwnd = 0;
4119 	}
4120 	if (asoc->peers_rwnd > old_rwnd) {
4121 		win_probe_recovery = 1;
4122 	}
4123 	/* Now assure a timer is running wherever data is queued */
4124 again:
4125 	j = 0;
4126 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4127 		int to_ticks;
4128 
4129 		if (win_probe_recovery && (net->window_probe)) {
4130 			win_probe_recovered = 1;
4131 			/*
4132 			 * Find the first chunk that was used with the window
4133 			 * probe and move it back to the send queue
4134 			 */
4135 			/* sa_ignore FREED_MEMORY */
4136 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4137 				if (tp1->window_probe) {
4138 					/* move back to data send queue */
4139 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4140 					break;
4141 				}
4142 			}
4143 		}
4144 		if (net->RTO == 0) {
4145 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4146 		} else {
4147 			to_ticks = MSEC_TO_TICKS(net->RTO);
4148 		}
4149 		if (net->flight_size) {
4150 			j++;
4151 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4152 			    sctp_timeout_handler, &net->rxt_timer);
4153 			if (net->window_probe) {
4154 				net->window_probe = 0;
4155 			}
4156 		} else {
4157 			if (net->window_probe) {
4158 				/*
4159 				 * In window probes we must assure a timer
4160 				 * is still running there
4161 				 */
4162 				net->window_probe = 0;
4163 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4164 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4165 					    sctp_timeout_handler, &net->rxt_timer);
4166 				}
4167 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4168 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4169 				    stcb, net,
4170 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4171 			}
4172 		}
4173 	}
4174 	if ((j == 0) &&
4175 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4176 	    (asoc->sent_queue_retran_cnt == 0) &&
4177 	    (win_probe_recovered == 0) &&
4178 	    (done_once == 0)) {
4179 		/*
4180 		 * huh, this should not happen unless all packets are
4181 		 * PR-SCTP and marked to skip of course.
4182 		 */
4183 		if (sctp_fs_audit(asoc)) {
4184 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4185 				net->flight_size = 0;
4186 			}
4187 			asoc->total_flight = 0;
4188 			asoc->total_flight_count = 0;
4189 			asoc->sent_queue_retran_cnt = 0;
4190 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4191 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4192 					sctp_flight_size_increase(tp1);
4193 					sctp_total_flight_increase(stcb, tp1);
4194 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4195 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4196 				}
4197 			}
4198 		}
4199 		done_once = 1;
4200 		goto again;
4201 	}
4202 	/**********************************/
4203 	/* Now what about shutdown issues */
4204 	/**********************************/
4205 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4206 		/* nothing left on send queue... consider done */
4207 		/* clean up */
4208 		if ((asoc->stream_queue_cnt == 1) &&
4209 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4210 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4211 		    (asoc->locked_on_sending)
4212 		    ) {
4213 			struct sctp_stream_queue_pending *sp;
4214 
4215 			/*
4216 			 * I may be in a state where we got it all across... but
4217 			 * cannot write more due to a shutdown... we abort
4218 			 * since the user did not indicate EOR in this case.
4219 			 * The sp will be cleaned during free of the asoc.
4220 			 */
4221 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4222 			    sctp_streamhead);
4223 			if ((sp) && (sp->length == 0)) {
4224 				/* Let cleanup code purge it */
4225 				if (sp->msg_is_complete) {
4226 					asoc->stream_queue_cnt--;
4227 				} else {
4228 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4229 					asoc->locked_on_sending = NULL;
4230 					asoc->stream_queue_cnt--;
4231 				}
4232 			}
4233 		}
4234 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4235 		    (asoc->stream_queue_cnt == 0)) {
4236 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4237 				/* Need to abort here */
4238 				struct mbuf *oper;
4239 
4240 		abort_out_now:
4241 				*abort_now = 1;
4242 				/* XXX */
4243 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4244 				    0, M_DONTWAIT, 1, MT_DATA);
4245 				if (oper) {
4246 					struct sctp_paramhdr *ph;
4247 					uint32_t *ippp;
4248 
4249 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4250 					    sizeof(uint32_t);
4251 					ph = mtod(oper, struct sctp_paramhdr *);
4252 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4253 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4254 					ippp = (uint32_t *) (ph + 1);
4255 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4256 				}
4257 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4258 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4259 			} else {
4260 				struct sctp_nets *netp;
4261 
4262 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4263 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4264 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4265 				}
4266 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4267 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4268 				sctp_stop_timers_for_shutdown(stcb);
4269 				if (asoc->alternate) {
4270 					netp = asoc->alternate;
4271 				} else {
4272 					netp = asoc->primary_destination;
4273 				}
4274 				sctp_send_shutdown(stcb, netp);
4275 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4276 				    stcb->sctp_ep, stcb, netp);
4277 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4278 				    stcb->sctp_ep, stcb, netp);
4279 			}
4280 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4281 		    (asoc->stream_queue_cnt == 0)) {
4282 			struct sctp_nets *netp;
4283 
4284 			if (asoc->alternate) {
4285 				netp = asoc->alternate;
4286 			} else {
4287 				netp = asoc->primary_destination;
4288 			}
4289 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4290 				goto abort_out_now;
4291 			}
4292 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4293 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4294 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4295 			sctp_send_shutdown_ack(stcb, netp);
4296 			sctp_stop_timers_for_shutdown(stcb);
4297 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4298 			    stcb->sctp_ep, stcb, netp);
4299 		}
4300 	}
4301 	/*********************************************/
4302 	/* Here we perform PR-SCTP procedures        */
4303 	/* (section 4.2)                             */
4304 	/*********************************************/
4305 	/* C1. update advancedPeerAckPoint */
4306 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4307 		asoc->advanced_peer_ack_point = cumack;
4308 	}
4309 	/* PR-SCTP issues need to be addressed too */
4310 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4311 		struct sctp_tmit_chunk *lchk;
4312 		uint32_t old_adv_peer_ack_point;
4313 
4314 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4315 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4316 		/* C3. See if we need to send a Fwd-TSN */
4317 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4318 			/*
4319 			 * ISSUE with ECN, see FWD-TSN processing.
4320 			 */
4321 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4322 				send_forward_tsn(stcb, asoc);
4323 			} else if (lchk) {
4324 				/* try to FR fwd-tsn's that get lost too */
4325 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4326 					send_forward_tsn(stcb, asoc);
4327 				}
4328 			}
4329 		}
4330 		if (lchk) {
4331 			/* Assure a timer is up */
4332 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4333 			    stcb->sctp_ep, stcb, lchk->whoTo);
4334 		}
4335 	}
4336 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4337 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4338 		    rwnd,
4339 		    stcb->asoc.peers_rwnd,
4340 		    stcb->asoc.total_flight,
4341 		    stcb->asoc.total_output_queue_size);
4342 	}
4343 }
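
/*
 * A minimal sketch of the peer rwnd bookkeeping used above: the
 * advertised window is reduced by the bytes in flight plus a per-chunk
 * overhead, floored at zero, and a window smaller than the sender-side
 * SWS threshold is treated as zero.  "overhead" stands in for
 * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) and "sws" for the endpoint's
 * sctp_sws_sender; the helper name is illustrative only.
 */
static uint32_t
example_peer_rwnd(uint32_t rwnd, uint32_t flight, uint32_t flight_cnt,
    uint32_t overhead, uint32_t sws)
{
	uint32_t outstanding, peers_rwnd;

	outstanding = flight + (flight_cnt * overhead);
	peers_rwnd = (rwnd > outstanding) ? (rwnd - outstanding) : 0;
	if (peers_rwnd < sws) {
		/* SWS avoidance: sender side engages */
		peers_rwnd = 0;
	}
	return (peers_rwnd);
}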
4344 
4345 void
4346 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4347     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4348     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4349     int *abort_now, uint8_t flags,
4350     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4351 {
4352 	struct sctp_association *asoc;
4353 	struct sctp_tmit_chunk *tp1, *tp2;
4354 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4355 	uint32_t sav_cum_ack;
4356 	uint16_t wake_him = 0;
4357 	uint32_t send_s = 0;
4358 	long j;
4359 	int accum_moved = 0;
4360 	int will_exit_fast_recovery = 0;
4361 	uint32_t a_rwnd, old_rwnd;
4362 	int win_probe_recovery = 0;
4363 	int win_probe_recovered = 0;
4364 	struct sctp_nets *net = NULL;
4365 	int ecn_seg_sums = 0;
4366 	int done_once;
4367 	int rto_ok = 1;
4368 	uint8_t reneged_all = 0;
4369 	uint8_t cmt_dac_flag;
4370 
4371 	/*
4372 	 * we take any chance we can to service our queues since we cannot
4373 	 * get awoken when the socket is read from :<
4374 	 */
4375 	/*
4376 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4377 	 * old sack, if so discard. 2) If there is nothing left in the send
4378 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4379 	 * too, update any rwnd change and verify no timers are running.
4380 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4381 	 * moved process these first and note that it moved. 4) Process any
4382 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4383 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4384 	 * sync up flightsizes and things, stop all timers and also check
4385 	 * for shutdown_pending state. If so then go ahead and send off the
4386 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4387 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4388 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4389 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4390 	 * if in shutdown_recv state.
4391 	 */
4392 	SCTP_TCB_LOCK_ASSERT(stcb);
4393 	/* CMT DAC algo */
4394 	this_sack_lowest_newack = 0;
4395 	j = 0;
4396 	SCTP_STAT_INCR(sctps_slowpath_sack);
4397 	last_tsn = cum_ack;
4398 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4399 #ifdef SCTP_ASOCLOG_OF_TSNS
4400 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4401 	stcb->asoc.cumack_log_at++;
4402 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4403 		stcb->asoc.cumack_log_at = 0;
4404 	}
4405 #endif
4406 	a_rwnd = rwnd;
4407 
4408 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4409 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4410 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4411 	}
4412 	old_rwnd = stcb->asoc.peers_rwnd;
4413 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4414 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4415 		    stcb->asoc.overall_error_count,
4416 		    0,
4417 		    SCTP_FROM_SCTP_INDATA,
4418 		    __LINE__);
4419 	}
4420 	stcb->asoc.overall_error_count = 0;
4421 	asoc = &stcb->asoc;
4422 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4423 		sctp_log_sack(asoc->last_acked_seq,
4424 		    cum_ack,
4425 		    0,
4426 		    num_seg,
4427 		    num_dup,
4428 		    SCTP_LOG_NEW_SACK);
4429 	}
4430 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4431 		uint16_t i;
4432 		uint32_t *dupdata, dblock;
4433 
4434 		for (i = 0; i < num_dup; i++) {
4435 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4436 			    sizeof(uint32_t), (uint8_t *) & dblock);
4437 			if (dupdata == NULL) {
4438 				break;
4439 			}
4440 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4441 		}
4442 	}
4443 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4444 		/* reality check */
4445 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4446 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4447 			    sctpchunk_listhead);
4448 			send_s = tp1->rec.data.TSN_seq + 1;
4449 		} else {
4450 			tp1 = NULL;
4451 			send_s = asoc->sending_seq;
4452 		}
4453 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4454 			struct mbuf *oper;
4455 
4456 			/*
4457 			 * no way, we have not even sent this TSN out yet.
4458 			 * Peer is hopelessly messed up with us.
4459 			 */
4460 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4461 			    cum_ack, send_s);
4462 			if (tp1) {
4463 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4464 				    tp1->rec.data.TSN_seq, tp1);
4465 			}
4466 	hopeless_peer:
4467 			*abort_now = 1;
4468 			/* XXX */
4469 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4470 			    0, M_DONTWAIT, 1, MT_DATA);
4471 			if (oper) {
4472 				struct sctp_paramhdr *ph;
4473 				uint32_t *ippp;
4474 
4475 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4476 				    sizeof(uint32_t);
4477 				ph = mtod(oper, struct sctp_paramhdr *);
4478 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4479 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4480 				ippp = (uint32_t *) (ph + 1);
4481 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4482 			}
4483 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4484 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4485 			return;
4486 		}
4487 	}
4488 	/**********************/
4489 	/* 1) check the range */
4490 	/**********************/
4491 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4492 		/* acking something behind */
4493 		return;
4494 	}
4495 	sav_cum_ack = asoc->last_acked_seq;
4496 
4497 	/* update the Rwnd of the peer */
4498 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4499 	    TAILQ_EMPTY(&asoc->send_queue) &&
4500 	    (asoc->stream_queue_cnt == 0)) {
4501 		/* nothing left on send/sent and strmq */
4502 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4503 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4504 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4505 		}
4506 		asoc->peers_rwnd = a_rwnd;
4507 		if (asoc->sent_queue_retran_cnt) {
4508 			asoc->sent_queue_retran_cnt = 0;
4509 		}
4510 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4511 			/* SWS sender side engages */
4512 			asoc->peers_rwnd = 0;
4513 		}
4514 		/* stop any timers */
4515 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4516 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4517 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4518 			net->partial_bytes_acked = 0;
4519 			net->flight_size = 0;
4520 		}
4521 		asoc->total_flight = 0;
4522 		asoc->total_flight_count = 0;
4523 		return;
4524 	}
4525 	/*
4526 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4527 	 * things. The total byte count acked is tracked in net_ack AND
4528 	 * net_ack2 is used to track the total bytes acked that are
4529 	 * unambiguous and were never retransmitted. We track these on a per
4530 	 * destination address basis.
4531 	 */
4532 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4533 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4534 			/* Drag along the window_tsn for cwr's */
4535 			net->cwr_window_tsn = cum_ack;
4536 		}
4537 		net->prev_cwnd = net->cwnd;
4538 		net->net_ack = 0;
4539 		net->net_ack2 = 0;
4540 
4541 		/*
4542 		 * CMT: Reset CUC and Fast recovery algo variables before
4543 		 * SACK processing
4544 		 */
4545 		net->new_pseudo_cumack = 0;
4546 		net->will_exit_fast_recovery = 0;
4547 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4548 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4549 		}
4550 	}
4551 	/* process the new consecutive TSN first */
4552 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4553 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4554 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4555 				accum_moved = 1;
4556 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4557 					/*
4558 					 * If it is less than ACKED, it is
4559 					 * now no longer in flight. Higher
4560 					 * values may occur during marking
4561 					 */
4562 					if ((tp1->whoTo->dest_state &
4563 					    SCTP_ADDR_UNCONFIRMED) &&
4564 					    (tp1->snd_count < 2)) {
4565 						/*
4566 						 * If there was no retran
4567 						 * and the address is
4568 						 * un-confirmed and we sent
4569 						 * there and are now
4570 						 * sacked... it's confirmed,
4571 						 * mark it so.
4572 						 */
4573 						tp1->whoTo->dest_state &=
4574 						    ~SCTP_ADDR_UNCONFIRMED;
4575 					}
4576 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4577 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4578 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4579 							    tp1->whoTo->flight_size,
4580 							    tp1->book_size,
4581 							    (uintptr_t) tp1->whoTo,
4582 							    tp1->rec.data.TSN_seq);
4583 						}
4584 						sctp_flight_size_decrease(tp1);
4585 						sctp_total_flight_decrease(stcb, tp1);
4586 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4587 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4588 							    tp1);
4589 						}
4590 					}
4591 					tp1->whoTo->net_ack += tp1->send_size;
4592 
4593 					/* CMT SFR and DAC algos */
4594 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4595 					tp1->whoTo->saw_newack = 1;
4596 
4597 					if (tp1->snd_count < 2) {
4598 						/*
4599 						 * True non-retransmitted
4600 						 * chunk
4601 						 */
4602 						tp1->whoTo->net_ack2 +=
4603 						    tp1->send_size;
4604 
4605 						/* update RTO too? */
4606 						if (tp1->do_rtt) {
4607 							if (rto_ok) {
4608 								tp1->whoTo->RTO =
4609 								    sctp_calculate_rto(stcb,
4610 								    asoc, tp1->whoTo,
4611 								    &tp1->sent_rcv_time,
4612 								    sctp_align_safe_nocopy,
4613 								    SCTP_RTT_FROM_DATA);
4614 								rto_ok = 0;
4615 							}
4616 							if (tp1->whoTo->rto_needed == 0) {
4617 								tp1->whoTo->rto_needed = 1;
4618 							}
4619 							tp1->do_rtt = 0;
4620 						}
4621 					}
4622 					/*
4623 					 * CMT: CUCv2 algorithm. From the
4624 					 * cumack'd TSNs, for each TSN being
4625 					 * acked for the first time, set the
4626 					 * following variables for the
4627 					 * corresponding destination.
4628 					 * new_pseudo_cumack will trigger a
4629 					 * cwnd update.
4630 					 * find_(rtx_)pseudo_cumack will
4631 					 * trigger search for the next
4632 					 * expected (rtx-)pseudo-cumack.
4633 					 */
4634 					tp1->whoTo->new_pseudo_cumack = 1;
4635 					tp1->whoTo->find_pseudo_cumack = 1;
4636 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4637 
4638 
4639 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4640 						sctp_log_sack(asoc->last_acked_seq,
4641 						    cum_ack,
4642 						    tp1->rec.data.TSN_seq,
4643 						    0,
4644 						    0,
4645 						    SCTP_LOG_TSN_ACKED);
4646 					}
4647 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4648 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4649 					}
4650 				}
4651 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4652 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4653 #ifdef SCTP_AUDITING_ENABLED
4654 					sctp_audit_log(0xB3,
4655 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4656 #endif
4657 				}
4658 				if (tp1->rec.data.chunk_was_revoked) {
4659 					/* deflate the cwnd */
4660 					tp1->whoTo->cwnd -= tp1->book_size;
4661 					tp1->rec.data.chunk_was_revoked = 0;
4662 				}
4663 				tp1->sent = SCTP_DATAGRAM_ACKED;
4664 			}
4665 		} else {
4666 			break;
4667 		}
4668 	}
4669 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4670 	/* always set this up to cum-ack */
4671 	asoc->this_sack_highest_gap = last_tsn;
4672 
4673 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4674 
4675 		/*
4676 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4677 		 * to be greater than the cumack. Also reset saw_newack to 0
4678 		 * for all dests.
4679 		 */
4680 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4681 			net->saw_newack = 0;
4682 			net->this_sack_highest_newack = last_tsn;
4683 		}
4684 
4685 		/*
4686 		 * this_sack_highest_gap will increase while handling NEW
4687 		 * segments; this_sack_highest_newack will increase while
4688 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4689 		 * used for CMT DAC algo. saw_newack will also change.
4690 		 */
4691 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4692 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4693 		    num_seg, num_nr_seg, &ecn_seg_sums,
4694 		    &rto_ok)) {
4695 			wake_him++;
4696 		}
4697 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4698 			/*
4699 			 * validate the biggest_tsn_acked in the gap acks if
4700 			 * strict adherence is wanted.
4701 			 */
4702 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4703 				/*
4704 				 * peer is either confused or we are under
4705 				 * attack. We must abort.
4706 				 */
4707 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4708 				    biggest_tsn_acked,
4709 				    send_s);
4710 
4711 				goto hopeless_peer;
4712 			}
4713 		}
4714 	}
4715 	/*******************************************/
4716 	/* cancel ALL T3-send timer if accum moved */
4717 	/*******************************************/
4718 	if (asoc->sctp_cmt_on_off > 0) {
4719 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4720 			if (net->new_pseudo_cumack)
4721 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4722 				    stcb, net,
4723 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4724 
4725 		}
4726 	} else {
4727 		if (accum_moved) {
4728 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4729 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4730 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4731 			}
4732 		}
4733 	}
4734 	/********************************************/
4735 	/* drop the acked chunks from the sentqueue */
4736 	/********************************************/
4737 	asoc->last_acked_seq = cum_ack;
4738 
4739 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4740 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4741 			break;
4742 		}
4743 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4744 			/* no more sent on list */
4745 			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4746 			    tp1->sent);
4747 		}
4748 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4749 		if (tp1->pr_sctp_on) {
4750 			if (asoc->pr_sctp_cnt != 0)
4751 				asoc->pr_sctp_cnt--;
4752 		}
4753 		asoc->sent_queue_cnt--;
4754 		if (tp1->data) {
4755 			/* sa_ignore NO_NULL_CHK */
4756 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4757 			sctp_m_freem(tp1->data);
4758 			tp1->data = NULL;
4759 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4760 				asoc->sent_queue_cnt_removeable--;
4761 			}
4762 		}
4763 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4764 			sctp_log_sack(asoc->last_acked_seq,
4765 			    cum_ack,
4766 			    tp1->rec.data.TSN_seq,
4767 			    0,
4768 			    0,
4769 			    SCTP_LOG_FREE_SENT);
4770 		}
4771 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4772 		wake_him++;
4773 	}
4774 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4775 #ifdef INVARIANTS
4776 		panic("Warning flight size is positive and should be 0");
4777 #else
4778 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4779 		    asoc->total_flight);
4780 #endif
4781 		asoc->total_flight = 0;
4782 	}
4783 	/* sa_ignore NO_NULL_CHK */
4784 	if ((wake_him) && (stcb->sctp_socket)) {
4785 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4786 		struct socket *so;
4787 
4788 #endif
4789 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4790 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4791 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4792 		}
4793 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
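		/*
		 * Lock-order dance: the socket lock has to be taken with
		 * the TCB lock dropped, so hold a reference on the
		 * association across the gap and re-check that it was not
		 * freed once both locks are held again.
		 */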
4794 		so = SCTP_INP_SO(stcb->sctp_ep);
4795 		atomic_add_int(&stcb->asoc.refcnt, 1);
4796 		SCTP_TCB_UNLOCK(stcb);
4797 		SCTP_SOCKET_LOCK(so, 1);
4798 		SCTP_TCB_LOCK(stcb);
4799 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4800 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4801 			/* assoc was freed while we were unlocked */
4802 			SCTP_SOCKET_UNLOCK(so, 1);
4803 			return;
4804 		}
4805 #endif
4806 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4807 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4808 		SCTP_SOCKET_UNLOCK(so, 1);
4809 #endif
4810 	} else {
4811 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4812 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4813 		}
4814 	}
4815 
4816 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4817 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4818 			/* Setup so we will exit RFC2582 fast recovery */
4819 			will_exit_fast_recovery = 1;
4820 		}
4821 	}
4822 	/*
4823 	 * Check for revoked fragments:
4824 	 *
4825 	 * If the previous SACK carried no gap reports, nothing can have
4826 	 * been revoked. If it did carry gap reports, then either this SACK
4827 	 * also has gap reports (num_seg > 0), and sctp_check_for_revoked()
4828 	 * tells us whether the peer revoked some of them, or it has none,
4829 	 * and the peer has revoked all previously ACKED fragments.
4830 	 */
4831 
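	/*
	 * For example, if the previous SACK gap-acked TSNs 10-12 and this
	 * SACK carries no gap reports at all, TSNs 10-12 were revoked: the
	 * else branch below returns them to SCTP_DATAGRAM_SENT and puts
	 * them back into the flight.
	 */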
4832 	if (num_seg) {
4833 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4834 		asoc->saw_sack_with_frags = 1;
4835 	} else if (asoc->saw_sack_with_frags) {
4836 		int cnt_revoked = 0;
4837 
4838 		/* Peer revoked all dg's marked or acked */
4839 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4840 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4841 				tp1->sent = SCTP_DATAGRAM_SENT;
4842 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4843 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4844 					    tp1->whoTo->flight_size,
4845 					    tp1->book_size,
4846 					    (uintptr_t) tp1->whoTo,
4847 					    tp1->rec.data.TSN_seq);
4848 				}
4849 				sctp_flight_size_increase(tp1);
4850 				sctp_total_flight_increase(stcb, tp1);
4851 				tp1->rec.data.chunk_was_revoked = 1;
4852 				/*
4853 				 * To ensure that this increase in
4854 				 * flightsize, which is artificial, does not
4855 				 * throttle the sender, we also increase the
4856 				 * cwnd artificially.
4857 				 */
4858 				tp1->whoTo->cwnd += tp1->book_size;
4859 				cnt_revoked++;
4860 			}
4861 		}
4862 		if (cnt_revoked) {
4863 			reneged_all = 1;
4864 		}
4865 		asoc->saw_sack_with_frags = 0;
4866 	}
4867 	if (num_nr_seg > 0)
4868 		asoc->saw_sack_with_nr_frags = 1;
4869 	else
4870 		asoc->saw_sack_with_nr_frags = 0;
4871 
4872 	/* JRS - Use the congestion control given in the CC module */
4873 	if (ecne_seen == 0) {
4874 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4875 			if (net->net_ack2 > 0) {
4876 				/*
4877 				 * Karn's rule applies to clearing error
4878 				 * count, this is optional.
4879 				 */
4880 				net->error_count = 0;
4881 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4882 					/* addr came good */
4883 					net->dest_state |= SCTP_ADDR_REACHABLE;
4884 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4885 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4886 				}
4887 				if (net == stcb->asoc.primary_destination) {
4888 					if (stcb->asoc.alternate) {
4889 						/*
4890 						 * release the alternate,
4891 						 * primary is good
4892 						 */
4893 						sctp_free_remote_addr(stcb->asoc.alternate);
4894 						stcb->asoc.alternate = NULL;
4895 					}
4896 				}
4897 				if (net->dest_state & SCTP_ADDR_PF) {
4898 					net->dest_state &= ~SCTP_ADDR_PF;
4899 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4900 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4901 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4902 					/* Done with this net */
4903 					net->net_ack = 0;
4904 				}
4905 				/* restore any doubled timers */
4906 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4907 				if (net->RTO < stcb->asoc.minrto) {
4908 					net->RTO = stcb->asoc.minrto;
4909 				}
4910 				if (net->RTO > stcb->asoc.maxrto) {
4911 					net->RTO = stcb->asoc.maxrto;
4912 				}
4913 			}
4914 		}
4915 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4916 	}
4917 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4918 		/* nothing left in-flight */
4919 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4920 			/* stop all timers */
4921 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4922 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4923 			net->flight_size = 0;
4924 			net->partial_bytes_acked = 0;
4925 		}
4926 		asoc->total_flight = 0;
4927 		asoc->total_flight_count = 0;
4928 	}
4929 	/**********************************/
4930 	/* Now what about shutdown issues */
4931 	/**********************************/
4932 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4933 		/* nothing left on sendqueue.. consider done */
4934 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4935 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4936 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4937 		}
4938 		asoc->peers_rwnd = a_rwnd;
4939 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4940 			/* SWS sender side engages */
4941 			asoc->peers_rwnd = 0;
4942 		}
4943 		/* clean up */
4944 		if ((asoc->stream_queue_cnt == 1) &&
4945 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4946 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4947 		    (asoc->locked_on_sending)
4948 		    ) {
4949 			struct sctp_stream_queue_pending *sp;
4950 
4951 			/*
4952 			 * We may be in a state where we got everything
4953 			 * across, but cannot write more due to a shutdown.
4954 			 * We abort since the user did not indicate EOR here.
4955 			 */
4956 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4957 			    sctp_streamhead);
4958 			if ((sp) && (sp->length == 0)) {
4959 				asoc->locked_on_sending = NULL;
4960 				if (sp->msg_is_complete) {
4961 					asoc->stream_queue_cnt--;
4962 				} else {
4963 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4964 					asoc->stream_queue_cnt--;
4965 				}
4966 			}
4967 		}
4968 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4969 		    (asoc->stream_queue_cnt == 0)) {
4970 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4971 				/* Need to abort here */
4972 				struct mbuf *oper;
4973 
4974 		abort_out_now:
4975 				*abort_now = 1;
4976 				/* XXX */
4977 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4978 				    0, M_DONTWAIT, 1, MT_DATA);
4979 				if (oper) {
4980 					struct sctp_paramhdr *ph;
4981 					uint32_t *ippp;
4982 
4983 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4984 					    sizeof(uint32_t);
4985 					ph = mtod(oper, struct sctp_paramhdr *);
4986 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4987 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4988 					ippp = (uint32_t *) (ph + 1);
4989 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4990 				}
4991 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4992 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4993 				return;
4994 			} else {
4995 				struct sctp_nets *netp;
4996 
4997 				if (asoc->alternate) {
4998 					netp = asoc->alternate;
4999 				} else {
5000 					netp = asoc->primary_destination;
5001 				}
5002 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5003 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5004 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5005 				}
5006 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5007 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5008 				sctp_stop_timers_for_shutdown(stcb);
5009 				sctp_send_shutdown(stcb, netp);
5010 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5011 				    stcb->sctp_ep, stcb, netp);
5012 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5013 				    stcb->sctp_ep, stcb, netp);
5014 			}
5015 			return;
5016 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5017 		    (asoc->stream_queue_cnt == 0)) {
5018 			struct sctp_nets *netp;
5019 
5020 			if (asoc->alternate) {
5021 				netp = asoc->alternate;
5022 			} else {
5023 				netp = asoc->primary_destination;
5024 			}
5025 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5026 				goto abort_out_now;
5027 			}
5028 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5029 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5030 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5031 			sctp_send_shutdown_ack(stcb, netp);
5032 			sctp_stop_timers_for_shutdown(stcb);
5033 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5034 			    stcb->sctp_ep, stcb, netp);
5035 			return;
5036 		}
5037 	}
5038 	/*
5039 	 * Now here we are going to recycle net_ack for a different use...
5040 	 * HEADS UP.
5041 	 */
5042 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5043 		net->net_ack = 0;
5044 	}
5045 
5046 	/*
5047 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5048 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5049 	 * automatically ensure that.
5050 	 */
5051 	if ((asoc->sctp_cmt_on_off > 0) &&
5052 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5053 	    (cmt_dac_flag == 0)) {
5054 		this_sack_lowest_newack = cum_ack;
5055 	}
5056 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5057 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5058 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5059 	}
5060 	/* JRS - Use the congestion control given in the CC module */
5061 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5062 
5063 	/* Now are we exiting loss recovery ? */
5064 	if (will_exit_fast_recovery) {
5065 		/* Ok, we must exit fast recovery */
5066 		asoc->fast_retran_loss_recovery = 0;
5067 	}
5068 	if ((asoc->sat_t3_loss_recovery) &&
5069 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5070 		/* end satellite t3 loss recovery */
5071 		asoc->sat_t3_loss_recovery = 0;
5072 	}
5073 	/*
5074 	 * CMT Fast recovery
5075 	 */
5076 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5077 		if (net->will_exit_fast_recovery) {
5078 			/* Ok, we must exit fast recovery */
5079 			net->fast_retran_loss_recovery = 0;
5080 		}
5081 	}
5082 
5083 	/* Adjust and set the new rwnd value */
5084 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5085 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5086 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5087 	}
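	/*
	 * The usable window is the advertised rwnd minus the bytes still
	 * in flight and a per-chunk overhead allowance; e.g. with a_rwnd
	 * 20000, 4000 bytes in flight, 4 chunks outstanding, and a
	 * sctp_peer_chunk_oh of 256, peers_rwnd becomes
	 * 20000 - (4000 + 4 * 256) = 14976.
	 */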
5088 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5089 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5090 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5091 		/* SWS sender side engages */
5092 		asoc->peers_rwnd = 0;
5093 	}
5094 	if (asoc->peers_rwnd > old_rwnd) {
5095 		win_probe_recovery = 1;
5096 	}
5097 	/*
5098 	 * Now we must setup so we have a timer up for anyone with
5099 	 * outstanding data.
5100 	 */
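	/*
	 * The pass below restarts or stops the T3-send timer per net. If
	 * afterwards nothing counts as in flight while chunks still sit on
	 * the sent queue, the flight-size accounting is audited, rebuilt
	 * from the sent queue, and the pass is retried once.
	 */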
5101 	done_once = 0;
5102 again:
5103 	j = 0;
5104 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5105 		if (win_probe_recovery && (net->window_probe)) {
5106 			win_probe_recovered = 1;
5107 			/*-
5108 			 * Find first chunk that was used with
5109 			 * window probe and clear the event. Put
5110 			 * it back into the send queue as if it
5111 			 * had not been sent.
5112 			 */
5113 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5114 				if (tp1->window_probe) {
5115 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5116 					break;
5117 				}
5118 			}
5119 		}
5120 		if (net->flight_size) {
5121 			j++;
5122 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5123 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5124 				    stcb->sctp_ep, stcb, net);
5125 			}
5126 			if (net->window_probe) {
5127 				net->window_probe = 0;
5128 			}
5129 		} else {
5130 			if (net->window_probe) {
5131 				/*
5132 				 * For window probes we must ensure a timer
5133 				 * is still running there.
5134 				 */
5135 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5136 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5137 					    stcb->sctp_ep, stcb, net);
5138 
5139 				}
5140 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5141 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5142 				    stcb, net,
5143 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5144 			}
5145 		}
5146 	}
5147 	if ((j == 0) &&
5148 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5149 	    (asoc->sent_queue_retran_cnt == 0) &&
5150 	    (win_probe_recovered == 0) &&
5151 	    (done_once == 0)) {
5152 		/*
5153 		 * Huh, this should not happen unless all packets are
5154 		 * PR-SCTP and marked to be skipped.
5155 		 */
5156 		if (sctp_fs_audit(asoc)) {
5157 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5158 				net->flight_size = 0;
5159 			}
5160 			asoc->total_flight = 0;
5161 			asoc->total_flight_count = 0;
5162 			asoc->sent_queue_retran_cnt = 0;
5163 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5164 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5165 					sctp_flight_size_increase(tp1);
5166 					sctp_total_flight_increase(stcb, tp1);
5167 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5168 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5169 				}
5170 			}
5171 		}
5172 		done_once = 1;
5173 		goto again;
5174 	}
5175 	/*********************************************/
5176 	/* Here we perform PR-SCTP procedures        */
5177 	/* (section 4.2)                             */
5178 	/*********************************************/
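	/*
	 * advancedPeerAckPoint tracks the highest TSN the peer may be told
	 * to skip: it never trails the cumack (C1) and is pushed past
	 * abandoned PR-SCTP chunks (C2). Whenever it moves ahead of the
	 * cumack, a FWD-TSN chunk is due (C3).
	 */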
5179 	/* C1. update advancedPeerAckPoint */
5180 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5181 		asoc->advanced_peer_ack_point = cum_ack;
5182 	}
5183 	/* C2. try to further move advancedPeerAckPoint ahead */
5184 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5185 		struct sctp_tmit_chunk *lchk;
5186 		uint32_t old_adv_peer_ack_point;
5187 
5188 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5189 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5190 		/* C3. See if we need to send a Fwd-TSN */
5191 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5192 			/*
5193 			 * ISSUE with ECN, see FWD-TSN processing.
5194 			 */
5195 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5196 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5197 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5198 				    old_adv_peer_ack_point);
5199 			}
5200 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5201 				send_forward_tsn(stcb, asoc);
5202 			} else if (lchk) {
5203 				/* try to FR fwd-tsn's that get lost too */
5204 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5205 					send_forward_tsn(stcb, asoc);
5206 				}
5207 			}
5208 		}
5209 		if (lchk) {
5210 			/* Assure a timer is up */
5211 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5212 			    stcb->sctp_ep, stcb, lchk->whoTo);
5213 		}
5214 	}
5215 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5216 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5217 		    a_rwnd,
5218 		    stcb->asoc.peers_rwnd,
5219 		    stcb->asoc.total_flight,
5220 		    stcb->asoc.total_output_queue_size);
5221 	}
5222 }
5223 
5224 void
5225 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5226     struct sctp_nets *netp, int *abort_flag)
5227 {
5228 	/* Copy cum-ack */
5229 	uint32_t cum_ack, a_rwnd;
5230 
5231 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5232 	/* Arrange so a_rwnd does NOT change */
5233 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
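	/*
	 * The express handler derives peers_rwnd from a_rwnd minus the
	 * data in flight, so adding total_flight back in here leaves the
	 * peer's window effectively unchanged.
	 */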
5234 
5235 	/* Now call the express sack handling */
5236 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5237 }
5238 
5239 static void
5240 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5241     struct sctp_stream_in *strmin)
5242 {
5243 	struct sctp_queued_to_read *ctl, *nctl;
5244 	struct sctp_association *asoc;
5245 	uint16_t tt;
5246 
5247 	asoc = &stcb->asoc;
5248 	tt = strmin->last_sequence_delivered;
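	/*
	 * Two passes: first hand everything at or before the skipped SSN
	 * to the read queue, then resume normal in-order delivery for
	 * whatever became ready behind it.
	 */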
5249 	/*
5250 	 * First deliver anything prior to and including the stream
5251 	 * sequence number that came in.
5252 	 */
5253 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5254 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5255 			/* this is deliverable now */
5256 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5257 			/* subtract pending on streams */
5258 			asoc->size_on_all_streams -= ctl->length;
5259 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5260 			/* deliver it to at least the delivery-q */
5261 			if (stcb->sctp_socket) {
5262 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5263 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5264 				    ctl,
5265 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5266 			}
5267 		} else {
5268 			/* no more delivery now. */
5269 			break;
5270 		}
5271 	}
5272 	/*
5273 	 * Now we must deliver things in the queue the normal way, if any
5274 	 * are now ready.
5275 	 */
5276 	tt = strmin->last_sequence_delivered + 1;
5277 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5278 		if (tt == ctl->sinfo_ssn) {
5279 			/* this is deliverable now */
5280 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5281 			/* subtract pending on streams */
5282 			asoc->size_on_all_streams -= ctl->length;
5283 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5284 			/* deliver it to at least the delivery-q */
5285 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5286 			if (stcb->sctp_socket) {
5287 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5288 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5289 				    ctl,
5290 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5291 
5292 			}
5293 			tt = strmin->last_sequence_delivered + 1;
5294 		} else {
5295 			break;
5296 		}
5297 	}
5298 }
5299 
5300 static void
5301 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5302     struct sctp_association *asoc,
5303     uint16_t stream, uint16_t seq)
5304 {
5305 	struct sctp_tmit_chunk *chk, *nchk;
5306 
5307 	/* For each one on here see if we need to toss it */
5308 	/*
5309 	 * For now, large messages held on the reasmqueue that are complete
5310 	 * will be tossed too. We could in theory do more work: spin
5311 	 * through, stop after dumping one msg (i.e. upon seeing the start
5312 	 * of a new msg at the head), and call the delivery function to see
5313 	 * if it can be delivered. But for now we just dump everything on the
5314 	 * queue.
5315 	 */
5316 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5317 		/*
5318 		 * Do not toss it if on a different stream or marked for
5319 		 * unordered delivery in which case the stream sequence
5320 		 * number has no meaning.
5321 		 */
5322 		if ((chk->rec.data.stream_number != stream) ||
5323 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5324 			continue;
5325 		}
5326 		if (chk->rec.data.stream_seq == seq) {
5327 			/* It needs to be tossed */
5328 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5329 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5330 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5331 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5332 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5333 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5334 			}
5335 			asoc->size_on_reasm_queue -= chk->send_size;
5336 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5337 
5338 			/* Clear up any stream problem */
5339 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5340 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5341 				/*
5342 				 * We must move this stream's sequence
5343 				 * number forward if the chunk being
5344 				 * skipped is not unordered. There is
5345 				 * a chance that if the peer does not
5346 				 * include the last fragment in its FWD-TSN
5347 				 * we WILL have a problem here, since we
5348 				 * would have a partial chunk in queue that
5349 				 * may not be deliverable. Also, if a partial
5350 				 * delivery API has started, the user may get
5351 				 * a partial chunk, with the next read
5352 				 * returning a new chunk. Really ugly, but I
5353 				 * see no way around it! Maybe a notify??
5354 				 */
5355 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5356 			}
5357 			if (chk->data) {
5358 				sctp_m_freem(chk->data);
5359 				chk->data = NULL;
5360 			}
5361 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5362 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5363 			/*
5364 			 * If the stream_seq is greater than the one being
5365 			 * purged, we are done.
5366 			 */
5367 			break;
5368 		}
5369 	}
5370 }
5371 
5372 
5373 void
5374 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5375     struct sctp_forward_tsn_chunk *fwd,
5376     int *abort_flag, struct mbuf *m, int offset)
5377 {
5378 	/* The pr-sctp fwd tsn */
5379 	/*
5380 	 * Here we will perform all the data receiver side steps for
5381 	 * processing FwdTSN, as required by the pr-sctp draft:
5382 	 *
5383 	 * Assume we get FwdTSN(x):
5384 	 *
5385 	 * 1) update local cumTSN to x, 2) try to further advance cumTSN to
5386 	 * x + others we have, 3) examine and update the re-ordering queue
5387 	 * on pr-in-streams, 4) clean up the re-assembly queue, and 5) send
5388 	 * a SACK to report where we are.
5389 	 */
5390 	struct sctp_association *asoc;
5391 	uint32_t new_cum_tsn, gap;
5392 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5393 	uint32_t str_seq;
5394 	struct sctp_stream_in *strm;
5395 	struct sctp_tmit_chunk *chk, *nchk;
5396 	struct sctp_queued_to_read *ctl, *sv;
5397 
5398 	cumack_set_flag = 0;
5399 	asoc = &stcb->asoc;
5400 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5401 		SCTPDBG(SCTP_DEBUG_INDATA1,
5402 		    "Bad size, too small fwd-tsn\n");
5403 		return;
5404 	}
5405 	m_size = (stcb->asoc.mapping_array_size << 3);
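	/* m_size is the number of TSNs the mapping array can represent */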
5406 	/*************************************************************/
5407 	/* 1. Here we update local cumTSN and shift the bitmap array */
5408 	/*************************************************************/
5409 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5410 
5411 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5412 		/* Already got there ... */
5413 		return;
5414 	}
5415 	/*
5416 	 * now we know the new TSN is more advanced, let's find the actual
5417 	 * gap
5418 	 */
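	/*
	 * The gap is the offset of new_cum_tsn from
	 * mapping_array_base_tsn, computed modulo 2^32 so TSN wraparound
	 * is handled; e.g. a base of 1000 and a new cumulative TSN of 1007
	 * give a gap of 7.
	 */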
5419 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5420 	asoc->cumulative_tsn = new_cum_tsn;
5421 	if (gap >= m_size) {
5422 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5423 			struct mbuf *oper;
5424 
5425 			/*
5426 			 * out of range (even for single byte chunks in the
5427 			 * rwnd we give out). This must be an attacker.
5428 			 */
5429 			*abort_flag = 1;
5430 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5431 			    0, M_DONTWAIT, 1, MT_DATA);
5432 			if (oper) {
5433 				struct sctp_paramhdr *ph;
5434 				uint32_t *ippp;
5435 
5436 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5437 				    (sizeof(uint32_t) * 3);
5438 				ph = mtod(oper, struct sctp_paramhdr *);
5439 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5440 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5441 				ippp = (uint32_t *) (ph + 1);
5442 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5443 				ippp++;
5444 				*ippp = asoc->highest_tsn_inside_map;
5445 				ippp++;
5446 				*ippp = new_cum_tsn;
5447 			}
5448 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5449 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5450 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5451 			return;
5452 		}
5453 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5454 
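		/*
		 * The forward TSN jumped past everything the map can
		 * represent, so rebase both mapping arrays just past the
		 * new cumulative TSN.
		 */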
5455 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5456 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5457 		asoc->highest_tsn_inside_map = new_cum_tsn;
5458 
5459 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5460 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5461 
5462 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5463 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5464 		}
5465 	} else {
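		/*
		 * The gap fits inside the mapping array: mark every TSN up
		 * to and including it as received and non-renegable.
		 */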
5466 		SCTP_TCB_LOCK_ASSERT(stcb);
5467 		for (i = 0; i <= gap; i++) {
5468 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5469 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5470 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5471 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5472 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5473 				}
5474 			}
5475 		}
5476 	}
5477 	/*************************************************************/
5478 	/* 2. Clear up re-assembly queue                             */
5479 	/*************************************************************/
5480 	/*
5481 	 * First service it if pd-api is up, just in case we can progress it
5482 	 * forward.
5483 	 */
5484 	if (asoc->fragmented_delivery_inprogress) {
5485 		sctp_service_reassembly(stcb, asoc);
5486 	}
5487 	/* For each one on here see if we need to toss it */
5488 	/*
5489 	 * For now, large messages held on the reasmqueue that are complete
5490 	 * will be tossed too. We could in theory do more work: spin
5491 	 * through, stop after dumping one msg (i.e. upon seeing the start
5492 	 * of a new msg at the head), and call the delivery function to see
5493 	 * if it can be delivered. But for now we just dump everything on the
5494 	 * queue.
5495 	 */
5496 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5497 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5498 			/* It needs to be tossed */
5499 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5500 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5501 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5502 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5503 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5504 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5505 			}
5506 			asoc->size_on_reasm_queue -= chk->send_size;
5507 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5508 
5509 			/* Clear up any stream problem */
5510 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5511 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5512 				/*
5513 				 * We must move this stream's sequence
5514 				 * number forward if the chunk being
5515 				 * skipped is not unordered. There is
5516 				 * a chance that if the peer does not
5517 				 * include the last fragment in its FWD-TSN
5518 				 * we WILL have a problem here, since we
5519 				 * would have a partial chunk in queue that
5520 				 * may not be deliverable. Also, if a partial
5521 				 * delivery API has started, the user may get
5522 				 * a partial chunk, with the next read
5523 				 * returning a new chunk. Really ugly, but I
5524 				 * see no way around it! Maybe a notify??
5525 				 */
5526 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5527 			}
5528 			if (chk->data) {
5529 				sctp_m_freem(chk->data);
5530 				chk->data = NULL;
5531 			}
5532 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5533 		} else {
5534 			/*
5535 			 * Ok we have gone beyond the end of the fwd-tsn's
5536 			 * mark.
5537 			 */
5538 			break;
5539 		}
5540 	}
5541 	/*******************************************************/
5542 	/* 3. Update the PR-stream re-ordering queues and fix  */
5543 	/* delivery issues as needed.                          */
5544 	/*******************************************************/
5545 	fwd_sz -= sizeof(*fwd);
5546 	if (m && fwd_sz) {
5547 		/* New method. */
5548 		unsigned int num_str;
5549 		struct sctp_strseq *stseq, strseqbuf;
5550 
5551 		offset += sizeof(*fwd);
5552 
5553 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5554 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5555 		for (i = 0; i < num_str; i++) {
5556 			uint16_t st;
5557 
5558 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5559 			    sizeof(struct sctp_strseq),
5560 			    (uint8_t *) & strseqbuf);
5561 			offset += sizeof(struct sctp_strseq);
5562 			if (stseq == NULL) {
5563 				break;
5564 			}
5565 			/* Convert */
5566 			st = ntohs(stseq->stream);
5567 			stseq->stream = st;
5568 			st = ntohs(stseq->sequence);
5569 			stseq->sequence = st;
5570 
5571 			/* now process */
5572 
5573 			/*
5574 			 * Ok we now look for the stream/seq on the read
5575 			 * queue where it's not all delivered. If we find it
5576 			 * we transmute the read entry into a PDI_ABORTED.
5577 			 */
5578 			if (stseq->stream >= asoc->streamincnt) {
5579 				/* screwed up streams, stop!  */
5580 				break;
5581 			}
5582 			if ((asoc->str_of_pdapi == stseq->stream) &&
5583 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5584 				/*
5585 				 * If this is the one we were partially
5586 				 * delivering now then we no longer are.
5587 				 * Note this will change with the reassembly
5588 				 * re-write.
5589 				 */
5590 				asoc->fragmented_delivery_inprogress = 0;
5591 			}
5592 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5593 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5594 				if ((ctl->sinfo_stream == stseq->stream) &&
5595 				    (ctl->sinfo_ssn == stseq->sequence)) {
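					/*
					 * Pack the stream number and SSN
					 * into one 32-bit value for the
					 * PD-API aborted notification.
					 */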
5596 					str_seq = (stseq->stream << 16) | stseq->sequence;
5597 					ctl->end_added = 1;
5598 					ctl->pdapi_aborted = 1;
5599 					sv = stcb->asoc.control_pdapi;
5600 					stcb->asoc.control_pdapi = ctl;
5601 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5602 					    stcb,
5603 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5604 					    (void *)&str_seq,
5605 					    SCTP_SO_NOT_LOCKED);
5606 					stcb->asoc.control_pdapi = sv;
5607 					break;
5608 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5609 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5610 					/* We are past our victim SSN */
5611 					break;
5612 				}
5613 			}
5614 			strm = &asoc->strmin[stseq->stream];
5615 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5616 				/* Update the sequence number */
5617 				strm->last_sequence_delivered = stseq->sequence;
5618 			}
5619 			/* now kick the stream the new way */
5620 			/* sa_ignore NO_NULL_CHK */
5621 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5622 		}
5623 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5624 	}
5625 	/*
5626 	 * Now slide things forward.
5627 	 */
5628 	sctp_slide_mapping_arrays(stcb);
5629 
5630 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5631 		/* now lets kick out and check for more fragmented delivery */
5632 		/* sa_ignore NO_NULL_CHK */
5633 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5634 	}
5635 }
5636