xref: /freebsd/sys/netinet/sctp_indata.c (revision 8d20be1e22095c27faf8fe8b2f0d089739cc742e)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send that is and will be sending it .. for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-2-m socket. Since
73 	 * the sb_cc is the count that everyone as put up. When we re-write
74 	 * sctp_soreceive then we will fix this so that ONLY this
75 	 * associations data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * take out what has NOT been put on socket queue and we yet hold
92 	 * for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 	 * even it is 0. SWS engaged
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
159 
160 
161 /*
162  * Build out our readq entry based on the incoming packet.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
228 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 		cmh->cmsg_level = IPPROTO_SCTP;
255 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 		cmh->cmsg_type = SCTP_RCVINFO;
257 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 		rcvinfo->rcv_context = sinfo->sinfo_context;
265 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
268 	}
269 	if (provide_nxt) {
270 		cmh->cmsg_level = IPPROTO_SCTP;
271 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 		cmh->cmsg_type = SCTP_NXTINFO;
273 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 		nxtinfo->nxt_flags = 0;
276 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
278 		}
279 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
281 		}
282 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
284 		}
285 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
290 	}
291 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 		cmh->cmsg_level = IPPROTO_SCTP;
293 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
294 		if (use_extended) {
295 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 			cmh->cmsg_type = SCTP_EXTRCV;
297 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
299 		} else {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 			cmh->cmsg_type = SCTP_SNDRCV;
302 			*outinfo = *sinfo;
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
304 		}
305 	}
306 	return (ret);
307 }
308 
309 
310 static void
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
312 {
313 	uint32_t gap, i, cumackp1;
314 	int fnd = 0;
315 
316 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
317 		return;
318 	}
319 	cumackp1 = asoc->cumulative_tsn + 1;
320 	if (SCTP_TSN_GT(cumackp1, tsn)) {
321 		/*
322 		 * this tsn is behind the cum ack and thus we don't need to
323 		 * worry about it being moved from one to the other.
324 		 */
325 		return;
326 	}
327 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
328 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 		sctp_print_mapping_array(asoc);
331 #ifdef INVARIANTS
332 		panic("Things are really messed up now!!");
333 #endif
334 	}
335 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 		asoc->highest_tsn_inside_nr_map = tsn;
339 	}
340 	if (tsn == asoc->highest_tsn_inside_map) {
341 		/* We must back down to see what the new highest is */
342 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 				asoc->highest_tsn_inside_map = i;
346 				fnd = 1;
347 				break;
348 			}
349 		}
350 		if (!fnd) {
351 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
352 		}
353 	}
354 }
355 
356 
357 /*
358  * We are delivering currently from the reassembly queue. We must continue to
359  * deliver until we either: 1) run out of space. 2) run out of sequential
360  * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
361  */
362 static void
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
364 {
365 	struct sctp_tmit_chunk *chk, *nchk;
366 	uint16_t nxt_todel;
367 	uint16_t stream_no;
368 	int end = 0;
369 	int cntDel;
370 	struct sctp_queued_to_read *control, *ctl, *nctl;
371 
372 	if (stcb == NULL)
373 		return;
374 
375 	cntDel = stream_no = 0;
376 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 		/* socket above is long gone or going.. */
380 abandon:
381 		asoc->fragmented_delivery_inprogress = 0;
382 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 			asoc->size_on_reasm_queue -= chk->send_size;
385 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
386 			/*
387 			 * Lose the data pointer, since its in the socket
388 			 * buffer
389 			 */
390 			if (chk->data) {
391 				sctp_m_freem(chk->data);
392 				chk->data = NULL;
393 			}
394 			/* Now free the address and data */
395 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 			/* sa_ignore FREED_MEMORY */
397 		}
398 		return;
399 	}
400 	SCTP_TCB_LOCK_ASSERT(stcb);
401 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 			/* Can't deliver more :< */
404 			return;
405 		}
406 		stream_no = chk->rec.data.stream_number;
407 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 		if (nxt_todel != chk->rec.data.stream_seq &&
409 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
410 			/*
411 			 * Not the next sequence to deliver in its stream OR
412 			 * unordered
413 			 */
414 			return;
415 		}
416 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
417 
418 			control = sctp_build_readq_entry_chk(stcb, chk);
419 			if (control == NULL) {
420 				/* out of memory? */
421 				return;
422 			}
423 			/* save it off for our future deliveries */
424 			stcb->asoc.control_pdapi = control;
425 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
426 				end = 1;
427 			else
428 				end = 0;
429 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 			sctp_add_to_readq(stcb->sctp_ep,
431 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
432 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
433 			cntDel++;
434 		} else {
435 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
436 				end = 1;
437 			else
438 				end = 0;
439 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 			    stcb->asoc.control_pdapi,
442 			    chk->data, end, chk->rec.data.TSN_seq,
443 			    &stcb->sctp_socket->so_rcv)) {
444 				/*
445 				 * something is very wrong, either
446 				 * control_pdapi is NULL, or the tail_mbuf
447 				 * is corrupt, or there is a EOM already on
448 				 * the mbuf chain.
449 				 */
450 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
451 					goto abandon;
452 				} else {
453 #ifdef INVARIANTS
454 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 						panic("This should not happen control_pdapi NULL?");
456 					}
457 					/* if we did not panic, it was a EOM */
458 					panic("Bad chunking ??");
459 #else
460 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
462 					}
463 					SCTP_PRINTF("Bad chunking ??\n");
464 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
465 
466 #endif
467 					goto abandon;
468 				}
469 			}
470 			cntDel++;
471 		}
472 		/* pull it we did it */
473 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 			asoc->fragmented_delivery_inprogress = 0;
476 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 				asoc->strmin[stream_no].last_sequence_delivered++;
478 			}
479 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
481 			}
482 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
483 			/*
484 			 * turn the flag back on since we just  delivered
485 			 * yet another one.
486 			 */
487 			asoc->fragmented_delivery_inprogress = 1;
488 		}
489 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
493 
494 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->size_on_reasm_queue -= chk->send_size;
496 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 		/* free up the chk */
498 		chk->data = NULL;
499 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
500 
501 		if (asoc->fragmented_delivery_inprogress == 0) {
502 			/*
503 			 * Now lets see if we can deliver the next one on
504 			 * the stream
505 			 */
506 			struct sctp_stream_in *strm;
507 
508 			strm = &asoc->strmin[stream_no];
509 			nxt_todel = strm->last_sequence_delivered + 1;
510 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 				/* Deliver more if we can. */
512 				if (nxt_todel == ctl->sinfo_ssn) {
513 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 					asoc->size_on_all_streams -= ctl->length;
515 					sctp_ucount_decr(asoc->cnt_on_all_streams);
516 					strm->last_sequence_delivered++;
517 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 					sctp_add_to_readq(stcb->sctp_ep, stcb,
519 					    ctl,
520 					    &stcb->sctp_socket->so_rcv, 1,
521 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
522 				} else {
523 					break;
524 				}
525 				nxt_todel = strm->last_sequence_delivered + 1;
526 			}
527 			break;
528 		}
529 	}
530 }
531 
532 /*
533  * Queue the chunk either right into the socket buffer if it is the next one
534  * to go OR put it in the correct place in the delivery queue.  If we do
535  * append to the so_buf, keep doing so until we are out of order. One big
536  * question still remains, what to do when the socket buffer is FULL??
537  */
538 static void
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540     struct sctp_queued_to_read *control, int *abort_flag)
541 {
542 	/*
543 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 	 * all the data in one stream this could happen quite rapidly. One
545 	 * could use the TSN to keep track of things, but this scheme breaks
546 	 * down in the other type of stream useage that could occur. Send a
547 	 * single msg to stream 0, send 4Billion messages to stream 1, now
548 	 * send a message to stream 0. You have a situation where the TSN
549 	 * has wrapped but not in the stream. Is this worth worrying about
550 	 * or should we just change our queue sort at the bottom to be by
551 	 * TSN.
552 	 *
553 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 	 * assignment this could happen... and I don't see how this would be
556 	 * a violation. So for now I am undecided an will leave the sort by
557 	 * SSN alone. Maybe a hybred approach is the answer
558 	 *
559 	 */
560 	struct sctp_stream_in *strm;
561 	struct sctp_queued_to_read *at;
562 	int queue_needed;
563 	uint16_t nxt_todel;
564 	struct mbuf *oper;
565 
566 	queue_needed = 1;
567 	asoc->size_on_all_streams += control->length;
568 	sctp_ucount_incr(asoc->cnt_on_all_streams);
569 	strm = &asoc->strmin[control->sinfo_stream];
570 	nxt_todel = strm->last_sequence_delivered + 1;
571 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
572 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
573 	}
574 	SCTPDBG(SCTP_DEBUG_INDATA1,
575 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
576 	    (uint32_t) control->sinfo_stream,
577 	    (uint32_t) strm->last_sequence_delivered,
578 	    (uint32_t) nxt_todel);
579 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
580 		/* The incoming sseq is behind where we last delivered? */
581 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
582 		    control->sinfo_ssn, strm->last_sequence_delivered);
583 protocol_error:
584 		/*
585 		 * throw it in the stream so it gets cleaned up in
586 		 * association destruction
587 		 */
588 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
589 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
590 		    0, M_NOWAIT, 1, MT_DATA);
591 		if (oper) {
592 			struct sctp_paramhdr *ph;
593 			uint32_t *ippp;
594 
595 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
596 			    (sizeof(uint32_t) * 3);
597 			ph = mtod(oper, struct sctp_paramhdr *);
598 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
599 			ph->param_length = htons(SCTP_BUF_LEN(oper));
600 			ippp = (uint32_t *) (ph + 1);
601 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
602 			ippp++;
603 			*ippp = control->sinfo_tsn;
604 			ippp++;
605 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
606 		}
607 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
608 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
609 		*abort_flag = 1;
610 		return;
611 
612 	}
613 	if (nxt_todel == control->sinfo_ssn) {
614 		/* can be delivered right away? */
615 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
617 		}
618 		/* EY it wont be queued if it could be delivered directly */
619 		queue_needed = 0;
620 		asoc->size_on_all_streams -= control->length;
621 		sctp_ucount_decr(asoc->cnt_on_all_streams);
622 		strm->last_sequence_delivered++;
623 
624 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 		sctp_add_to_readq(stcb->sctp_ep, stcb,
626 		    control,
627 		    &stcb->sctp_socket->so_rcv, 1,
628 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
630 			/* all delivered */
631 			nxt_todel = strm->last_sequence_delivered + 1;
632 			if (nxt_todel == control->sinfo_ssn) {
633 				TAILQ_REMOVE(&strm->inqueue, control, next);
634 				asoc->size_on_all_streams -= control->length;
635 				sctp_ucount_decr(asoc->cnt_on_all_streams);
636 				strm->last_sequence_delivered++;
637 				/*
638 				 * We ignore the return of deliver_data here
639 				 * since we always can hold the chunk on the
640 				 * d-queue. And we have a finite number that
641 				 * can be delivered from the strq.
642 				 */
643 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 					sctp_log_strm_del(control, NULL,
645 					    SCTP_STR_LOG_FROM_IMMED_DEL);
646 				}
647 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 				sctp_add_to_readq(stcb->sctp_ep, stcb,
649 				    control,
650 				    &stcb->sctp_socket->so_rcv, 1,
651 				    SCTP_READ_LOCK_NOT_HELD,
652 				    SCTP_SO_NOT_LOCKED);
653 				continue;
654 			}
655 			break;
656 		}
657 	}
658 	if (queue_needed) {
659 		/*
660 		 * Ok, we did not deliver this guy, find the correct place
661 		 * to put it on the queue.
662 		 */
663 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
664 			goto protocol_error;
665 		}
666 		if (TAILQ_EMPTY(&strm->inqueue)) {
667 			/* Empty queue */
668 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
669 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
670 			}
671 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
672 		} else {
673 			TAILQ_FOREACH(at, &strm->inqueue, next) {
674 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
675 					/*
676 					 * one in queue is bigger than the
677 					 * new one, insert before this one
678 					 */
679 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
680 						sctp_log_strm_del(control, at,
681 						    SCTP_STR_LOG_FROM_INSERT_MD);
682 					}
683 					TAILQ_INSERT_BEFORE(at, control, next);
684 					break;
685 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
686 					/*
687 					 * Gak, He sent me a duplicate str
688 					 * seq number
689 					 */
690 					/*
691 					 * foo bar, I guess I will just free
692 					 * this new guy, should we abort
693 					 * too? FIX ME MAYBE? Or it COULD be
694 					 * that the SSN's have wrapped.
695 					 * Maybe I should compare to TSN
696 					 * somehow... sigh for now just blow
697 					 * away the chunk!
698 					 */
699 
700 					if (control->data)
701 						sctp_m_freem(control->data);
702 					control->data = NULL;
703 					asoc->size_on_all_streams -= control->length;
704 					sctp_ucount_decr(asoc->cnt_on_all_streams);
705 					if (control->whoFrom) {
706 						sctp_free_remote_addr(control->whoFrom);
707 						control->whoFrom = NULL;
708 					}
709 					sctp_free_a_readq(stcb, control);
710 					return;
711 				} else {
712 					if (TAILQ_NEXT(at, next) == NULL) {
713 						/*
714 						 * We are at the end, insert
715 						 * it after this one
716 						 */
717 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
718 							sctp_log_strm_del(control, at,
719 							    SCTP_STR_LOG_FROM_INSERT_TL);
720 						}
721 						TAILQ_INSERT_AFTER(&strm->inqueue,
722 						    at, control, next);
723 						break;
724 					}
725 				}
726 			}
727 		}
728 	}
729 }
730 
731 /*
732  * Returns two things: You get the total size of the deliverable parts of the
733  * first fragmented message on the reassembly queue. And you get a 1 back if
734  * all of the message is ready or a 0 back if the message is still incomplete
735  */
736 static int
737 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
738 {
739 	struct sctp_tmit_chunk *chk;
740 	uint32_t tsn;
741 
742 	*t_size = 0;
743 	chk = TAILQ_FIRST(&asoc->reasmqueue);
744 	if (chk == NULL) {
745 		/* nothing on the queue */
746 		return (0);
747 	}
748 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
749 		/* Not a first on the queue */
750 		return (0);
751 	}
752 	tsn = chk->rec.data.TSN_seq;
753 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
754 		if (tsn != chk->rec.data.TSN_seq) {
755 			return (0);
756 		}
757 		*t_size += chk->send_size;
758 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
759 			return (1);
760 		}
761 		tsn++;
762 	}
763 	return (0);
764 }
765 
766 static void
767 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
768 {
769 	struct sctp_tmit_chunk *chk;
770 	uint16_t nxt_todel;
771 	uint32_t tsize, pd_point;
772 
773 doit_again:
774 	chk = TAILQ_FIRST(&asoc->reasmqueue);
775 	if (chk == NULL) {
776 		/* Huh? */
777 		asoc->size_on_reasm_queue = 0;
778 		asoc->cnt_on_reasm_queue = 0;
779 		return;
780 	}
781 	if (asoc->fragmented_delivery_inprogress == 0) {
782 		nxt_todel =
783 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
784 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
785 		    (nxt_todel == chk->rec.data.stream_seq ||
786 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
787 			/*
788 			 * Yep the first one is here and its ok to deliver
789 			 * but should we?
790 			 */
791 			if (stcb->sctp_socket) {
792 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
793 				    stcb->sctp_ep->partial_delivery_point);
794 			} else {
795 				pd_point = stcb->sctp_ep->partial_delivery_point;
796 			}
797 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
798 				/*
799 				 * Yes, we setup to start reception, by
800 				 * backing down the TSN just in case we
801 				 * can't deliver. If we
802 				 */
803 				asoc->fragmented_delivery_inprogress = 1;
804 				asoc->tsn_last_delivered =
805 				    chk->rec.data.TSN_seq - 1;
806 				asoc->str_of_pdapi =
807 				    chk->rec.data.stream_number;
808 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
809 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
810 				asoc->fragment_flags = chk->rec.data.rcv_flags;
811 				sctp_service_reassembly(stcb, asoc);
812 			}
813 		}
814 	} else {
815 		/*
816 		 * Service re-assembly will deliver stream data queued at
817 		 * the end of fragmented delivery.. but it wont know to go
818 		 * back and call itself again... we do that here with the
819 		 * got doit_again
820 		 */
821 		sctp_service_reassembly(stcb, asoc);
822 		if (asoc->fragmented_delivery_inprogress == 0) {
823 			/*
824 			 * finished our Fragmented delivery, could be more
825 			 * waiting?
826 			 */
827 			goto doit_again;
828 		}
829 	}
830 }
831 
832 /*
833  * Dump onto the re-assembly queue, in its proper place. After dumping on the
834  * queue, see if anthing can be delivered. If so pull it off (or as much as
835  * we can. If we run out of space then we must dump what we can and set the
836  * appropriate flag to say we queued what we could.
837  */
838 static void
839 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
840     struct sctp_tmit_chunk *chk, int *abort_flag)
841 {
842 	struct mbuf *oper;
843 	uint32_t cum_ackp1, prev_tsn, post_tsn;
844 	struct sctp_tmit_chunk *at, *prev, *next;
845 
846 	prev = next = NULL;
847 	cum_ackp1 = asoc->tsn_last_delivered + 1;
848 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
849 		/* This is the first one on the queue */
850 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
851 		/*
852 		 * we do not check for delivery of anything when only one
853 		 * fragment is here
854 		 */
855 		asoc->size_on_reasm_queue = chk->send_size;
856 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
857 		if (chk->rec.data.TSN_seq == cum_ackp1) {
858 			if (asoc->fragmented_delivery_inprogress == 0 &&
859 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
860 			    SCTP_DATA_FIRST_FRAG) {
861 				/*
862 				 * An empty queue, no delivery inprogress,
863 				 * we hit the next one and it does NOT have
864 				 * a FIRST fragment mark.
865 				 */
866 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
867 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
868 				    0, M_NOWAIT, 1, MT_DATA);
869 
870 				if (oper) {
871 					struct sctp_paramhdr *ph;
872 					uint32_t *ippp;
873 
874 					SCTP_BUF_LEN(oper) =
875 					    sizeof(struct sctp_paramhdr) +
876 					    (sizeof(uint32_t) * 3);
877 					ph = mtod(oper, struct sctp_paramhdr *);
878 					ph->param_type =
879 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
880 					ph->param_length = htons(SCTP_BUF_LEN(oper));
881 					ippp = (uint32_t *) (ph + 1);
882 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
883 					ippp++;
884 					*ippp = chk->rec.data.TSN_seq;
885 					ippp++;
886 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
887 
888 				}
889 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
891 				*abort_flag = 1;
892 			} else if (asoc->fragmented_delivery_inprogress &&
893 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
894 				/*
895 				 * We are doing a partial delivery and the
896 				 * NEXT chunk MUST be either the LAST or
897 				 * MIDDLE fragment NOT a FIRST
898 				 */
899 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
901 				    0, M_NOWAIT, 1, MT_DATA);
902 				if (oper) {
903 					struct sctp_paramhdr *ph;
904 					uint32_t *ippp;
905 
906 					SCTP_BUF_LEN(oper) =
907 					    sizeof(struct sctp_paramhdr) +
908 					    (3 * sizeof(uint32_t));
909 					ph = mtod(oper, struct sctp_paramhdr *);
910 					ph->param_type =
911 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
912 					ph->param_length = htons(SCTP_BUF_LEN(oper));
913 					ippp = (uint32_t *) (ph + 1);
914 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
915 					ippp++;
916 					*ippp = chk->rec.data.TSN_seq;
917 					ippp++;
918 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
919 				}
920 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
921 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
922 				*abort_flag = 1;
923 			} else if (asoc->fragmented_delivery_inprogress) {
924 				/*
925 				 * Here we are ok with a MIDDLE or LAST
926 				 * piece
927 				 */
928 				if (chk->rec.data.stream_number !=
929 				    asoc->str_of_pdapi) {
930 					/* Got to be the right STR No */
931 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
932 					    chk->rec.data.stream_number,
933 					    asoc->str_of_pdapi);
934 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
935 					    0, M_NOWAIT, 1, MT_DATA);
936 					if (oper) {
937 						struct sctp_paramhdr *ph;
938 						uint32_t *ippp;
939 
940 						SCTP_BUF_LEN(oper) =
941 						    sizeof(struct sctp_paramhdr) +
942 						    (sizeof(uint32_t) * 3);
943 						ph = mtod(oper,
944 						    struct sctp_paramhdr *);
945 						ph->param_type =
946 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
947 						ph->param_length =
948 						    htons(SCTP_BUF_LEN(oper));
949 						ippp = (uint32_t *) (ph + 1);
950 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
951 						ippp++;
952 						*ippp = chk->rec.data.TSN_seq;
953 						ippp++;
954 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
955 					}
956 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
957 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
958 					*abort_flag = 1;
959 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
960 					    SCTP_DATA_UNORDERED &&
961 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
962 					/* Got to be the right STR Seq */
963 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
964 					    chk->rec.data.stream_seq,
965 					    asoc->ssn_of_pdapi);
966 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
967 					    0, M_NOWAIT, 1, MT_DATA);
968 					if (oper) {
969 						struct sctp_paramhdr *ph;
970 						uint32_t *ippp;
971 
972 						SCTP_BUF_LEN(oper) =
973 						    sizeof(struct sctp_paramhdr) +
974 						    (3 * sizeof(uint32_t));
975 						ph = mtod(oper,
976 						    struct sctp_paramhdr *);
977 						ph->param_type =
978 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
979 						ph->param_length =
980 						    htons(SCTP_BUF_LEN(oper));
981 						ippp = (uint32_t *) (ph + 1);
982 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
983 						ippp++;
984 						*ippp = chk->rec.data.TSN_seq;
985 						ippp++;
986 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
987 
988 					}
989 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
990 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
991 					*abort_flag = 1;
992 				}
993 			}
994 		}
995 		return;
996 	}
997 	/* Find its place */
998 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
999 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1000 			/*
1001 			 * one in queue is bigger than the new one, insert
1002 			 * before this one
1003 			 */
1004 			/* A check */
1005 			asoc->size_on_reasm_queue += chk->send_size;
1006 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1007 			next = at;
1008 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1009 			break;
1010 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1011 			/* Gak, He sent me a duplicate str seq number */
1012 			/*
1013 			 * foo bar, I guess I will just free this new guy,
1014 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1015 			 * that the SSN's have wrapped. Maybe I should
1016 			 * compare to TSN somehow... sigh for now just blow
1017 			 * away the chunk!
1018 			 */
1019 			if (chk->data) {
1020 				sctp_m_freem(chk->data);
1021 				chk->data = NULL;
1022 			}
1023 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1024 			return;
1025 		} else {
1026 			prev = at;
1027 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1028 				/*
1029 				 * We are at the end, insert it after this
1030 				 * one
1031 				 */
1032 				/* check it first */
1033 				asoc->size_on_reasm_queue += chk->send_size;
1034 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1035 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1036 				break;
1037 			}
1038 		}
1039 	}
1040 	/* Now the audits */
1041 	if (prev) {
1042 		prev_tsn = chk->rec.data.TSN_seq - 1;
1043 		if (prev_tsn == prev->rec.data.TSN_seq) {
1044 			/*
1045 			 * Ok the one I am dropping onto the end is the
1046 			 * NEXT. A bit of valdiation here.
1047 			 */
1048 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1049 			    SCTP_DATA_FIRST_FRAG ||
1050 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1051 			    SCTP_DATA_MIDDLE_FRAG) {
1052 				/*
1053 				 * Insert chk MUST be a MIDDLE or LAST
1054 				 * fragment
1055 				 */
1056 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 				    SCTP_DATA_FIRST_FRAG) {
1058 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1059 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1060 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1061 					    0, M_NOWAIT, 1, MT_DATA);
1062 					if (oper) {
1063 						struct sctp_paramhdr *ph;
1064 						uint32_t *ippp;
1065 
1066 						SCTP_BUF_LEN(oper) =
1067 						    sizeof(struct sctp_paramhdr) +
1068 						    (3 * sizeof(uint32_t));
1069 						ph = mtod(oper,
1070 						    struct sctp_paramhdr *);
1071 						ph->param_type =
1072 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1073 						ph->param_length =
1074 						    htons(SCTP_BUF_LEN(oper));
1075 						ippp = (uint32_t *) (ph + 1);
1076 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1077 						ippp++;
1078 						*ippp = chk->rec.data.TSN_seq;
1079 						ippp++;
1080 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1081 
1082 					}
1083 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1084 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1085 					*abort_flag = 1;
1086 					return;
1087 				}
1088 				if (chk->rec.data.stream_number !=
1089 				    prev->rec.data.stream_number) {
1090 					/*
1091 					 * Huh, need the correct STR here,
1092 					 * they must be the same.
1093 					 */
1094 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1095 					    chk->rec.data.stream_number,
1096 					    prev->rec.data.stream_number);
1097 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1098 					    0, M_NOWAIT, 1, MT_DATA);
1099 					if (oper) {
1100 						struct sctp_paramhdr *ph;
1101 						uint32_t *ippp;
1102 
1103 						SCTP_BUF_LEN(oper) =
1104 						    sizeof(struct sctp_paramhdr) +
1105 						    (3 * sizeof(uint32_t));
1106 						ph = mtod(oper,
1107 						    struct sctp_paramhdr *);
1108 						ph->param_type =
1109 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1110 						ph->param_length =
1111 						    htons(SCTP_BUF_LEN(oper));
1112 						ippp = (uint32_t *) (ph + 1);
1113 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1114 						ippp++;
1115 						*ippp = chk->rec.data.TSN_seq;
1116 						ippp++;
1117 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1118 					}
1119 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1120 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1121 					*abort_flag = 1;
1122 					return;
1123 				}
1124 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1125 				    chk->rec.data.stream_seq !=
1126 				    prev->rec.data.stream_seq) {
1127 					/*
1128 					 * Huh, need the correct STR here,
1129 					 * they must be the same.
1130 					 */
1131 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1132 					    chk->rec.data.stream_seq,
1133 					    prev->rec.data.stream_seq);
1134 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1135 					    0, M_NOWAIT, 1, MT_DATA);
1136 					if (oper) {
1137 						struct sctp_paramhdr *ph;
1138 						uint32_t *ippp;
1139 
1140 						SCTP_BUF_LEN(oper) =
1141 						    sizeof(struct sctp_paramhdr) +
1142 						    (3 * sizeof(uint32_t));
1143 						ph = mtod(oper,
1144 						    struct sctp_paramhdr *);
1145 						ph->param_type =
1146 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1147 						ph->param_length =
1148 						    htons(SCTP_BUF_LEN(oper));
1149 						ippp = (uint32_t *) (ph + 1);
1150 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1151 						ippp++;
1152 						*ippp = chk->rec.data.TSN_seq;
1153 						ippp++;
1154 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1155 					}
1156 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1157 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1158 					*abort_flag = 1;
1159 					return;
1160 				}
1161 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1162 			    SCTP_DATA_LAST_FRAG) {
1163 				/* Insert chk MUST be a FIRST */
1164 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1165 				    SCTP_DATA_FIRST_FRAG) {
1166 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1167 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1168 					    0, M_NOWAIT, 1, MT_DATA);
1169 					if (oper) {
1170 						struct sctp_paramhdr *ph;
1171 						uint32_t *ippp;
1172 
1173 						SCTP_BUF_LEN(oper) =
1174 						    sizeof(struct sctp_paramhdr) +
1175 						    (3 * sizeof(uint32_t));
1176 						ph = mtod(oper,
1177 						    struct sctp_paramhdr *);
1178 						ph->param_type =
1179 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1180 						ph->param_length =
1181 						    htons(SCTP_BUF_LEN(oper));
1182 						ippp = (uint32_t *) (ph + 1);
1183 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1184 						ippp++;
1185 						*ippp = chk->rec.data.TSN_seq;
1186 						ippp++;
1187 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1188 
1189 					}
1190 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1191 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1192 					*abort_flag = 1;
1193 					return;
1194 				}
1195 			}
1196 		}
1197 	}
1198 	if (next) {
1199 		post_tsn = chk->rec.data.TSN_seq + 1;
1200 		if (post_tsn == next->rec.data.TSN_seq) {
1201 			/*
1202 			 * Ok the one I am inserting ahead of is my NEXT
1203 			 * one. A bit of valdiation here.
1204 			 */
1205 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1206 				/* Insert chk MUST be a last fragment */
1207 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1208 				    != SCTP_DATA_LAST_FRAG) {
1209 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1210 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1211 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1212 					    0, M_NOWAIT, 1, MT_DATA);
1213 					if (oper) {
1214 						struct sctp_paramhdr *ph;
1215 						uint32_t *ippp;
1216 
1217 						SCTP_BUF_LEN(oper) =
1218 						    sizeof(struct sctp_paramhdr) +
1219 						    (3 * sizeof(uint32_t));
1220 						ph = mtod(oper,
1221 						    struct sctp_paramhdr *);
1222 						ph->param_type =
1223 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1224 						ph->param_length =
1225 						    htons(SCTP_BUF_LEN(oper));
1226 						ippp = (uint32_t *) (ph + 1);
1227 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1228 						ippp++;
1229 						*ippp = chk->rec.data.TSN_seq;
1230 						ippp++;
1231 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1232 					}
1233 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1234 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1235 					*abort_flag = 1;
1236 					return;
1237 				}
1238 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1239 				    SCTP_DATA_MIDDLE_FRAG ||
1240 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1241 			    SCTP_DATA_LAST_FRAG) {
1242 				/*
1243 				 * Insert chk CAN be MIDDLE or FIRST NOT
1244 				 * LAST
1245 				 */
1246 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 				    SCTP_DATA_LAST_FRAG) {
1248 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1249 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1250 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1251 					    0, M_NOWAIT, 1, MT_DATA);
1252 					if (oper) {
1253 						struct sctp_paramhdr *ph;
1254 						uint32_t *ippp;
1255 
1256 						SCTP_BUF_LEN(oper) =
1257 						    sizeof(struct sctp_paramhdr) +
1258 						    (3 * sizeof(uint32_t));
1259 						ph = mtod(oper,
1260 						    struct sctp_paramhdr *);
1261 						ph->param_type =
1262 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1263 						ph->param_length =
1264 						    htons(SCTP_BUF_LEN(oper));
1265 						ippp = (uint32_t *) (ph + 1);
1266 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1267 						ippp++;
1268 						*ippp = chk->rec.data.TSN_seq;
1269 						ippp++;
1270 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1271 
1272 					}
1273 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1274 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1275 					*abort_flag = 1;
1276 					return;
1277 				}
1278 				if (chk->rec.data.stream_number !=
1279 				    next->rec.data.stream_number) {
1280 					/*
1281 					 * Huh, need the correct STR here,
1282 					 * they must be the same.
1283 					 */
1284 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1285 					    chk->rec.data.stream_number,
1286 					    next->rec.data.stream_number);
1287 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1288 					    0, M_NOWAIT, 1, MT_DATA);
1289 					if (oper) {
1290 						struct sctp_paramhdr *ph;
1291 						uint32_t *ippp;
1292 
1293 						SCTP_BUF_LEN(oper) =
1294 						    sizeof(struct sctp_paramhdr) +
1295 						    (3 * sizeof(uint32_t));
1296 						ph = mtod(oper,
1297 						    struct sctp_paramhdr *);
1298 						ph->param_type =
1299 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1300 						ph->param_length =
1301 						    htons(SCTP_BUF_LEN(oper));
1302 						ippp = (uint32_t *) (ph + 1);
1303 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1304 						ippp++;
1305 						*ippp = chk->rec.data.TSN_seq;
1306 						ippp++;
1307 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1308 
1309 					}
1310 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1311 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1312 					*abort_flag = 1;
1313 					return;
1314 				}
1315 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1316 				    chk->rec.data.stream_seq !=
1317 				    next->rec.data.stream_seq) {
1318 					/*
1319 					 * Huh, need the correct STR here,
1320 					 * they must be the same.
1321 					 */
1322 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1323 					    chk->rec.data.stream_seq,
1324 					    next->rec.data.stream_seq);
1325 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1326 					    0, M_NOWAIT, 1, MT_DATA);
1327 					if (oper) {
1328 						struct sctp_paramhdr *ph;
1329 						uint32_t *ippp;
1330 
1331 						SCTP_BUF_LEN(oper) =
1332 						    sizeof(struct sctp_paramhdr) +
1333 						    (3 * sizeof(uint32_t));
1334 						ph = mtod(oper,
1335 						    struct sctp_paramhdr *);
1336 						ph->param_type =
1337 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1338 						ph->param_length =
1339 						    htons(SCTP_BUF_LEN(oper));
1340 						ippp = (uint32_t *) (ph + 1);
1341 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1342 						ippp++;
1343 						*ippp = chk->rec.data.TSN_seq;
1344 						ippp++;
1345 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1346 					}
1347 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1348 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1349 					*abort_flag = 1;
1350 					return;
1351 				}
1352 			}
1353 		}
1354 	}
1355 	/* Do we need to do some delivery? check */
1356 	sctp_deliver_reasm_check(stcb, asoc);
1357 }
1358 
1359 /*
1360  * This is an unfortunate routine. It checks to make sure a evil guy is not
1361  * stuffing us full of bad packet fragments. A broken peer could also do this
1362  * but this is doubtful. It is to bad I must worry about evil crackers sigh
1363  * :< more cycles.
1364  */
1365 static int
1366 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1367     uint32_t TSN_seq)
1368 {
1369 	struct sctp_tmit_chunk *at;
1370 	uint32_t tsn_est;
1371 
1372 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1373 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1374 			/* is it one bigger? */
1375 			tsn_est = at->rec.data.TSN_seq + 1;
1376 			if (tsn_est == TSN_seq) {
1377 				/* yep. It better be a last then */
1378 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1379 				    SCTP_DATA_LAST_FRAG) {
1380 					/*
1381 					 * Ok this guy belongs next to a guy
1382 					 * that is NOT last, it should be a
1383 					 * middle/last, not a complete
1384 					 * chunk.
1385 					 */
1386 					return (1);
1387 				} else {
1388 					/*
1389 					 * This guy is ok since its a LAST
1390 					 * and the new chunk is a fully
1391 					 * self- contained one.
1392 					 */
1393 					return (0);
1394 				}
1395 			}
1396 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1397 			/* Software error since I have a dup? */
1398 			return (1);
1399 		} else {
1400 			/*
1401 			 * Ok, 'at' is larger than new chunk but does it
1402 			 * need to be right before it.
1403 			 */
1404 			tsn_est = TSN_seq + 1;
1405 			if (tsn_est == at->rec.data.TSN_seq) {
1406 				/* Yep, It better be a first */
1407 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1408 				    SCTP_DATA_FIRST_FRAG) {
1409 					return (1);
1410 				} else {
1411 					return (0);
1412 				}
1413 			}
1414 		}
1415 	}
1416 	return (0);
1417 }
1418 
1419 static int
1420 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1421     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1422     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1423     int *break_flag, int last_chunk)
1424 {
1425 	/* Process a data chunk */
1426 	/* struct sctp_tmit_chunk *chk; */
1427 	struct sctp_tmit_chunk *chk;
1428 	uint32_t tsn, gap;
1429 	struct mbuf *dmbuf;
1430 	int the_len;
1431 	int need_reasm_check = 0;
1432 	uint16_t strmno, strmseq;
1433 	struct mbuf *oper;
1434 	struct sctp_queued_to_read *control;
1435 	int ordered;
1436 	uint32_t protocol_id;
1437 	uint8_t chunk_flags;
1438 	struct sctp_stream_reset_list *liste;
1439 
1440 	chk = NULL;
1441 	tsn = ntohl(ch->dp.tsn);
1442 	chunk_flags = ch->ch.chunk_flags;
1443 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1444 		asoc->send_sack = 1;
1445 	}
1446 	protocol_id = ch->dp.protocol_id;
1447 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1448 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1449 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1450 	}
1451 	if (stcb == NULL) {
1452 		return (0);
1453 	}
1454 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1455 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1456 		/* It is a duplicate */
1457 		SCTP_STAT_INCR(sctps_recvdupdata);
1458 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1459 			/* Record a dup for the next outbound sack */
1460 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1461 			asoc->numduptsns++;
1462 		}
1463 		asoc->send_sack = 1;
1464 		return (0);
1465 	}
1466 	/* Calculate the number of TSN's between the base and this TSN */
1467 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1468 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1469 		/* Can't hold the bit in the mapping at max array, toss it */
1470 		return (0);
1471 	}
1472 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1473 		SCTP_TCB_LOCK_ASSERT(stcb);
1474 		if (sctp_expand_mapping_array(asoc, gap)) {
1475 			/* Can't expand, drop it */
1476 			return (0);
1477 		}
1478 	}
1479 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1480 		*high_tsn = tsn;
1481 	}
1482 	/* See if we have received this one already */
1483 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1484 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1485 		SCTP_STAT_INCR(sctps_recvdupdata);
1486 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1487 			/* Record a dup for the next outbound sack */
1488 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1489 			asoc->numduptsns++;
1490 		}
1491 		asoc->send_sack = 1;
1492 		return (0);
1493 	}
1494 	/*
1495 	 * Check to see about the GONE flag, duplicates would cause a sack
1496 	 * to be sent up above
1497 	 */
1498 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1499 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1500 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1501 	    ) {
1502 		/*
1503 		 * wait a minute, this guy is gone, there is no longer a
1504 		 * receiver. Send peer an ABORT!
1505 		 */
1506 		struct mbuf *op_err;
1507 
1508 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1509 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1510 		*abort_flag = 1;
1511 		return (0);
1512 	}
1513 	/*
1514 	 * Now before going further we see if there is room. If NOT then we
1515 	 * MAY let one through only IF this TSN is the one we are waiting
1516 	 * for on a partial delivery API.
1517 	 */
1518 
1519 	/* now do the tests */
1520 	if (((asoc->cnt_on_all_streams +
1521 	    asoc->cnt_on_reasm_queue +
1522 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1523 	    (((int)asoc->my_rwnd) <= 0)) {
1524 		/*
1525 		 * When we have NO room in the rwnd we check to make sure
1526 		 * the reader is doing its job...
1527 		 */
1528 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1529 			/* some to read, wake-up */
1530 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1531 			struct socket *so;
1532 
1533 			so = SCTP_INP_SO(stcb->sctp_ep);
1534 			atomic_add_int(&stcb->asoc.refcnt, 1);
1535 			SCTP_TCB_UNLOCK(stcb);
1536 			SCTP_SOCKET_LOCK(so, 1);
1537 			SCTP_TCB_LOCK(stcb);
1538 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1539 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1540 				/* assoc was freed while we were unlocked */
1541 				SCTP_SOCKET_UNLOCK(so, 1);
1542 				return (0);
1543 			}
1544 #endif
1545 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1546 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1547 			SCTP_SOCKET_UNLOCK(so, 1);
1548 #endif
1549 		}
1550 		/* now is it in the mapping array of what we have accepted? */
1551 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1552 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1553 			/* Nope not in the valid range dump it */
1554 			sctp_set_rwnd(stcb, asoc);
1555 			if ((asoc->cnt_on_all_streams +
1556 			    asoc->cnt_on_reasm_queue +
1557 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1558 				SCTP_STAT_INCR(sctps_datadropchklmt);
1559 			} else {
1560 				SCTP_STAT_INCR(sctps_datadroprwnd);
1561 			}
1562 			*break_flag = 1;
1563 			return (0);
1564 		}
1565 	}
1566 	strmno = ntohs(ch->dp.stream_id);
1567 	if (strmno >= asoc->streamincnt) {
1568 		struct sctp_paramhdr *phdr;
1569 		struct mbuf *mb;
1570 
1571 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1572 		    0, M_NOWAIT, 1, MT_DATA);
1573 		if (mb != NULL) {
1574 			/* add some space up front so prepend will work well */
1575 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1576 			phdr = mtod(mb, struct sctp_paramhdr *);
1577 			/*
1578 			 * Error causes are just param's and this one has
1579 			 * two back to back phdr, one with the error type
1580 			 * and size, the other with the streamid and a rsvd
1581 			 */
1582 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1583 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1584 			phdr->param_length =
1585 			    htons(sizeof(struct sctp_paramhdr) * 2);
1586 			phdr++;
1587 			/* We insert the stream in the type field */
1588 			phdr->param_type = ch->dp.stream_id;
1589 			/* And set the length to 0 for the rsvd field */
1590 			phdr->param_length = 0;
1591 			sctp_queue_op_err(stcb, mb);
1592 		}
1593 		SCTP_STAT_INCR(sctps_badsid);
1594 		SCTP_TCB_LOCK_ASSERT(stcb);
1595 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1596 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1597 			asoc->highest_tsn_inside_nr_map = tsn;
1598 		}
1599 		if (tsn == (asoc->cumulative_tsn + 1)) {
1600 			/* Update cum-ack */
1601 			asoc->cumulative_tsn = tsn;
1602 		}
1603 		return (0);
1604 	}
1605 	/*
1606 	 * Before we continue lets validate that we are not being fooled by
1607 	 * an evil attacker. We can only have 4k chunks based on our TSN
1608 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1609 	 * way our stream sequence numbers could have wrapped. We of course
1610 	 * only validate the FIRST fragment so the bit must be set.
1611 	 */
1612 	strmseq = ntohs(ch->dp.stream_sequence);
1613 #ifdef SCTP_ASOCLOG_OF_TSNS
1614 	SCTP_TCB_LOCK_ASSERT(stcb);
1615 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1616 		asoc->tsn_in_at = 0;
1617 		asoc->tsn_in_wrapped = 1;
1618 	}
1619 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1620 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1621 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1622 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1623 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1624 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1625 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1626 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1627 	asoc->tsn_in_at++;
1628 #endif
1629 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1630 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1631 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1632 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1633 		/* The incoming sseq is behind where we last delivered? */
1634 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1635 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1636 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1637 		    0, M_NOWAIT, 1, MT_DATA);
1638 		if (oper) {
1639 			struct sctp_paramhdr *ph;
1640 			uint32_t *ippp;
1641 
1642 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1643 			    (3 * sizeof(uint32_t));
1644 			ph = mtod(oper, struct sctp_paramhdr *);
1645 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1646 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1647 			ippp = (uint32_t *) (ph + 1);
1648 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1649 			ippp++;
1650 			*ippp = tsn;
1651 			ippp++;
1652 			*ippp = ((strmno << 16) | strmseq);
1653 
1654 		}
1655 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1656 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1657 		*abort_flag = 1;
1658 		return (0);
1659 	}
1660 	/************************************
1661 	 * From here down we may find ch-> invalid
1662 	 * so its a good idea NOT to use it.
1663 	 *************************************/
1664 
1665 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1666 	if (last_chunk == 0) {
1667 		dmbuf = SCTP_M_COPYM(*m,
1668 		    (offset + sizeof(struct sctp_data_chunk)),
1669 		    the_len, M_NOWAIT);
1670 #ifdef SCTP_MBUF_LOGGING
1671 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1672 			struct mbuf *mat;
1673 
1674 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1675 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1676 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1677 				}
1678 			}
1679 		}
1680 #endif
1681 	} else {
1682 		/* We can steal the last chunk */
1683 		int l_len;
1684 
1685 		dmbuf = *m;
1686 		/* lop off the top part */
1687 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1688 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1689 			l_len = SCTP_BUF_LEN(dmbuf);
1690 		} else {
1691 			/*
1692 			 * need to count up the size hopefully does not hit
1693 			 * this to often :-0
1694 			 */
1695 			struct mbuf *lat;
1696 
1697 			l_len = 0;
1698 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1699 				l_len += SCTP_BUF_LEN(lat);
1700 			}
1701 		}
1702 		if (l_len > the_len) {
1703 			/* Trim the end round bytes off  too */
1704 			m_adj(dmbuf, -(l_len - the_len));
1705 		}
1706 	}
1707 	if (dmbuf == NULL) {
1708 		SCTP_STAT_INCR(sctps_nomem);
1709 		return (0);
1710 	}
1711 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1712 	    asoc->fragmented_delivery_inprogress == 0 &&
1713 	    TAILQ_EMPTY(&asoc->resetHead) &&
1714 	    ((ordered == 0) ||
1715 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1716 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1717 		/* Candidate for express delivery */
1718 		/*
1719 		 * Its not fragmented, No PD-API is up, Nothing in the
1720 		 * delivery queue, Its un-ordered OR ordered and the next to
1721 		 * deliver AND nothing else is stuck on the stream queue,
1722 		 * And there is room for it in the socket buffer. Lets just
1723 		 * stuff it up the buffer....
1724 		 */
1725 
1726 		/* It would be nice to avoid this copy if we could :< */
1727 		sctp_alloc_a_readq(stcb, control);
1728 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1729 		    protocol_id,
1730 		    strmno, strmseq,
1731 		    chunk_flags,
1732 		    dmbuf);
1733 		if (control == NULL) {
1734 			goto failed_express_del;
1735 		}
1736 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1737 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1738 			asoc->highest_tsn_inside_nr_map = tsn;
1739 		}
1740 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1741 		    control, &stcb->sctp_socket->so_rcv,
1742 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1743 
1744 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1745 			/* for ordered, bump what we delivered */
1746 			asoc->strmin[strmno].last_sequence_delivered++;
1747 		}
1748 		SCTP_STAT_INCR(sctps_recvexpress);
1749 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1750 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1751 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1752 		}
1753 		control = NULL;
1754 
1755 		goto finish_express_del;
1756 	}
1757 failed_express_del:
1758 	/* If we reach here this is a new chunk */
1759 	chk = NULL;
1760 	control = NULL;
1761 	/* Express for fragmented delivery? */
1762 	if ((asoc->fragmented_delivery_inprogress) &&
1763 	    (stcb->asoc.control_pdapi) &&
1764 	    (asoc->str_of_pdapi == strmno) &&
1765 	    (asoc->ssn_of_pdapi == strmseq)
1766 	    ) {
1767 		control = stcb->asoc.control_pdapi;
1768 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1769 			/* Can't be another first? */
1770 			goto failed_pdapi_express_del;
1771 		}
1772 		if (tsn == (control->sinfo_tsn + 1)) {
1773 			/* Yep, we can add it on */
1774 			int end = 0;
1775 
1776 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1777 				end = 1;
1778 			}
1779 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1780 			    tsn,
1781 			    &stcb->sctp_socket->so_rcv)) {
1782 				SCTP_PRINTF("Append fails end:%d\n", end);
1783 				goto failed_pdapi_express_del;
1784 			}
1785 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1786 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1787 				asoc->highest_tsn_inside_nr_map = tsn;
1788 			}
1789 			SCTP_STAT_INCR(sctps_recvexpressm);
1790 			control->sinfo_tsn = tsn;
1791 			asoc->tsn_last_delivered = tsn;
1792 			asoc->fragment_flags = chunk_flags;
1793 			asoc->tsn_of_pdapi_last_delivered = tsn;
1794 			asoc->last_flags_delivered = chunk_flags;
1795 			asoc->last_strm_seq_delivered = strmseq;
1796 			asoc->last_strm_no_delivered = strmno;
1797 			if (end) {
1798 				/* clean up the flags and such */
1799 				asoc->fragmented_delivery_inprogress = 0;
1800 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1801 					asoc->strmin[strmno].last_sequence_delivered++;
1802 				}
1803 				stcb->asoc.control_pdapi = NULL;
1804 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1805 					/*
1806 					 * There could be another message
1807 					 * ready
1808 					 */
1809 					need_reasm_check = 1;
1810 				}
1811 			}
1812 			control = NULL;
1813 			goto finish_express_del;
1814 		}
1815 	}
1816 failed_pdapi_express_del:
1817 	control = NULL;
1818 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1819 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1820 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1821 			asoc->highest_tsn_inside_nr_map = tsn;
1822 		}
1823 	} else {
1824 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1825 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1826 			asoc->highest_tsn_inside_map = tsn;
1827 		}
1828 	}
1829 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1830 		sctp_alloc_a_chunk(stcb, chk);
1831 		if (chk == NULL) {
1832 			/* No memory so we drop the chunk */
1833 			SCTP_STAT_INCR(sctps_nomem);
1834 			if (last_chunk == 0) {
1835 				/* we copied it, free the copy */
1836 				sctp_m_freem(dmbuf);
1837 			}
1838 			return (0);
1839 		}
1840 		chk->rec.data.TSN_seq = tsn;
1841 		chk->no_fr_allowed = 0;
1842 		chk->rec.data.stream_seq = strmseq;
1843 		chk->rec.data.stream_number = strmno;
1844 		chk->rec.data.payloadtype = protocol_id;
1845 		chk->rec.data.context = stcb->asoc.context;
1846 		chk->rec.data.doing_fast_retransmit = 0;
1847 		chk->rec.data.rcv_flags = chunk_flags;
1848 		chk->asoc = asoc;
1849 		chk->send_size = the_len;
1850 		chk->whoTo = net;
1851 		atomic_add_int(&net->ref_count, 1);
1852 		chk->data = dmbuf;
1853 	} else {
1854 		sctp_alloc_a_readq(stcb, control);
1855 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1856 		    protocol_id,
1857 		    strmno, strmseq,
1858 		    chunk_flags,
1859 		    dmbuf);
1860 		if (control == NULL) {
1861 			/* No memory so we drop the chunk */
1862 			SCTP_STAT_INCR(sctps_nomem);
1863 			if (last_chunk == 0) {
1864 				/* we copied it, free the copy */
1865 				sctp_m_freem(dmbuf);
1866 			}
1867 			return (0);
1868 		}
1869 		control->length = the_len;
1870 	}
1871 
1872 	/* Mark it as received */
1873 	/* Now queue it where it belongs */
1874 	if (control != NULL) {
1875 		/* First a sanity check */
1876 		if (asoc->fragmented_delivery_inprogress) {
1877 			/*
1878 			 * Ok, we have a fragmented delivery in progress if
1879 			 * this chunk is next to deliver OR belongs in our
1880 			 * view to the reassembly, the peer is evil or
1881 			 * broken.
1882 			 */
1883 			uint32_t estimate_tsn;
1884 
1885 			estimate_tsn = asoc->tsn_last_delivered + 1;
1886 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1887 			    (estimate_tsn == control->sinfo_tsn)) {
1888 				/* Evil/Broke peer */
1889 				sctp_m_freem(control->data);
1890 				control->data = NULL;
1891 				if (control->whoFrom) {
1892 					sctp_free_remote_addr(control->whoFrom);
1893 					control->whoFrom = NULL;
1894 				}
1895 				sctp_free_a_readq(stcb, control);
1896 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1897 				    0, M_NOWAIT, 1, MT_DATA);
1898 				if (oper) {
1899 					struct sctp_paramhdr *ph;
1900 					uint32_t *ippp;
1901 
1902 					SCTP_BUF_LEN(oper) =
1903 					    sizeof(struct sctp_paramhdr) +
1904 					    (3 * sizeof(uint32_t));
1905 					ph = mtod(oper, struct sctp_paramhdr *);
1906 					ph->param_type =
1907 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1908 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1909 					ippp = (uint32_t *) (ph + 1);
1910 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1911 					ippp++;
1912 					*ippp = tsn;
1913 					ippp++;
1914 					*ippp = ((strmno << 16) | strmseq);
1915 				}
1916 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1917 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1918 				*abort_flag = 1;
1919 				return (0);
1920 			} else {
1921 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1922 					sctp_m_freem(control->data);
1923 					control->data = NULL;
1924 					if (control->whoFrom) {
1925 						sctp_free_remote_addr(control->whoFrom);
1926 						control->whoFrom = NULL;
1927 					}
1928 					sctp_free_a_readq(stcb, control);
1929 
1930 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1931 					    0, M_NOWAIT, 1, MT_DATA);
1932 					if (oper) {
1933 						struct sctp_paramhdr *ph;
1934 						uint32_t *ippp;
1935 
1936 						SCTP_BUF_LEN(oper) =
1937 						    sizeof(struct sctp_paramhdr) +
1938 						    (3 * sizeof(uint32_t));
1939 						ph = mtod(oper,
1940 						    struct sctp_paramhdr *);
1941 						ph->param_type =
1942 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1943 						ph->param_length =
1944 						    htons(SCTP_BUF_LEN(oper));
1945 						ippp = (uint32_t *) (ph + 1);
1946 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1947 						ippp++;
1948 						*ippp = tsn;
1949 						ippp++;
1950 						*ippp = ((strmno << 16) | strmseq);
1951 					}
1952 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1953 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1954 					*abort_flag = 1;
1955 					return (0);
1956 				}
1957 			}
1958 		} else {
1959 			/* No PDAPI running */
1960 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1961 				/*
1962 				 * Reassembly queue is NOT empty validate
1963 				 * that this tsn does not need to be in
1964 				 * reasembly queue. If it does then our peer
1965 				 * is broken or evil.
1966 				 */
1967 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1968 					sctp_m_freem(control->data);
1969 					control->data = NULL;
1970 					if (control->whoFrom) {
1971 						sctp_free_remote_addr(control->whoFrom);
1972 						control->whoFrom = NULL;
1973 					}
1974 					sctp_free_a_readq(stcb, control);
1975 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1976 					    0, M_NOWAIT, 1, MT_DATA);
1977 					if (oper) {
1978 						struct sctp_paramhdr *ph;
1979 						uint32_t *ippp;
1980 
1981 						SCTP_BUF_LEN(oper) =
1982 						    sizeof(struct sctp_paramhdr) +
1983 						    (3 * sizeof(uint32_t));
1984 						ph = mtod(oper,
1985 						    struct sctp_paramhdr *);
1986 						ph->param_type =
1987 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1988 						ph->param_length =
1989 						    htons(SCTP_BUF_LEN(oper));
1990 						ippp = (uint32_t *) (ph + 1);
1991 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1992 						ippp++;
1993 						*ippp = tsn;
1994 						ippp++;
1995 						*ippp = ((strmno << 16) | strmseq);
1996 					}
1997 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1998 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1999 					*abort_flag = 1;
2000 					return (0);
2001 				}
2002 			}
2003 		}
2004 		/* ok, if we reach here we have passed the sanity checks */
2005 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2006 			/* queue directly into socket buffer */
2007 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2008 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2009 			    control,
2010 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2011 		} else {
2012 			/*
2013 			 * Special check for when streams are resetting. We
2014 			 * could be more smart about this and check the
2015 			 * actual stream to see if it is not being reset..
2016 			 * that way we would not create a HOLB when amongst
2017 			 * streams being reset and those not being reset.
2018 			 *
2019 			 * We take complete messages that have a stream reset
2020 			 * intervening (aka the TSN is after where our
2021 			 * cum-ack needs to be) off and put them on a
2022 			 * pending_reply_queue. The reassembly ones we do
2023 			 * not have to worry about since they are all sorted
2024 			 * and proceessed by TSN order. It is only the
2025 			 * singletons I must worry about.
2026 			 */
2027 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2028 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2029 				/*
2030 				 * yep its past where we need to reset... go
2031 				 * ahead and queue it.
2032 				 */
2033 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2034 					/* first one on */
2035 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2036 				} else {
2037 					struct sctp_queued_to_read *ctlOn,
2038 					                   *nctlOn;
2039 					unsigned char inserted = 0;
2040 
2041 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2042 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2043 							continue;
2044 						} else {
2045 							/* found it */
2046 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2047 							inserted = 1;
2048 							break;
2049 						}
2050 					}
2051 					if (inserted == 0) {
2052 						/*
2053 						 * must be put at end, use
2054 						 * prevP (all setup from
2055 						 * loop) to setup nextP.
2056 						 */
2057 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2058 					}
2059 				}
2060 			} else {
2061 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2062 				if (*abort_flag) {
2063 					return (0);
2064 				}
2065 			}
2066 		}
2067 	} else {
2068 		/* Into the re-assembly queue */
2069 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2070 		if (*abort_flag) {
2071 			/*
2072 			 * the assoc is now gone and chk was put onto the
2073 			 * reasm queue, which has all been freed.
2074 			 */
2075 			*m = NULL;
2076 			return (0);
2077 		}
2078 	}
2079 finish_express_del:
2080 	if (tsn == (asoc->cumulative_tsn + 1)) {
2081 		/* Update cum-ack */
2082 		asoc->cumulative_tsn = tsn;
2083 	}
2084 	if (last_chunk) {
2085 		*m = NULL;
2086 	}
2087 	if (ordered) {
2088 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2089 	} else {
2090 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2091 	}
2092 	SCTP_STAT_INCR(sctps_recvdata);
2093 	/* Set it present please */
2094 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2095 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2096 	}
2097 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2098 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2099 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2100 	}
2101 	/* check the special flag for stream resets */
2102 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2103 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2104 		/*
2105 		 * we have finished working through the backlogged TSN's now
2106 		 * time to reset streams. 1: call reset function. 2: free
2107 		 * pending_reply space 3: distribute any chunks in
2108 		 * pending_reply_queue.
2109 		 */
2110 		struct sctp_queued_to_read *ctl, *nctl;
2111 
2112 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2113 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2114 		SCTP_FREE(liste, SCTP_M_STRESET);
2115 		/* sa_ignore FREED_MEMORY */
2116 		liste = TAILQ_FIRST(&asoc->resetHead);
2117 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2118 			/* All can be removed */
2119 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2120 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2121 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2122 				if (*abort_flag) {
2123 					return (0);
2124 				}
2125 			}
2126 		} else {
2127 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2128 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2129 					break;
2130 				}
2131 				/*
2132 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2133 				 * process it which is the NOT of
2134 				 * ctl->sinfo_tsn > liste->tsn
2135 				 */
2136 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2137 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2138 				if (*abort_flag) {
2139 					return (0);
2140 				}
2141 			}
2142 		}
2143 		/*
2144 		 * Now service re-assembly to pick up anything that has been
2145 		 * held on reassembly queue?
2146 		 */
2147 		sctp_deliver_reasm_check(stcb, asoc);
2148 		need_reasm_check = 0;
2149 	}
2150 	if (need_reasm_check) {
2151 		/* Another one waits ? */
2152 		sctp_deliver_reasm_check(stcb, asoc);
2153 	}
2154 	return (1);
2155 }
2156 
2157 int8_t sctp_map_lookup_tab[256] = {
2158 	0, 1, 0, 2, 0, 1, 0, 3,
2159 	0, 1, 0, 2, 0, 1, 0, 4,
2160 	0, 1, 0, 2, 0, 1, 0, 3,
2161 	0, 1, 0, 2, 0, 1, 0, 5,
2162 	0, 1, 0, 2, 0, 1, 0, 3,
2163 	0, 1, 0, 2, 0, 1, 0, 4,
2164 	0, 1, 0, 2, 0, 1, 0, 3,
2165 	0, 1, 0, 2, 0, 1, 0, 6,
2166 	0, 1, 0, 2, 0, 1, 0, 3,
2167 	0, 1, 0, 2, 0, 1, 0, 4,
2168 	0, 1, 0, 2, 0, 1, 0, 3,
2169 	0, 1, 0, 2, 0, 1, 0, 5,
2170 	0, 1, 0, 2, 0, 1, 0, 3,
2171 	0, 1, 0, 2, 0, 1, 0, 4,
2172 	0, 1, 0, 2, 0, 1, 0, 3,
2173 	0, 1, 0, 2, 0, 1, 0, 7,
2174 	0, 1, 0, 2, 0, 1, 0, 3,
2175 	0, 1, 0, 2, 0, 1, 0, 4,
2176 	0, 1, 0, 2, 0, 1, 0, 3,
2177 	0, 1, 0, 2, 0, 1, 0, 5,
2178 	0, 1, 0, 2, 0, 1, 0, 3,
2179 	0, 1, 0, 2, 0, 1, 0, 4,
2180 	0, 1, 0, 2, 0, 1, 0, 3,
2181 	0, 1, 0, 2, 0, 1, 0, 6,
2182 	0, 1, 0, 2, 0, 1, 0, 3,
2183 	0, 1, 0, 2, 0, 1, 0, 4,
2184 	0, 1, 0, 2, 0, 1, 0, 3,
2185 	0, 1, 0, 2, 0, 1, 0, 5,
2186 	0, 1, 0, 2, 0, 1, 0, 3,
2187 	0, 1, 0, 2, 0, 1, 0, 4,
2188 	0, 1, 0, 2, 0, 1, 0, 3,
2189 	0, 1, 0, 2, 0, 1, 0, 8
2190 };
2191 
2192 
2193 void
2194 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2195 {
2196 	/*
2197 	 * Now we also need to check the mapping array in a couple of ways.
2198 	 * 1) Did we move the cum-ack point?
2199 	 *
2200 	 * When you first glance at this you might think that all entries that
2201 	 * make up the postion of the cum-ack would be in the nr-mapping
2202 	 * array only.. i.e. things up to the cum-ack are always
2203 	 * deliverable. Thats true with one exception, when its a fragmented
2204 	 * message we may not deliver the data until some threshold (or all
2205 	 * of it) is in place. So we must OR the nr_mapping_array and
2206 	 * mapping_array to get a true picture of the cum-ack.
2207 	 */
2208 	struct sctp_association *asoc;
2209 	int at;
2210 	uint8_t val;
2211 	int slide_from, slide_end, lgap, distance;
2212 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2213 
2214 	asoc = &stcb->asoc;
2215 
2216 	old_cumack = asoc->cumulative_tsn;
2217 	old_base = asoc->mapping_array_base_tsn;
2218 	old_highest = asoc->highest_tsn_inside_map;
2219 	/*
2220 	 * We could probably improve this a small bit by calculating the
2221 	 * offset of the current cum-ack as the starting point.
2222 	 */
2223 	at = 0;
2224 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2225 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2226 		if (val == 0xff) {
2227 			at += 8;
2228 		} else {
2229 			/* there is a 0 bit */
2230 			at += sctp_map_lookup_tab[val];
2231 			break;
2232 		}
2233 	}
2234 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2235 
2236 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2237 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2238 #ifdef INVARIANTS
2239 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2240 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2241 #else
2242 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2243 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2244 		sctp_print_mapping_array(asoc);
2245 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2246 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2247 		}
2248 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2249 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2250 #endif
2251 	}
2252 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2253 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2254 	} else {
2255 		highest_tsn = asoc->highest_tsn_inside_map;
2256 	}
2257 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2258 		/* The complete array was completed by a single FR */
2259 		/* highest becomes the cum-ack */
2260 		int clr;
2261 
2262 #ifdef INVARIANTS
2263 		unsigned int i;
2264 
2265 #endif
2266 
2267 		/* clear the array */
2268 		clr = ((at + 7) >> 3);
2269 		if (clr > asoc->mapping_array_size) {
2270 			clr = asoc->mapping_array_size;
2271 		}
2272 		memset(asoc->mapping_array, 0, clr);
2273 		memset(asoc->nr_mapping_array, 0, clr);
2274 #ifdef INVARIANTS
2275 		for (i = 0; i < asoc->mapping_array_size; i++) {
2276 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2277 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2278 				sctp_print_mapping_array(asoc);
2279 			}
2280 		}
2281 #endif
2282 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2283 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2284 	} else if (at >= 8) {
2285 		/* we can slide the mapping array down */
2286 		/* slide_from holds where we hit the first NON 0xff byte */
2287 
2288 		/*
2289 		 * now calculate the ceiling of the move using our highest
2290 		 * TSN value
2291 		 */
2292 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2293 		slide_end = (lgap >> 3);
2294 		if (slide_end < slide_from) {
2295 			sctp_print_mapping_array(asoc);
2296 #ifdef INVARIANTS
2297 			panic("impossible slide");
2298 #else
2299 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2300 			    lgap, slide_end, slide_from, at);
2301 			return;
2302 #endif
2303 		}
2304 		if (slide_end > asoc->mapping_array_size) {
2305 #ifdef INVARIANTS
2306 			panic("would overrun buffer");
2307 #else
2308 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2309 			    asoc->mapping_array_size, slide_end);
2310 			slide_end = asoc->mapping_array_size;
2311 #endif
2312 		}
2313 		distance = (slide_end - slide_from) + 1;
2314 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2315 			sctp_log_map(old_base, old_cumack, old_highest,
2316 			    SCTP_MAP_PREPARE_SLIDE);
2317 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2318 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2319 		}
2320 		if (distance + slide_from > asoc->mapping_array_size ||
2321 		    distance < 0) {
2322 			/*
2323 			 * Here we do NOT slide forward the array so that
2324 			 * hopefully when more data comes in to fill it up
2325 			 * we will be able to slide it forward. Really I
2326 			 * don't think this should happen :-0
2327 			 */
2328 
2329 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2330 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2331 				    (uint32_t) asoc->mapping_array_size,
2332 				    SCTP_MAP_SLIDE_NONE);
2333 			}
2334 		} else {
2335 			int ii;
2336 
2337 			for (ii = 0; ii < distance; ii++) {
2338 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2339 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2340 
2341 			}
2342 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2343 				asoc->mapping_array[ii] = 0;
2344 				asoc->nr_mapping_array[ii] = 0;
2345 			}
2346 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2347 				asoc->highest_tsn_inside_map += (slide_from << 3);
2348 			}
2349 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2350 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2351 			}
2352 			asoc->mapping_array_base_tsn += (slide_from << 3);
2353 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2354 				sctp_log_map(asoc->mapping_array_base_tsn,
2355 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2356 				    SCTP_MAP_SLIDE_RESULT);
2357 			}
2358 		}
2359 	}
2360 }
2361 
2362 void
2363 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2364 {
2365 	struct sctp_association *asoc;
2366 	uint32_t highest_tsn;
2367 
2368 	asoc = &stcb->asoc;
2369 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2370 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2371 	} else {
2372 		highest_tsn = asoc->highest_tsn_inside_map;
2373 	}
2374 
2375 	/*
2376 	 * Now we need to see if we need to queue a sack or just start the
2377 	 * timer (if allowed).
2378 	 */
2379 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2380 		/*
2381 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2382 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2383 		 * SACK
2384 		 */
2385 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2386 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2387 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2388 		}
2389 		sctp_send_shutdown(stcb,
2390 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2391 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2392 	} else {
2393 		int is_a_gap;
2394 
2395 		/* is there a gap now ? */
2396 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2397 
2398 		/*
2399 		 * CMT DAC algorithm: increase number of packets received
2400 		 * since last ack
2401 		 */
2402 		stcb->asoc.cmt_dac_pkts_rcvd++;
2403 
2404 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2405 							 * SACK */
2406 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2407 							 * longer is one */
2408 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2409 		    (is_a_gap) ||	/* is still a gap */
2410 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2411 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2412 		    ) {
2413 
2414 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2415 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2416 			    (stcb->asoc.send_sack == 0) &&
2417 			    (stcb->asoc.numduptsns == 0) &&
2418 			    (stcb->asoc.delayed_ack) &&
2419 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2420 
2421 				/*
2422 				 * CMT DAC algorithm: With CMT, delay acks
2423 				 * even in the face of
2424 				 *
2425 				 * reordering. Therefore, if acks that do not
2426 				 * have to be sent because of the above
2427 				 * reasons, will be delayed. That is, acks
2428 				 * that would have been sent due to gap
2429 				 * reports will be delayed with DAC. Start
2430 				 * the delayed ack timer.
2431 				 */
2432 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2433 				    stcb->sctp_ep, stcb, NULL);
2434 			} else {
2435 				/*
2436 				 * Ok we must build a SACK since the timer
2437 				 * is pending, we got our first packet OR
2438 				 * there are gaps or duplicates.
2439 				 */
2440 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2441 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2442 			}
2443 		} else {
2444 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2445 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2446 				    stcb->sctp_ep, stcb, NULL);
2447 			}
2448 		}
2449 	}
2450 }
2451 
2452 void
2453 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2454 {
2455 	struct sctp_tmit_chunk *chk;
2456 	uint32_t tsize, pd_point;
2457 	uint16_t nxt_todel;
2458 
2459 	if (asoc->fragmented_delivery_inprogress) {
2460 		sctp_service_reassembly(stcb, asoc);
2461 	}
2462 	/* Can we proceed further, i.e. the PD-API is complete */
2463 	if (asoc->fragmented_delivery_inprogress) {
2464 		/* no */
2465 		return;
2466 	}
2467 	/*
2468 	 * Now is there some other chunk I can deliver from the reassembly
2469 	 * queue.
2470 	 */
2471 doit_again:
2472 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2473 	if (chk == NULL) {
2474 		asoc->size_on_reasm_queue = 0;
2475 		asoc->cnt_on_reasm_queue = 0;
2476 		return;
2477 	}
2478 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2479 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2480 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2481 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2482 		/*
2483 		 * Yep the first one is here. We setup to start reception,
2484 		 * by backing down the TSN just in case we can't deliver.
2485 		 */
2486 
2487 		/*
2488 		 * Before we start though either all of the message should
2489 		 * be here or the socket buffer max or nothing on the
2490 		 * delivery queue and something can be delivered.
2491 		 */
2492 		if (stcb->sctp_socket) {
2493 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2494 			    stcb->sctp_ep->partial_delivery_point);
2495 		} else {
2496 			pd_point = stcb->sctp_ep->partial_delivery_point;
2497 		}
2498 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2499 			asoc->fragmented_delivery_inprogress = 1;
2500 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2501 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2502 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2503 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2504 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2505 			sctp_service_reassembly(stcb, asoc);
2506 			if (asoc->fragmented_delivery_inprogress == 0) {
2507 				goto doit_again;
2508 			}
2509 		}
2510 	}
2511 }
2512 
2513 int
2514 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2515     struct sockaddr *src, struct sockaddr *dst,
2516     struct sctphdr *sh, struct sctp_inpcb *inp,
2517     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2518     uint8_t use_mflowid, uint32_t mflowid,
2519     uint32_t vrf_id, uint16_t port)
2520 {
2521 	struct sctp_data_chunk *ch, chunk_buf;
2522 	struct sctp_association *asoc;
2523 	int num_chunks = 0;	/* number of control chunks processed */
2524 	int stop_proc = 0;
2525 	int chk_length, break_flag, last_chunk;
2526 	int abort_flag = 0, was_a_gap;
2527 	struct mbuf *m;
2528 	uint32_t highest_tsn;
2529 
2530 	/* set the rwnd */
2531 	sctp_set_rwnd(stcb, &stcb->asoc);
2532 
2533 	m = *mm;
2534 	SCTP_TCB_LOCK_ASSERT(stcb);
2535 	asoc = &stcb->asoc;
2536 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2537 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2538 	} else {
2539 		highest_tsn = asoc->highest_tsn_inside_map;
2540 	}
2541 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2542 	/*
2543 	 * setup where we got the last DATA packet from for any SACK that
2544 	 * may need to go out. Don't bump the net. This is done ONLY when a
2545 	 * chunk is assigned.
2546 	 */
2547 	asoc->last_data_chunk_from = net;
2548 
2549 	/*-
2550 	 * Now before we proceed we must figure out if this is a wasted
2551 	 * cluster... i.e. it is a small packet sent in and yet the driver
2552 	 * underneath allocated a full cluster for it. If so we must copy it
2553 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2554 	 * with cluster starvation. Note for __Panda__ we don't do this
2555 	 * since it has clusters all the way down to 64 bytes.
2556 	 */
2557 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2558 		/* we only handle mbufs that are singletons.. not chains */
2559 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2560 		if (m) {
2561 			/* ok lets see if we can copy the data up */
2562 			caddr_t *from, *to;
2563 
2564 			/* get the pointers and copy */
2565 			to = mtod(m, caddr_t *);
2566 			from = mtod((*mm), caddr_t *);
2567 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2568 			/* copy the length and free up the old */
2569 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2570 			sctp_m_freem(*mm);
2571 			/* sucess, back copy */
2572 			*mm = m;
2573 		} else {
2574 			/* We are in trouble in the mbuf world .. yikes */
2575 			m = *mm;
2576 		}
2577 	}
2578 	/* get pointer to the first chunk header */
2579 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2580 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2581 	if (ch == NULL) {
2582 		return (1);
2583 	}
2584 	/*
2585 	 * process all DATA chunks...
2586 	 */
2587 	*high_tsn = asoc->cumulative_tsn;
2588 	break_flag = 0;
2589 	asoc->data_pkts_seen++;
2590 	while (stop_proc == 0) {
2591 		/* validate chunk length */
2592 		chk_length = ntohs(ch->ch.chunk_length);
2593 		if (length - *offset < chk_length) {
2594 			/* all done, mutulated chunk */
2595 			stop_proc = 1;
2596 			continue;
2597 		}
2598 		if (ch->ch.chunk_type == SCTP_DATA) {
2599 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2600 				/*
2601 				 * Need to send an abort since we had a
2602 				 * invalid data chunk.
2603 				 */
2604 				struct mbuf *op_err;
2605 
2606 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2607 				    0, M_NOWAIT, 1, MT_DATA);
2608 
2609 				if (op_err) {
2610 					struct sctp_paramhdr *ph;
2611 					uint32_t *ippp;
2612 
2613 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2614 					    (2 * sizeof(uint32_t));
2615 					ph = mtod(op_err, struct sctp_paramhdr *);
2616 					ph->param_type =
2617 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2618 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2619 					ippp = (uint32_t *) (ph + 1);
2620 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2621 					ippp++;
2622 					*ippp = asoc->cumulative_tsn;
2623 
2624 				}
2625 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2626 				sctp_abort_association(inp, stcb, m, iphlen,
2627 				    src, dst, sh, op_err,
2628 				    use_mflowid, mflowid,
2629 				    vrf_id, port);
2630 				return (2);
2631 			}
2632 #ifdef SCTP_AUDITING_ENABLED
2633 			sctp_audit_log(0xB1, 0);
2634 #endif
2635 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2636 				last_chunk = 1;
2637 			} else {
2638 				last_chunk = 0;
2639 			}
2640 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2641 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2642 			    last_chunk)) {
2643 				num_chunks++;
2644 			}
2645 			if (abort_flag)
2646 				return (2);
2647 
2648 			if (break_flag) {
2649 				/*
2650 				 * Set because of out of rwnd space and no
2651 				 * drop rep space left.
2652 				 */
2653 				stop_proc = 1;
2654 				continue;
2655 			}
2656 		} else {
2657 			/* not a data chunk in the data region */
2658 			switch (ch->ch.chunk_type) {
2659 			case SCTP_INITIATION:
2660 			case SCTP_INITIATION_ACK:
2661 			case SCTP_SELECTIVE_ACK:
2662 			case SCTP_NR_SELECTIVE_ACK:
2663 			case SCTP_HEARTBEAT_REQUEST:
2664 			case SCTP_HEARTBEAT_ACK:
2665 			case SCTP_ABORT_ASSOCIATION:
2666 			case SCTP_SHUTDOWN:
2667 			case SCTP_SHUTDOWN_ACK:
2668 			case SCTP_OPERATION_ERROR:
2669 			case SCTP_COOKIE_ECHO:
2670 			case SCTP_COOKIE_ACK:
2671 			case SCTP_ECN_ECHO:
2672 			case SCTP_ECN_CWR:
2673 			case SCTP_SHUTDOWN_COMPLETE:
2674 			case SCTP_AUTHENTICATION:
2675 			case SCTP_ASCONF_ACK:
2676 			case SCTP_PACKET_DROPPED:
2677 			case SCTP_STREAM_RESET:
2678 			case SCTP_FORWARD_CUM_TSN:
2679 			case SCTP_ASCONF:
2680 				/*
2681 				 * Now, what do we do with KNOWN chunks that
2682 				 * are NOT in the right place?
2683 				 *
2684 				 * For now, I do nothing but ignore them. We
2685 				 * may later want to add sysctl stuff to
2686 				 * switch out and do either an ABORT() or
2687 				 * possibly process them.
2688 				 */
2689 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2690 					struct mbuf *op_err;
2691 
2692 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2693 					sctp_abort_association(inp, stcb,
2694 					    m, iphlen,
2695 					    src, dst,
2696 					    sh, op_err,
2697 					    use_mflowid, mflowid,
2698 					    vrf_id, port);
2699 					return (2);
2700 				}
2701 				break;
2702 			default:
2703 				/* unknown chunk type, use bit rules */
2704 				if (ch->ch.chunk_type & 0x40) {
2705 					/* Add a error report to the queue */
2706 					struct mbuf *merr;
2707 					struct sctp_paramhdr *phd;
2708 
2709 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2710 					if (merr) {
2711 						phd = mtod(merr, struct sctp_paramhdr *);
2712 						/*
2713 						 * We cheat and use param
2714 						 * type since we did not
2715 						 * bother to define a error
2716 						 * cause struct. They are
2717 						 * the same basic format
2718 						 * with different names.
2719 						 */
2720 						phd->param_type =
2721 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2722 						phd->param_length =
2723 						    htons(chk_length + sizeof(*phd));
2724 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2725 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2726 						if (SCTP_BUF_NEXT(merr)) {
2727 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2728 								sctp_m_freem(merr);
2729 							} else {
2730 								sctp_queue_op_err(stcb, merr);
2731 							}
2732 						} else {
2733 							sctp_m_freem(merr);
2734 						}
2735 					}
2736 				}
2737 				if ((ch->ch.chunk_type & 0x80) == 0) {
2738 					/* discard the rest of this packet */
2739 					stop_proc = 1;
2740 				}	/* else skip this bad chunk and
2741 					 * continue... */
2742 				break;
2743 			}	/* switch of chunk type */
2744 		}
2745 		*offset += SCTP_SIZE32(chk_length);
2746 		if ((*offset >= length) || stop_proc) {
2747 			/* no more data left in the mbuf chain */
2748 			stop_proc = 1;
2749 			continue;
2750 		}
2751 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2752 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2753 		if (ch == NULL) {
2754 			*offset = length;
2755 			stop_proc = 1;
2756 			continue;
2757 		}
2758 	}
2759 	if (break_flag) {
2760 		/*
2761 		 * we need to report rwnd overrun drops.
2762 		 */
2763 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2764 	}
2765 	if (num_chunks) {
2766 		/*
2767 		 * Did we get data, if so update the time for auto-close and
2768 		 * give peer credit for being alive.
2769 		 */
2770 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2771 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2772 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2773 			    stcb->asoc.overall_error_count,
2774 			    0,
2775 			    SCTP_FROM_SCTP_INDATA,
2776 			    __LINE__);
2777 		}
2778 		stcb->asoc.overall_error_count = 0;
2779 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2780 	}
2781 	/* now service all of the reassm queue if needed */
2782 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2783 		sctp_service_queues(stcb, asoc);
2784 
2785 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2786 		/* Assure that we ack right away */
2787 		stcb->asoc.send_sack = 1;
2788 	}
2789 	/* Start a sack timer or QUEUE a SACK for sending */
2790 	sctp_sack_check(stcb, was_a_gap);
2791 	return (0);
2792 }
2793 
2794 static int
2795 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2796     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2797     int *num_frs,
2798     uint32_t * biggest_newly_acked_tsn,
2799     uint32_t * this_sack_lowest_newack,
2800     int *rto_ok)
2801 {
2802 	struct sctp_tmit_chunk *tp1;
2803 	unsigned int theTSN;
2804 	int j, wake_him = 0, circled = 0;
2805 
2806 	/* Recover the tp1 we last saw */
2807 	tp1 = *p_tp1;
2808 	if (tp1 == NULL) {
2809 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2810 	}
2811 	for (j = frag_strt; j <= frag_end; j++) {
2812 		theTSN = j + last_tsn;
2813 		while (tp1) {
2814 			if (tp1->rec.data.doing_fast_retransmit)
2815 				(*num_frs) += 1;
2816 
2817 			/*-
2818 			 * CMT: CUCv2 algorithm. For each TSN being
2819 			 * processed from the sent queue, track the
2820 			 * next expected pseudo-cumack, or
2821 			 * rtx_pseudo_cumack, if required. Separate
2822 			 * cumack trackers for first transmissions,
2823 			 * and retransmissions.
2824 			 */
2825 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2826 			    (tp1->snd_count == 1)) {
2827 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2828 				tp1->whoTo->find_pseudo_cumack = 0;
2829 			}
2830 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2831 			    (tp1->snd_count > 1)) {
2832 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2833 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2834 			}
2835 			if (tp1->rec.data.TSN_seq == theTSN) {
2836 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2837 					/*-
2838 					 * must be held until
2839 					 * cum-ack passes
2840 					 */
2841 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2842 						/*-
2843 						 * If it is less than RESEND, it is
2844 						 * now no-longer in flight.
2845 						 * Higher values may already be set
2846 						 * via previous Gap Ack Blocks...
2847 						 * i.e. ACKED or RESEND.
2848 						 */
2849 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2850 						    *biggest_newly_acked_tsn)) {
2851 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2852 						}
2853 						/*-
2854 						 * CMT: SFR algo (and HTNA) - set
2855 						 * saw_newack to 1 for dest being
2856 						 * newly acked. update
2857 						 * this_sack_highest_newack if
2858 						 * appropriate.
2859 						 */
2860 						if (tp1->rec.data.chunk_was_revoked == 0)
2861 							tp1->whoTo->saw_newack = 1;
2862 
2863 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2864 						    tp1->whoTo->this_sack_highest_newack)) {
2865 							tp1->whoTo->this_sack_highest_newack =
2866 							    tp1->rec.data.TSN_seq;
2867 						}
2868 						/*-
2869 						 * CMT DAC algo: also update
2870 						 * this_sack_lowest_newack
2871 						 */
2872 						if (*this_sack_lowest_newack == 0) {
2873 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2874 								sctp_log_sack(*this_sack_lowest_newack,
2875 								    last_tsn,
2876 								    tp1->rec.data.TSN_seq,
2877 								    0,
2878 								    0,
2879 								    SCTP_LOG_TSN_ACKED);
2880 							}
2881 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2882 						}
2883 						/*-
2884 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2885 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2886 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2887 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2888 						 * Separate pseudo_cumack trackers for first transmissions and
2889 						 * retransmissions.
2890 						 */
2891 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2892 							if (tp1->rec.data.chunk_was_revoked == 0) {
2893 								tp1->whoTo->new_pseudo_cumack = 1;
2894 							}
2895 							tp1->whoTo->find_pseudo_cumack = 1;
2896 						}
2897 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2898 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2899 						}
2900 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2901 							if (tp1->rec.data.chunk_was_revoked == 0) {
2902 								tp1->whoTo->new_pseudo_cumack = 1;
2903 							}
2904 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2905 						}
2906 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2907 							sctp_log_sack(*biggest_newly_acked_tsn,
2908 							    last_tsn,
2909 							    tp1->rec.data.TSN_seq,
2910 							    frag_strt,
2911 							    frag_end,
2912 							    SCTP_LOG_TSN_ACKED);
2913 						}
2914 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2915 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2916 							    tp1->whoTo->flight_size,
2917 							    tp1->book_size,
2918 							    (uintptr_t) tp1->whoTo,
2919 							    tp1->rec.data.TSN_seq);
2920 						}
2921 						sctp_flight_size_decrease(tp1);
2922 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2923 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2924 							    tp1);
2925 						}
2926 						sctp_total_flight_decrease(stcb, tp1);
2927 
2928 						tp1->whoTo->net_ack += tp1->send_size;
2929 						if (tp1->snd_count < 2) {
2930 							/*-
2931 							 * True non-retransmited chunk
2932 							 */
2933 							tp1->whoTo->net_ack2 += tp1->send_size;
2934 
2935 							/*-
2936 							 * update RTO too ?
2937 							 */
2938 							if (tp1->do_rtt) {
2939 								if (*rto_ok) {
2940 									tp1->whoTo->RTO =
2941 									    sctp_calculate_rto(stcb,
2942 									    &stcb->asoc,
2943 									    tp1->whoTo,
2944 									    &tp1->sent_rcv_time,
2945 									    sctp_align_safe_nocopy,
2946 									    SCTP_RTT_FROM_DATA);
2947 									*rto_ok = 0;
2948 								}
2949 								if (tp1->whoTo->rto_needed == 0) {
2950 									tp1->whoTo->rto_needed = 1;
2951 								}
2952 								tp1->do_rtt = 0;
2953 							}
2954 						}
2955 					}
2956 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2957 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2958 						    stcb->asoc.this_sack_highest_gap)) {
2959 							stcb->asoc.this_sack_highest_gap =
2960 							    tp1->rec.data.TSN_seq;
2961 						}
2962 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2963 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2964 #ifdef SCTP_AUDITING_ENABLED
2965 							sctp_audit_log(0xB2,
2966 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2967 #endif
2968 						}
2969 					}
2970 					/*-
2971 					 * All chunks NOT UNSENT fall through here and are marked
2972 					 * (leave PR-SCTP ones that are to skip alone though)
2973 					 */
2974 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2975 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2976 						tp1->sent = SCTP_DATAGRAM_MARKED;
2977 					}
2978 					if (tp1->rec.data.chunk_was_revoked) {
2979 						/* deflate the cwnd */
2980 						tp1->whoTo->cwnd -= tp1->book_size;
2981 						tp1->rec.data.chunk_was_revoked = 0;
2982 					}
2983 					/* NR Sack code here */
2984 					if (nr_sacking &&
2985 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2986 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2987 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2988 #ifdef INVARIANTS
2989 						} else {
2990 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2991 #endif
2992 						}
2993 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2994 						if (tp1->data) {
2995 							/*
2996 							 * sa_ignore
2997 							 * NO_NULL_CHK
2998 							 */
2999 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3000 							sctp_m_freem(tp1->data);
3001 							tp1->data = NULL;
3002 						}
3003 						wake_him++;
3004 					}
3005 				}
3006 				break;
3007 			}	/* if (tp1->TSN_seq == theTSN) */
3008 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3009 				break;
3010 			}
3011 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3012 			if ((tp1 == NULL) && (circled == 0)) {
3013 				circled++;
3014 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3015 			}
3016 		}		/* end while (tp1) */
3017 		if (tp1 == NULL) {
3018 			circled = 0;
3019 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3020 		}
3021 		/* In case the fragments were not in order we must reset */
3022 	}			/* end for (j = fragStart */
3023 	*p_tp1 = tp1;
3024 	return (wake_him);	/* Return value only used for nr-sack */
3025 }
3026 
3027 
3028 static int
3029 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3030     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3031     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3032     int num_seg, int num_nr_seg, int *rto_ok)
3033 {
3034 	struct sctp_gap_ack_block *frag, block;
3035 	struct sctp_tmit_chunk *tp1;
3036 	int i;
3037 	int num_frs = 0;
3038 	int chunk_freed;
3039 	int non_revocable;
3040 	uint16_t frag_strt, frag_end, prev_frag_end;
3041 
3042 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3043 	prev_frag_end = 0;
3044 	chunk_freed = 0;
3045 
3046 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3047 		if (i == num_seg) {
3048 			prev_frag_end = 0;
3049 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3050 		}
3051 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3052 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3053 		*offset += sizeof(block);
3054 		if (frag == NULL) {
3055 			return (chunk_freed);
3056 		}
3057 		frag_strt = ntohs(frag->start);
3058 		frag_end = ntohs(frag->end);
3059 
3060 		if (frag_strt > frag_end) {
3061 			/* This gap report is malformed, skip it. */
3062 			continue;
3063 		}
3064 		if (frag_strt <= prev_frag_end) {
3065 			/* This gap report is not in order, so restart. */
3066 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3067 		}
3068 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3069 			*biggest_tsn_acked = last_tsn + frag_end;
3070 		}
3071 		if (i < num_seg) {
3072 			non_revocable = 0;
3073 		} else {
3074 			non_revocable = 1;
3075 		}
3076 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3077 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3078 		    this_sack_lowest_newack, rto_ok)) {
3079 			chunk_freed = 1;
3080 		}
3081 		prev_frag_end = frag_end;
3082 	}
3083 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3084 		if (num_frs)
3085 			sctp_log_fr(*biggest_tsn_acked,
3086 			    *biggest_newly_acked_tsn,
3087 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3088 	}
3089 	return (chunk_freed);
3090 }
3091 
3092 static void
3093 sctp_check_for_revoked(struct sctp_tcb *stcb,
3094     struct sctp_association *asoc, uint32_t cumack,
3095     uint32_t biggest_tsn_acked)
3096 {
3097 	struct sctp_tmit_chunk *tp1;
3098 
3099 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3100 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3101 			/*
3102 			 * ok this guy is either ACK or MARKED. If it is
3103 			 * ACKED it has been previously acked but not this
3104 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3105 			 * again.
3106 			 */
3107 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3108 				break;
3109 			}
3110 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3111 				/* it has been revoked */
3112 				tp1->sent = SCTP_DATAGRAM_SENT;
3113 				tp1->rec.data.chunk_was_revoked = 1;
3114 				/*
3115 				 * We must add this stuff back in to assure
3116 				 * timers and such get started.
3117 				 */
3118 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3119 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3120 					    tp1->whoTo->flight_size,
3121 					    tp1->book_size,
3122 					    (uintptr_t) tp1->whoTo,
3123 					    tp1->rec.data.TSN_seq);
3124 				}
3125 				sctp_flight_size_increase(tp1);
3126 				sctp_total_flight_increase(stcb, tp1);
3127 				/*
3128 				 * We inflate the cwnd to compensate for our
3129 				 * artificial inflation of the flight_size.
3130 				 */
3131 				tp1->whoTo->cwnd += tp1->book_size;
3132 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3133 					sctp_log_sack(asoc->last_acked_seq,
3134 					    cumack,
3135 					    tp1->rec.data.TSN_seq,
3136 					    0,
3137 					    0,
3138 					    SCTP_LOG_TSN_REVOKED);
3139 				}
3140 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3141 				/* it has been re-acked in this SACK */
3142 				tp1->sent = SCTP_DATAGRAM_ACKED;
3143 			}
3144 		}
3145 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3146 			break;
3147 	}
3148 }
3149 
3150 
3151 static void
3152 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3153     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3154 {
3155 	struct sctp_tmit_chunk *tp1;
3156 	int strike_flag = 0;
3157 	struct timeval now;
3158 	int tot_retrans = 0;
3159 	uint32_t sending_seq;
3160 	struct sctp_nets *net;
3161 	int num_dests_sacked = 0;
3162 
3163 	/*
3164 	 * select the sending_seq, this is either the next thing ready to be
3165 	 * sent but not transmitted, OR, the next seq we assign.
3166 	 */
3167 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3168 	if (tp1 == NULL) {
3169 		sending_seq = asoc->sending_seq;
3170 	} else {
3171 		sending_seq = tp1->rec.data.TSN_seq;
3172 	}
3173 
3174 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3175 	if ((asoc->sctp_cmt_on_off > 0) &&
3176 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3177 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3178 			if (net->saw_newack)
3179 				num_dests_sacked++;
3180 		}
3181 	}
3182 	if (stcb->asoc.peer_supports_prsctp) {
3183 		(void)SCTP_GETTIME_TIMEVAL(&now);
3184 	}
3185 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3186 		strike_flag = 0;
3187 		if (tp1->no_fr_allowed) {
3188 			/* this one had a timeout or something */
3189 			continue;
3190 		}
3191 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3192 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3193 				sctp_log_fr(biggest_tsn_newly_acked,
3194 				    tp1->rec.data.TSN_seq,
3195 				    tp1->sent,
3196 				    SCTP_FR_LOG_CHECK_STRIKE);
3197 		}
3198 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3199 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3200 			/* done */
3201 			break;
3202 		}
3203 		if (stcb->asoc.peer_supports_prsctp) {
3204 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3205 				/* Is it expired? */
3206 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3207 					/* Yes so drop it */
3208 					if (tp1->data != NULL) {
3209 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3210 						    SCTP_SO_NOT_LOCKED);
3211 					}
3212 					continue;
3213 				}
3214 			}
3215 		}
3216 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3217 			/* we are beyond the tsn in the sack  */
3218 			break;
3219 		}
3220 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3221 			/* either a RESEND, ACKED, or MARKED */
3222 			/* skip */
3223 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3224 				/* Continue strikin FWD-TSN chunks */
3225 				tp1->rec.data.fwd_tsn_cnt++;
3226 			}
3227 			continue;
3228 		}
3229 		/*
3230 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3231 		 */
3232 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3233 			/*
3234 			 * No new acks were receieved for data sent to this
3235 			 * dest. Therefore, according to the SFR algo for
3236 			 * CMT, no data sent to this dest can be marked for
3237 			 * FR using this SACK.
3238 			 */
3239 			continue;
3240 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3241 		    tp1->whoTo->this_sack_highest_newack)) {
3242 			/*
3243 			 * CMT: New acks were receieved for data sent to
3244 			 * this dest. But no new acks were seen for data
3245 			 * sent after tp1. Therefore, according to the SFR
3246 			 * algo for CMT, tp1 cannot be marked for FR using
3247 			 * this SACK. This step covers part of the DAC algo
3248 			 * and the HTNA algo as well.
3249 			 */
3250 			continue;
3251 		}
3252 		/*
3253 		 * Here we check to see if we were have already done a FR
3254 		 * and if so we see if the biggest TSN we saw in the sack is
3255 		 * smaller than the recovery point. If so we don't strike
3256 		 * the tsn... otherwise we CAN strike the TSN.
3257 		 */
3258 		/*
3259 		 * @@@ JRI: Check for CMT if (accum_moved &&
3260 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3261 		 * 0)) {
3262 		 */
3263 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3264 			/*
3265 			 * Strike the TSN if in fast-recovery and cum-ack
3266 			 * moved.
3267 			 */
3268 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3269 				sctp_log_fr(biggest_tsn_newly_acked,
3270 				    tp1->rec.data.TSN_seq,
3271 				    tp1->sent,
3272 				    SCTP_FR_LOG_STRIKE_CHUNK);
3273 			}
3274 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3275 				tp1->sent++;
3276 			}
3277 			if ((asoc->sctp_cmt_on_off > 0) &&
3278 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3279 				/*
3280 				 * CMT DAC algorithm: If SACK flag is set to
3281 				 * 0, then lowest_newack test will not pass
3282 				 * because it would have been set to the
3283 				 * cumack earlier. If not already to be
3284 				 * rtx'd, If not a mixed sack and if tp1 is
3285 				 * not between two sacked TSNs, then mark by
3286 				 * one more. NOTE that we are marking by one
3287 				 * additional time since the SACK DAC flag
3288 				 * indicates that two packets have been
3289 				 * received after this missing TSN.
3290 				 */
3291 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3292 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3293 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 						sctp_log_fr(16 + num_dests_sacked,
3295 						    tp1->rec.data.TSN_seq,
3296 						    tp1->sent,
3297 						    SCTP_FR_LOG_STRIKE_CHUNK);
3298 					}
3299 					tp1->sent++;
3300 				}
3301 			}
3302 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3303 		    (asoc->sctp_cmt_on_off == 0)) {
3304 			/*
3305 			 * For those that have done a FR we must take
3306 			 * special consideration if we strike. I.e the
3307 			 * biggest_newly_acked must be higher than the
3308 			 * sending_seq at the time we did the FR.
3309 			 */
3310 			if (
3311 #ifdef SCTP_FR_TO_ALTERNATE
3312 			/*
3313 			 * If FR's go to new networks, then we must only do
3314 			 * this for singly homed asoc's. However if the FR's
3315 			 * go to the same network (Armando's work) then its
3316 			 * ok to FR multiple times.
3317 			 */
3318 			    (asoc->numnets < 2)
3319 #else
3320 			    (1)
3321 #endif
3322 			    ) {
3323 
3324 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3325 				    tp1->rec.data.fast_retran_tsn)) {
3326 					/*
3327 					 * Strike the TSN, since this ack is
3328 					 * beyond where things were when we
3329 					 * did a FR.
3330 					 */
3331 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3332 						sctp_log_fr(biggest_tsn_newly_acked,
3333 						    tp1->rec.data.TSN_seq,
3334 						    tp1->sent,
3335 						    SCTP_FR_LOG_STRIKE_CHUNK);
3336 					}
3337 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3338 						tp1->sent++;
3339 					}
3340 					strike_flag = 1;
3341 					if ((asoc->sctp_cmt_on_off > 0) &&
3342 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3343 						/*
3344 						 * CMT DAC algorithm: If
3345 						 * SACK flag is set to 0,
3346 						 * then lowest_newack test
3347 						 * will not pass because it
3348 						 * would have been set to
3349 						 * the cumack earlier. If
3350 						 * not already to be rtx'd,
3351 						 * If not a mixed sack and
3352 						 * if tp1 is not between two
3353 						 * sacked TSNs, then mark by
3354 						 * one more. NOTE that we
3355 						 * are marking by one
3356 						 * additional time since the
3357 						 * SACK DAC flag indicates
3358 						 * that two packets have
3359 						 * been received after this
3360 						 * missing TSN.
3361 						 */
3362 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3363 						    (num_dests_sacked == 1) &&
3364 						    SCTP_TSN_GT(this_sack_lowest_newack,
3365 						    tp1->rec.data.TSN_seq)) {
3366 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3367 								sctp_log_fr(32 + num_dests_sacked,
3368 								    tp1->rec.data.TSN_seq,
3369 								    tp1->sent,
3370 								    SCTP_FR_LOG_STRIKE_CHUNK);
3371 							}
3372 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3373 								tp1->sent++;
3374 							}
3375 						}
3376 					}
3377 				}
3378 			}
3379 			/*
3380 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3381 			 * algo covers HTNA.
3382 			 */
3383 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3384 		    biggest_tsn_newly_acked)) {
3385 			/*
3386 			 * We don't strike these: This is the  HTNA
3387 			 * algorithm i.e. we don't strike If our TSN is
3388 			 * larger than the Highest TSN Newly Acked.
3389 			 */
3390 			;
3391 		} else {
3392 			/* Strike the TSN */
3393 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3394 				sctp_log_fr(biggest_tsn_newly_acked,
3395 				    tp1->rec.data.TSN_seq,
3396 				    tp1->sent,
3397 				    SCTP_FR_LOG_STRIKE_CHUNK);
3398 			}
3399 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3400 				tp1->sent++;
3401 			}
3402 			if ((asoc->sctp_cmt_on_off > 0) &&
3403 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3404 				/*
3405 				 * CMT DAC algorithm: If SACK flag is set to
3406 				 * 0, then lowest_newack test will not pass
3407 				 * because it would have been set to the
3408 				 * cumack earlier. If not already to be
3409 				 * rtx'd, If not a mixed sack and if tp1 is
3410 				 * not between two sacked TSNs, then mark by
3411 				 * one more. NOTE that we are marking by one
3412 				 * additional time since the SACK DAC flag
3413 				 * indicates that two packets have been
3414 				 * received after this missing TSN.
3415 				 */
3416 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3417 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3418 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 						sctp_log_fr(48 + num_dests_sacked,
3420 						    tp1->rec.data.TSN_seq,
3421 						    tp1->sent,
3422 						    SCTP_FR_LOG_STRIKE_CHUNK);
3423 					}
3424 					tp1->sent++;
3425 				}
3426 			}
3427 		}
3428 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3429 			struct sctp_nets *alt;
3430 
3431 			/* fix counts and things */
3432 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3433 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3434 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3435 				    tp1->book_size,
3436 				    (uintptr_t) tp1->whoTo,
3437 				    tp1->rec.data.TSN_seq);
3438 			}
3439 			if (tp1->whoTo) {
3440 				tp1->whoTo->net_ack++;
3441 				sctp_flight_size_decrease(tp1);
3442 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3443 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3444 					    tp1);
3445 				}
3446 			}
3447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3448 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3449 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3450 			}
3451 			/* add back to the rwnd */
3452 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3453 
3454 			/* remove from the total flight */
3455 			sctp_total_flight_decrease(stcb, tp1);
3456 
3457 			if ((stcb->asoc.peer_supports_prsctp) &&
3458 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3459 				/*
3460 				 * Has it been retransmitted tv_sec times? -
3461 				 * we store the retran count there.
3462 				 */
3463 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3464 					/* Yes, so drop it */
3465 					if (tp1->data != NULL) {
3466 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3467 						    SCTP_SO_NOT_LOCKED);
3468 					}
3469 					/* Make sure to flag we had a FR */
3470 					tp1->whoTo->net_ack++;
3471 					continue;
3472 				}
3473 			}
3474 			/*
3475 			 * SCTP_PRINTF("OK, we are now ready to FR this
3476 			 * guy\n");
3477 			 */
3478 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3479 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3480 				    0, SCTP_FR_MARKED);
3481 			}
3482 			if (strike_flag) {
3483 				/* This is a subsequent FR */
3484 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3485 			}
3486 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3487 			if (asoc->sctp_cmt_on_off > 0) {
3488 				/*
3489 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3490 				 * If CMT is being used, then pick dest with
3491 				 * largest ssthresh for any retransmission.
3492 				 */
3493 				tp1->no_fr_allowed = 1;
3494 				alt = tp1->whoTo;
3495 				/* sa_ignore NO_NULL_CHK */
3496 				if (asoc->sctp_cmt_pf > 0) {
3497 					/*
3498 					 * JRS 5/18/07 - If CMT PF is on,
3499 					 * use the PF version of
3500 					 * find_alt_net()
3501 					 */
3502 					alt = sctp_find_alternate_net(stcb, alt, 2);
3503 				} else {
3504 					/*
3505 					 * JRS 5/18/07 - If only CMT is on,
3506 					 * use the CMT version of
3507 					 * find_alt_net()
3508 					 */
3509 					/* sa_ignore NO_NULL_CHK */
3510 					alt = sctp_find_alternate_net(stcb, alt, 1);
3511 				}
3512 				if (alt == NULL) {
3513 					alt = tp1->whoTo;
3514 				}
3515 				/*
3516 				 * CUCv2: If a different dest is picked for
3517 				 * the retransmission, then new
3518 				 * (rtx-)pseudo_cumack needs to be tracked
3519 				 * for orig dest. Let CUCv2 track new (rtx-)
3520 				 * pseudo-cumack always.
3521 				 */
3522 				if (tp1->whoTo) {
3523 					tp1->whoTo->find_pseudo_cumack = 1;
3524 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3525 				}
3526 			} else {/* CMT is OFF */
3527 
3528 #ifdef SCTP_FR_TO_ALTERNATE
3529 				/* Can we find an alternate? */
3530 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3531 #else
3532 				/*
3533 				 * default behavior is to NOT retransmit
3534 				 * FR's to an alternate. Armando Caro's
3535 				 * paper details why.
3536 				 */
3537 				alt = tp1->whoTo;
3538 #endif
3539 			}
3540 
3541 			tp1->rec.data.doing_fast_retransmit = 1;
3542 			tot_retrans++;
3543 			/* mark the sending seq for possible subsequent FR's */
3544 			/*
3545 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3546 			 * (uint32_t)tpi->rec.data.TSN_seq);
3547 			 */
3548 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3549 				/*
3550 				 * If the queue of send is empty then its
3551 				 * the next sequence number that will be
3552 				 * assigned so we subtract one from this to
3553 				 * get the one we last sent.
3554 				 */
3555 				tp1->rec.data.fast_retran_tsn = sending_seq;
3556 			} else {
3557 				/*
3558 				 * If there are chunks on the send queue
3559 				 * (unsent data that has made it from the
3560 				 * stream queues but not out the door, we
3561 				 * take the first one (which will have the
3562 				 * lowest TSN) and subtract one to get the
3563 				 * one we last sent.
3564 				 */
3565 				struct sctp_tmit_chunk *ttt;
3566 
3567 				ttt = TAILQ_FIRST(&asoc->send_queue);
3568 				tp1->rec.data.fast_retran_tsn =
3569 				    ttt->rec.data.TSN_seq;
3570 			}
3571 
3572 			if (tp1->do_rtt) {
3573 				/*
3574 				 * this guy had a RTO calculation pending on
3575 				 * it, cancel it
3576 				 */
3577 				if ((tp1->whoTo != NULL) &&
3578 				    (tp1->whoTo->rto_needed == 0)) {
3579 					tp1->whoTo->rto_needed = 1;
3580 				}
3581 				tp1->do_rtt = 0;
3582 			}
3583 			if (alt != tp1->whoTo) {
3584 				/* yes, there is an alternate. */
3585 				sctp_free_remote_addr(tp1->whoTo);
3586 				/* sa_ignore FREED_MEMORY */
3587 				tp1->whoTo = alt;
3588 				atomic_add_int(&alt->ref_count, 1);
3589 			}
3590 		}
3591 	}
3592 }
3593 
3594 struct sctp_tmit_chunk *
3595 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3596     struct sctp_association *asoc)
3597 {
3598 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3599 	struct timeval now;
3600 	int now_filled = 0;
3601 
3602 	if (asoc->peer_supports_prsctp == 0) {
3603 		return (NULL);
3604 	}
3605 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3606 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3607 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3608 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3609 			/* no chance to advance, out of here */
3610 			break;
3611 		}
3612 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3613 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3614 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3615 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3616 				    asoc->advanced_peer_ack_point,
3617 				    tp1->rec.data.TSN_seq, 0, 0);
3618 			}
3619 		}
3620 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3621 			/*
3622 			 * We can't fwd-tsn past any that are reliable aka
3623 			 * retransmitted until the asoc fails.
3624 			 */
3625 			break;
3626 		}
3627 		if (!now_filled) {
3628 			(void)SCTP_GETTIME_TIMEVAL(&now);
3629 			now_filled = 1;
3630 		}
3631 		/*
3632 		 * now we got a chunk which is marked for another
3633 		 * retransmission to a PR-stream but has run out its chances
3634 		 * already maybe OR has been marked to skip now. Can we skip
3635 		 * it if its a resend?
3636 		 */
3637 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3638 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3639 			/*
3640 			 * Now is this one marked for resend and its time is
3641 			 * now up?
3642 			 */
3643 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3644 				/* Yes so drop it */
3645 				if (tp1->data) {
3646 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3647 					    1, SCTP_SO_NOT_LOCKED);
3648 				}
3649 			} else {
3650 				/*
3651 				 * No, we are done when hit one for resend
3652 				 * whos time as not expired.
3653 				 */
3654 				break;
3655 			}
3656 		}
3657 		/*
3658 		 * Ok now if this chunk is marked to drop it we can clean up
3659 		 * the chunk, advance our peer ack point and we can check
3660 		 * the next chunk.
3661 		 */
3662 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3663 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3664 			/* advance PeerAckPoint goes forward */
3665 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3666 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3667 				a_adv = tp1;
3668 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3669 				/* No update but we do save the chk */
3670 				a_adv = tp1;
3671 			}
3672 		} else {
3673 			/*
3674 			 * If it is still in RESEND we can advance no
3675 			 * further
3676 			 */
3677 			break;
3678 		}
3679 	}
3680 	return (a_adv);
3681 }
3682 
3683 static int
3684 sctp_fs_audit(struct sctp_association *asoc)
3685 {
3686 	struct sctp_tmit_chunk *chk;
3687 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3688 	int entry_flight, entry_cnt, ret;
3689 
3690 	entry_flight = asoc->total_flight;
3691 	entry_cnt = asoc->total_flight_count;
3692 	ret = 0;
3693 
3694 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3695 		return (0);
3696 
3697 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3698 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3699 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3700 			    chk->rec.data.TSN_seq,
3701 			    chk->send_size,
3702 			    chk->snd_count);
3703 			inflight++;
3704 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3705 			resend++;
3706 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3707 			inbetween++;
3708 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3709 			above++;
3710 		} else {
3711 			acked++;
3712 		}
3713 	}
3714 
3715 	if ((inflight > 0) || (inbetween > 0)) {
3716 #ifdef INVARIANTS
3717 		panic("Flight size-express incorrect? \n");
3718 #else
3719 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3720 		    entry_flight, entry_cnt);
3721 
3722 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3723 		    inflight, inbetween, resend, above, acked);
3724 		ret = 1;
3725 #endif
3726 	}
3727 	return (ret);
3728 }
3729 
3730 
3731 static void
3732 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3733     struct sctp_association *asoc,
3734     struct sctp_tmit_chunk *tp1)
3735 {
3736 	tp1->window_probe = 0;
3737 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3738 		/* TSN's skipped we do NOT move back. */
3739 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3740 		    tp1->whoTo->flight_size,
3741 		    tp1->book_size,
3742 		    (uintptr_t) tp1->whoTo,
3743 		    tp1->rec.data.TSN_seq);
3744 		return;
3745 	}
3746 	/* First setup this by shrinking flight */
3747 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3748 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3749 		    tp1);
3750 	}
3751 	sctp_flight_size_decrease(tp1);
3752 	sctp_total_flight_decrease(stcb, tp1);
3753 	/* Now mark for resend */
3754 	tp1->sent = SCTP_DATAGRAM_RESEND;
3755 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3756 
3757 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3758 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3759 		    tp1->whoTo->flight_size,
3760 		    tp1->book_size,
3761 		    (uintptr_t) tp1->whoTo,
3762 		    tp1->rec.data.TSN_seq);
3763 	}
3764 }
3765 
3766 void
3767 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3768     uint32_t rwnd, int *abort_now, int ecne_seen)
3769 {
3770 	struct sctp_nets *net;
3771 	struct sctp_association *asoc;
3772 	struct sctp_tmit_chunk *tp1, *tp2;
3773 	uint32_t old_rwnd;
3774 	int win_probe_recovery = 0;
3775 	int win_probe_recovered = 0;
3776 	int j, done_once = 0;
3777 	int rto_ok = 1;
3778 
3779 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3780 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3781 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3782 	}
3783 	SCTP_TCB_LOCK_ASSERT(stcb);
3784 #ifdef SCTP_ASOCLOG_OF_TSNS
3785 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3786 	stcb->asoc.cumack_log_at++;
3787 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3788 		stcb->asoc.cumack_log_at = 0;
3789 	}
3790 #endif
3791 	asoc = &stcb->asoc;
3792 	old_rwnd = asoc->peers_rwnd;
3793 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3794 		/* old ack */
3795 		return;
3796 	} else if (asoc->last_acked_seq == cumack) {
3797 		/* Window update sack */
3798 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3799 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3800 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3801 			/* SWS sender side engages */
3802 			asoc->peers_rwnd = 0;
3803 		}
3804 		if (asoc->peers_rwnd > old_rwnd) {
3805 			goto again;
3806 		}
3807 		return;
3808 	}
3809 	/* First setup for CC stuff */
3810 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3811 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3812 			/* Drag along the window_tsn for cwr's */
3813 			net->cwr_window_tsn = cumack;
3814 		}
3815 		net->prev_cwnd = net->cwnd;
3816 		net->net_ack = 0;
3817 		net->net_ack2 = 0;
3818 
3819 		/*
3820 		 * CMT: Reset CUC and Fast recovery algo variables before
3821 		 * SACK processing
3822 		 */
3823 		net->new_pseudo_cumack = 0;
3824 		net->will_exit_fast_recovery = 0;
3825 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3826 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3827 		}
3828 	}
3829 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3830 		uint32_t send_s;
3831 
3832 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3833 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3834 			    sctpchunk_listhead);
3835 			send_s = tp1->rec.data.TSN_seq + 1;
3836 		} else {
3837 			send_s = asoc->sending_seq;
3838 		}
3839 		if (SCTP_TSN_GE(cumack, send_s)) {
3840 #ifndef INVARIANTS
3841 			struct mbuf *oper;
3842 
3843 #endif
3844 #ifdef INVARIANTS
3845 			panic("Impossible sack 1");
3846 #else
3847 
3848 			*abort_now = 1;
3849 			/* XXX */
3850 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3851 			    0, M_NOWAIT, 1, MT_DATA);
3852 			if (oper) {
3853 				struct sctp_paramhdr *ph;
3854 				uint32_t *ippp;
3855 
3856 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3857 				    sizeof(uint32_t);
3858 				ph = mtod(oper, struct sctp_paramhdr *);
3859 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3860 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3861 				ippp = (uint32_t *) (ph + 1);
3862 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3863 			}
3864 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3865 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3866 			return;
3867 #endif
3868 		}
3869 	}
3870 	asoc->this_sack_highest_gap = cumack;
3871 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3872 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3873 		    stcb->asoc.overall_error_count,
3874 		    0,
3875 		    SCTP_FROM_SCTP_INDATA,
3876 		    __LINE__);
3877 	}
3878 	stcb->asoc.overall_error_count = 0;
3879 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3880 		/* process the new consecutive TSN first */
3881 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3882 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3883 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3884 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3885 				}
3886 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3887 					/*
3888 					 * If it is less than ACKED, it is
3889 					 * now no-longer in flight. Higher
3890 					 * values may occur during marking
3891 					 */
3892 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3893 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3894 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3895 							    tp1->whoTo->flight_size,
3896 							    tp1->book_size,
3897 							    (uintptr_t) tp1->whoTo,
3898 							    tp1->rec.data.TSN_seq);
3899 						}
3900 						sctp_flight_size_decrease(tp1);
3901 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3902 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3903 							    tp1);
3904 						}
3905 						/* sa_ignore NO_NULL_CHK */
3906 						sctp_total_flight_decrease(stcb, tp1);
3907 					}
3908 					tp1->whoTo->net_ack += tp1->send_size;
3909 					if (tp1->snd_count < 2) {
3910 						/*
3911 						 * True non-retransmited
3912 						 * chunk
3913 						 */
3914 						tp1->whoTo->net_ack2 +=
3915 						    tp1->send_size;
3916 
3917 						/* update RTO too? */
3918 						if (tp1->do_rtt) {
3919 							if (rto_ok) {
3920 								tp1->whoTo->RTO =
3921 								/*
3922 								 * sa_ignore
3923 								 * NO_NULL_CH
3924 								 * K
3925 								 */
3926 								    sctp_calculate_rto(stcb,
3927 								    asoc, tp1->whoTo,
3928 								    &tp1->sent_rcv_time,
3929 								    sctp_align_safe_nocopy,
3930 								    SCTP_RTT_FROM_DATA);
3931 								rto_ok = 0;
3932 							}
3933 							if (tp1->whoTo->rto_needed == 0) {
3934 								tp1->whoTo->rto_needed = 1;
3935 							}
3936 							tp1->do_rtt = 0;
3937 						}
3938 					}
3939 					/*
3940 					 * CMT: CUCv2 algorithm. From the
3941 					 * cumack'd TSNs, for each TSN being
3942 					 * acked for the first time, set the
3943 					 * following variables for the
3944 					 * corresp destination.
3945 					 * new_pseudo_cumack will trigger a
3946 					 * cwnd update.
3947 					 * find_(rtx_)pseudo_cumack will
3948 					 * trigger search for the next
3949 					 * expected (rtx-)pseudo-cumack.
3950 					 */
3951 					tp1->whoTo->new_pseudo_cumack = 1;
3952 					tp1->whoTo->find_pseudo_cumack = 1;
3953 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3954 
3955 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3956 						/* sa_ignore NO_NULL_CHK */
3957 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3958 					}
3959 				}
3960 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3961 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3962 				}
3963 				if (tp1->rec.data.chunk_was_revoked) {
3964 					/* deflate the cwnd */
3965 					tp1->whoTo->cwnd -= tp1->book_size;
3966 					tp1->rec.data.chunk_was_revoked = 0;
3967 				}
3968 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3969 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3970 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3971 #ifdef INVARIANTS
3972 					} else {
3973 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3974 #endif
3975 					}
3976 				}
3977 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3978 				if (tp1->data) {
3979 					/* sa_ignore NO_NULL_CHK */
3980 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3981 					sctp_m_freem(tp1->data);
3982 					tp1->data = NULL;
3983 				}
3984 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3985 					sctp_log_sack(asoc->last_acked_seq,
3986 					    cumack,
3987 					    tp1->rec.data.TSN_seq,
3988 					    0,
3989 					    0,
3990 					    SCTP_LOG_FREE_SENT);
3991 				}
3992 				asoc->sent_queue_cnt--;
3993 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3994 			} else {
3995 				break;
3996 			}
3997 		}
3998 
3999 	}
4000 	/* sa_ignore NO_NULL_CHK */
4001 	if (stcb->sctp_socket) {
4002 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4003 		struct socket *so;
4004 
4005 #endif
4006 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4007 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4008 			/* sa_ignore NO_NULL_CHK */
4009 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4010 		}
4011 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4012 		so = SCTP_INP_SO(stcb->sctp_ep);
4013 		atomic_add_int(&stcb->asoc.refcnt, 1);
4014 		SCTP_TCB_UNLOCK(stcb);
4015 		SCTP_SOCKET_LOCK(so, 1);
4016 		SCTP_TCB_LOCK(stcb);
4017 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4018 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4019 			/* assoc was freed while we were unlocked */
4020 			SCTP_SOCKET_UNLOCK(so, 1);
4021 			return;
4022 		}
4023 #endif
4024 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4025 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4026 		SCTP_SOCKET_UNLOCK(so, 1);
4027 #endif
4028 	} else {
4029 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4030 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4031 		}
4032 	}
4033 
4034 	/* JRS - Use the congestion control given in the CC module */
4035 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4036 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4037 			if (net->net_ack2 > 0) {
4038 				/*
4039 				 * Karn's rule applies to clearing error
4040 				 * count, this is optional.
4041 				 */
4042 				net->error_count = 0;
4043 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4044 					/* addr came good */
4045 					net->dest_state |= SCTP_ADDR_REACHABLE;
4046 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4047 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4048 				}
4049 				if (net == stcb->asoc.primary_destination) {
4050 					if (stcb->asoc.alternate) {
4051 						/*
4052 						 * release the alternate,
4053 						 * primary is good
4054 						 */
4055 						sctp_free_remote_addr(stcb->asoc.alternate);
4056 						stcb->asoc.alternate = NULL;
4057 					}
4058 				}
4059 				if (net->dest_state & SCTP_ADDR_PF) {
4060 					net->dest_state &= ~SCTP_ADDR_PF;
4061 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4062 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4063 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4064 					/* Done with this net */
4065 					net->net_ack = 0;
4066 				}
4067 				/* restore any doubled timers */
4068 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4069 				if (net->RTO < stcb->asoc.minrto) {
4070 					net->RTO = stcb->asoc.minrto;
4071 				}
4072 				if (net->RTO > stcb->asoc.maxrto) {
4073 					net->RTO = stcb->asoc.maxrto;
4074 				}
4075 			}
4076 		}
4077 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4078 	}
4079 	asoc->last_acked_seq = cumack;
4080 
4081 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4082 		/* nothing left in-flight */
4083 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4084 			net->flight_size = 0;
4085 			net->partial_bytes_acked = 0;
4086 		}
4087 		asoc->total_flight = 0;
4088 		asoc->total_flight_count = 0;
4089 	}
4090 	/* RWND update */
4091 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4092 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4093 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4094 		/* SWS sender side engages */
4095 		asoc->peers_rwnd = 0;
4096 	}
4097 	if (asoc->peers_rwnd > old_rwnd) {
4098 		win_probe_recovery = 1;
4099 	}
4100 	/* Now assure a timer where data is queued at */
4101 again:
4102 	j = 0;
4103 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4104 		int to_ticks;
4105 
4106 		if (win_probe_recovery && (net->window_probe)) {
4107 			win_probe_recovered = 1;
4108 			/*
4109 			 * Find first chunk that was used with window probe
4110 			 * and clear the sent
4111 			 */
4112 			/* sa_ignore FREED_MEMORY */
4113 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4114 				if (tp1->window_probe) {
4115 					/* move back to data send queue */
4116 					sctp_window_probe_recovery(stcb, asoc, tp1);
4117 					break;
4118 				}
4119 			}
4120 		}
4121 		if (net->RTO == 0) {
4122 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4123 		} else {
4124 			to_ticks = MSEC_TO_TICKS(net->RTO);
4125 		}
4126 		if (net->flight_size) {
4127 			j++;
4128 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4129 			    sctp_timeout_handler, &net->rxt_timer);
4130 			if (net->window_probe) {
4131 				net->window_probe = 0;
4132 			}
4133 		} else {
4134 			if (net->window_probe) {
4135 				/*
4136 				 * In window probes we must assure a timer
4137 				 * is still running there
4138 				 */
4139 				net->window_probe = 0;
4140 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4141 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4142 					    sctp_timeout_handler, &net->rxt_timer);
4143 				}
4144 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4145 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4146 				    stcb, net,
4147 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4148 			}
4149 		}
4150 	}
4151 	if ((j == 0) &&
4152 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4153 	    (asoc->sent_queue_retran_cnt == 0) &&
4154 	    (win_probe_recovered == 0) &&
4155 	    (done_once == 0)) {
4156 		/*
4157 		 * huh, this should not happen unless all packets are
4158 		 * PR-SCTP and marked to skip of course.
4159 		 */
4160 		if (sctp_fs_audit(asoc)) {
4161 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4162 				net->flight_size = 0;
4163 			}
4164 			asoc->total_flight = 0;
4165 			asoc->total_flight_count = 0;
4166 			asoc->sent_queue_retran_cnt = 0;
4167 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4168 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4169 					sctp_flight_size_increase(tp1);
4170 					sctp_total_flight_increase(stcb, tp1);
4171 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4172 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4173 				}
4174 			}
4175 		}
4176 		done_once = 1;
4177 		goto again;
4178 	}
4179 	/**********************************/
4180 	/* Now what about shutdown issues */
4181 	/**********************************/
4182 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4183 		/* nothing left on sendqueue.. consider done */
4184 		/* clean up */
4185 		if ((asoc->stream_queue_cnt == 1) &&
4186 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4187 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4188 		    (asoc->locked_on_sending)
4189 		    ) {
4190 			struct sctp_stream_queue_pending *sp;
4191 
4192 			/*
4193 			 * I may be in a state where we got all across.. but
4194 			 * cannot write more due to a shutdown... we abort
4195 			 * since the user did not indicate EOR in this case.
4196 			 * The sp will be cleaned during free of the asoc.
4197 			 */
4198 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4199 			    sctp_streamhead);
4200 			if ((sp) && (sp->length == 0)) {
4201 				/* Let cleanup code purge it */
4202 				if (sp->msg_is_complete) {
4203 					asoc->stream_queue_cnt--;
4204 				} else {
4205 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4206 					asoc->locked_on_sending = NULL;
4207 					asoc->stream_queue_cnt--;
4208 				}
4209 			}
4210 		}
4211 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4212 		    (asoc->stream_queue_cnt == 0)) {
4213 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4214 				/* Need to abort here */
4215 				struct mbuf *oper;
4216 
4217 		abort_out_now:
4218 				*abort_now = 1;
4219 				/* XXX */
4220 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4221 				    0, M_NOWAIT, 1, MT_DATA);
4222 				if (oper) {
4223 					struct sctp_paramhdr *ph;
4224 
4225 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4226 					ph = mtod(oper, struct sctp_paramhdr *);
4227 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4228 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4229 				}
4230 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4231 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4232 			} else {
4233 				struct sctp_nets *netp;
4234 
4235 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4236 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4237 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4238 				}
4239 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4240 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4241 				sctp_stop_timers_for_shutdown(stcb);
4242 				if (asoc->alternate) {
4243 					netp = asoc->alternate;
4244 				} else {
4245 					netp = asoc->primary_destination;
4246 				}
4247 				sctp_send_shutdown(stcb, netp);
4248 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4249 				    stcb->sctp_ep, stcb, netp);
4250 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4251 				    stcb->sctp_ep, stcb, netp);
4252 			}
4253 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4254 		    (asoc->stream_queue_cnt == 0)) {
4255 			struct sctp_nets *netp;
4256 
4257 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4258 				goto abort_out_now;
4259 			}
4260 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4261 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4262 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4263 			sctp_stop_timers_for_shutdown(stcb);
4264 			if (asoc->alternate) {
4265 				netp = asoc->alternate;
4266 			} else {
4267 				netp = asoc->primary_destination;
4268 			}
4269 			sctp_send_shutdown_ack(stcb, netp);
4270 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4271 			    stcb->sctp_ep, stcb, netp);
4272 		}
4273 	}
4274 	/*********************************************/
4275 	/* Here we perform PR-SCTP procedures        */
4276 	/* (section 4.2)                             */
4277 	/*********************************************/
4278 	/* C1. update advancedPeerAckPoint */
4279 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4280 		asoc->advanced_peer_ack_point = cumack;
4281 	}
4282 	/* PR-Sctp issues need to be addressed too */
4283 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4284 		struct sctp_tmit_chunk *lchk;
4285 		uint32_t old_adv_peer_ack_point;
4286 
4287 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4288 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4289 		/* C3. See if we need to send a Fwd-TSN */
4290 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4291 			/*
4292 			 * ISSUE with ECN, see FWD-TSN processing.
4293 			 */
4294 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4295 				send_forward_tsn(stcb, asoc);
4296 			} else if (lchk) {
4297 				/* try to FR fwd-tsn's that get lost too */
4298 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4299 					send_forward_tsn(stcb, asoc);
4300 				}
4301 			}
4302 		}
4303 		if (lchk) {
4304 			/* Assure a timer is up */
4305 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4306 			    stcb->sctp_ep, stcb, lchk->whoTo);
4307 		}
4308 	}
4309 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4310 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4311 		    rwnd,
4312 		    stcb->asoc.peers_rwnd,
4313 		    stcb->asoc.total_flight,
4314 		    stcb->asoc.total_output_queue_size);
4315 	}
4316 }
4317 
4318 void
4319 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4320     struct sctp_tcb *stcb,
4321     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4322     int *abort_now, uint8_t flags,
4323     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4324 {
4325 	struct sctp_association *asoc;
4326 	struct sctp_tmit_chunk *tp1, *tp2;
4327 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4328 	uint16_t wake_him = 0;
4329 	uint32_t send_s = 0;
4330 	long j;
4331 	int accum_moved = 0;
4332 	int will_exit_fast_recovery = 0;
4333 	uint32_t a_rwnd, old_rwnd;
4334 	int win_probe_recovery = 0;
4335 	int win_probe_recovered = 0;
4336 	struct sctp_nets *net = NULL;
4337 	int done_once;
4338 	int rto_ok = 1;
4339 	uint8_t reneged_all = 0;
4340 	uint8_t cmt_dac_flag;
4341 
4342 	/*
4343 	 * we take any chance we can to service our queues since we cannot
4344 	 * get awoken when the socket is read from :<
4345 	 */
4346 	/*
4347 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4348 	 * old sack, if so discard. 2) If there is nothing left in the send
4349 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4350 	 * too, update any rwnd change and verify no timers are running.
4351 	 * then return. 3) Process any new consequtive data i.e. cum-ack
4352 	 * moved process these first and note that it moved. 4) Process any
4353 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4354 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4355 	 * sync up flightsizes and things, stop all timers and also check
4356 	 * for shutdown_pending state. If so then go ahead and send off the
4357 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4358 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4359 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4360 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4361 	 * if in shutdown_recv state.
4362 	 */
4363 	SCTP_TCB_LOCK_ASSERT(stcb);
4364 	/* CMT DAC algo */
4365 	this_sack_lowest_newack = 0;
4366 	SCTP_STAT_INCR(sctps_slowpath_sack);
4367 	last_tsn = cum_ack;
4368 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4369 #ifdef SCTP_ASOCLOG_OF_TSNS
4370 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4371 	stcb->asoc.cumack_log_at++;
4372 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4373 		stcb->asoc.cumack_log_at = 0;
4374 	}
4375 #endif
4376 	a_rwnd = rwnd;
4377 
4378 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4379 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4380 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4381 	}
4382 	old_rwnd = stcb->asoc.peers_rwnd;
4383 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4384 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4385 		    stcb->asoc.overall_error_count,
4386 		    0,
4387 		    SCTP_FROM_SCTP_INDATA,
4388 		    __LINE__);
4389 	}
4390 	stcb->asoc.overall_error_count = 0;
4391 	asoc = &stcb->asoc;
4392 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4393 		sctp_log_sack(asoc->last_acked_seq,
4394 		    cum_ack,
4395 		    0,
4396 		    num_seg,
4397 		    num_dup,
4398 		    SCTP_LOG_NEW_SACK);
4399 	}
4400 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4401 		uint16_t i;
4402 		uint32_t *dupdata, dblock;
4403 
4404 		for (i = 0; i < num_dup; i++) {
4405 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4406 			    sizeof(uint32_t), (uint8_t *) & dblock);
4407 			if (dupdata == NULL) {
4408 				break;
4409 			}
4410 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4411 		}
4412 	}
4413 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4414 		/* reality check */
4415 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4416 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4417 			    sctpchunk_listhead);
4418 			send_s = tp1->rec.data.TSN_seq + 1;
4419 		} else {
4420 			tp1 = NULL;
4421 			send_s = asoc->sending_seq;
4422 		}
4423 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4424 			struct mbuf *oper;
4425 
4426 			/*
4427 			 * no way, we have not even sent this TSN out yet.
4428 			 * Peer is hopelessly messed up with us.
4429 			 */
4430 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4431 			    cum_ack, send_s);
4432 			if (tp1) {
4433 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4434 				    tp1->rec.data.TSN_seq, (void *)tp1);
4435 			}
4436 	hopeless_peer:
4437 			*abort_now = 1;
4438 			/* XXX */
4439 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4440 			    0, M_NOWAIT, 1, MT_DATA);
4441 			if (oper) {
4442 				struct sctp_paramhdr *ph;
4443 				uint32_t *ippp;
4444 
4445 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4446 				    sizeof(uint32_t);
4447 				ph = mtod(oper, struct sctp_paramhdr *);
4448 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4449 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4450 				ippp = (uint32_t *) (ph + 1);
4451 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4452 			}
4453 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4454 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4455 			return;
4456 		}
4457 	}
4458 	/**********************/
4459 	/* 1) check the range */
4460 	/**********************/
4461 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4462 		/* acking something behind */
4463 		return;
4464 	}
4465 	/* update the Rwnd of the peer */
4466 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4467 	    TAILQ_EMPTY(&asoc->send_queue) &&
4468 	    (asoc->stream_queue_cnt == 0)) {
4469 		/* nothing left on send/sent and strmq */
4470 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4471 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4472 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4473 		}
4474 		asoc->peers_rwnd = a_rwnd;
4475 		if (asoc->sent_queue_retran_cnt) {
4476 			asoc->sent_queue_retran_cnt = 0;
4477 		}
4478 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4479 			/* SWS sender side engages */
4480 			asoc->peers_rwnd = 0;
4481 		}
4482 		/* stop any timers */
4483 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4484 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4485 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4486 			net->partial_bytes_acked = 0;
4487 			net->flight_size = 0;
4488 		}
4489 		asoc->total_flight = 0;
4490 		asoc->total_flight_count = 0;
4491 		return;
4492 	}
4493 	/*
4494 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4495 	 * things. The total byte count acked is tracked in netAckSz AND
4496 	 * netAck2 is used to track the total bytes acked that are un-
4497 	 * amibguious and were never retransmitted. We track these on a per
4498 	 * destination address basis.
4499 	 */
4500 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4501 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4502 			/* Drag along the window_tsn for cwr's */
4503 			net->cwr_window_tsn = cum_ack;
4504 		}
4505 		net->prev_cwnd = net->cwnd;
4506 		net->net_ack = 0;
4507 		net->net_ack2 = 0;
4508 
4509 		/*
4510 		 * CMT: Reset CUC and Fast recovery algo variables before
4511 		 * SACK processing
4512 		 */
4513 		net->new_pseudo_cumack = 0;
4514 		net->will_exit_fast_recovery = 0;
4515 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4516 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4517 		}
4518 	}
4519 	/* process the new consecutive TSN first */
4520 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4521 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4522 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4523 				accum_moved = 1;
4524 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4525 					/*
4526 					 * If it is less than ACKED, it is
4527 					 * now no-longer in flight. Higher
4528 					 * values may occur during marking
4529 					 */
4530 					if ((tp1->whoTo->dest_state &
4531 					    SCTP_ADDR_UNCONFIRMED) &&
4532 					    (tp1->snd_count < 2)) {
4533 						/*
4534 						 * If there was no retran
4535 						 * and the address is
4536 						 * un-confirmed and we sent
4537 						 * there and are now
4538 						 * sacked.. its confirmed,
4539 						 * mark it so.
4540 						 */
4541 						tp1->whoTo->dest_state &=
4542 						    ~SCTP_ADDR_UNCONFIRMED;
4543 					}
4544 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4545 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4546 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4547 							    tp1->whoTo->flight_size,
4548 							    tp1->book_size,
4549 							    (uintptr_t) tp1->whoTo,
4550 							    tp1->rec.data.TSN_seq);
4551 						}
4552 						sctp_flight_size_decrease(tp1);
4553 						sctp_total_flight_decrease(stcb, tp1);
4554 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4555 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4556 							    tp1);
4557 						}
4558 					}
4559 					tp1->whoTo->net_ack += tp1->send_size;
4560 
4561 					/* CMT SFR and DAC algos */
4562 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4563 					tp1->whoTo->saw_newack = 1;
4564 
4565 					if (tp1->snd_count < 2) {
4566 						/*
4567 						 * True non-retransmited
4568 						 * chunk
4569 						 */
4570 						tp1->whoTo->net_ack2 +=
4571 						    tp1->send_size;
4572 
4573 						/* update RTO too? */
4574 						if (tp1->do_rtt) {
4575 							if (rto_ok) {
4576 								tp1->whoTo->RTO =
4577 								    sctp_calculate_rto(stcb,
4578 								    asoc, tp1->whoTo,
4579 								    &tp1->sent_rcv_time,
4580 								    sctp_align_safe_nocopy,
4581 								    SCTP_RTT_FROM_DATA);
4582 								rto_ok = 0;
4583 							}
4584 							if (tp1->whoTo->rto_needed == 0) {
4585 								tp1->whoTo->rto_needed = 1;
4586 							}
4587 							tp1->do_rtt = 0;
4588 						}
4589 					}
4590 					/*
4591 					 * CMT: CUCv2 algorithm. From the
4592 					 * cumack'd TSNs, for each TSN being
4593 					 * acked for the first time, set the
4594 					 * following variables for the
4595 					 * corresp destination.
4596 					 * new_pseudo_cumack will trigger a
4597 					 * cwnd update.
4598 					 * find_(rtx_)pseudo_cumack will
4599 					 * trigger search for the next
4600 					 * expected (rtx-)pseudo-cumack.
4601 					 */
4602 					tp1->whoTo->new_pseudo_cumack = 1;
4603 					tp1->whoTo->find_pseudo_cumack = 1;
4604 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4605 
4606 
4607 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4608 						sctp_log_sack(asoc->last_acked_seq,
4609 						    cum_ack,
4610 						    tp1->rec.data.TSN_seq,
4611 						    0,
4612 						    0,
4613 						    SCTP_LOG_TSN_ACKED);
4614 					}
4615 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4616 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4617 					}
4618 				}
4619 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4620 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4621 #ifdef SCTP_AUDITING_ENABLED
4622 					sctp_audit_log(0xB3,
4623 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4624 #endif
4625 				}
4626 				if (tp1->rec.data.chunk_was_revoked) {
4627 					/* deflate the cwnd */
4628 					tp1->whoTo->cwnd -= tp1->book_size;
4629 					tp1->rec.data.chunk_was_revoked = 0;
4630 				}
4631 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4632 					tp1->sent = SCTP_DATAGRAM_ACKED;
4633 				}
4634 			}
4635 		} else {
4636 			break;
4637 		}
4638 	}
4639 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4640 	/* always set this up to cum-ack */
4641 	asoc->this_sack_highest_gap = last_tsn;
4642 
4643 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4644 
4645 		/*
4646 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4647 		 * to be greater than the cumack. Also reset saw_newack to 0
4648 		 * for all dests.
4649 		 */
4650 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4651 			net->saw_newack = 0;
4652 			net->this_sack_highest_newack = last_tsn;
4653 		}
4654 
4655 		/*
4656 		 * thisSackHighestGap will increase while handling NEW
4657 		 * segments this_sack_highest_newack will increase while
4658 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4659 		 * used for CMT DAC algo. saw_newack will also change.
4660 		 */
4661 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4662 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4663 		    num_seg, num_nr_seg, &rto_ok)) {
4664 			wake_him++;
4665 		}
4666 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4667 			/*
4668 			 * validate the biggest_tsn_acked in the gap acks if
4669 			 * strict adherence is wanted.
4670 			 */
4671 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4672 				/*
4673 				 * peer is either confused or we are under
4674 				 * attack. We must abort.
4675 				 */
4676 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4677 				    biggest_tsn_acked, send_s);
4678 				goto hopeless_peer;
4679 			}
4680 		}
4681 	}
4682 	/*******************************************/
4683 	/* cancel ALL T3-send timer if accum moved */
4684 	/*******************************************/
4685 	if (asoc->sctp_cmt_on_off > 0) {
4686 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4687 			if (net->new_pseudo_cumack)
4688 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4689 				    stcb, net,
4690 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4691 
4692 		}
4693 	} else {
4694 		if (accum_moved) {
4695 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4696 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4697 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4698 			}
4699 		}
4700 	}
4701 	/********************************************/
4702 	/* drop the acked chunks from the sentqueue */
4703 	/********************************************/
4704 	asoc->last_acked_seq = cum_ack;
4705 
4706 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4707 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4708 			break;
4709 		}
4710 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4711 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4712 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4713 #ifdef INVARIANTS
4714 			} else {
4715 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4716 #endif
4717 			}
4718 		}
4719 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4720 		if (PR_SCTP_ENABLED(tp1->flags)) {
4721 			if (asoc->pr_sctp_cnt != 0)
4722 				asoc->pr_sctp_cnt--;
4723 		}
4724 		asoc->sent_queue_cnt--;
4725 		if (tp1->data) {
4726 			/* sa_ignore NO_NULL_CHK */
4727 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4728 			sctp_m_freem(tp1->data);
4729 			tp1->data = NULL;
4730 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4731 				asoc->sent_queue_cnt_removeable--;
4732 			}
4733 		}
4734 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4735 			sctp_log_sack(asoc->last_acked_seq,
4736 			    cum_ack,
4737 			    tp1->rec.data.TSN_seq,
4738 			    0,
4739 			    0,
4740 			    SCTP_LOG_FREE_SENT);
4741 		}
4742 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4743 		wake_him++;
4744 	}
4745 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4746 #ifdef INVARIANTS
4747 		panic("Warning flight size is postive and should be 0");
4748 #else
4749 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4750 		    asoc->total_flight);
4751 #endif
4752 		asoc->total_flight = 0;
4753 	}
4754 	/* sa_ignore NO_NULL_CHK */
4755 	if ((wake_him) && (stcb->sctp_socket)) {
4756 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4757 		struct socket *so;
4758 
4759 #endif
4760 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4761 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4762 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4763 		}
4764 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4765 		so = SCTP_INP_SO(stcb->sctp_ep);
4766 		atomic_add_int(&stcb->asoc.refcnt, 1);
4767 		SCTP_TCB_UNLOCK(stcb);
4768 		SCTP_SOCKET_LOCK(so, 1);
4769 		SCTP_TCB_LOCK(stcb);
4770 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4771 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4772 			/* assoc was freed while we were unlocked */
4773 			SCTP_SOCKET_UNLOCK(so, 1);
4774 			return;
4775 		}
4776 #endif
4777 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4778 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4779 		SCTP_SOCKET_UNLOCK(so, 1);
4780 #endif
4781 	} else {
4782 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4783 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4784 		}
4785 	}
4786 
4787 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4788 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4789 			/* Setup so we will exit RFC2582 fast recovery */
4790 			will_exit_fast_recovery = 1;
4791 		}
4792 	}
4793 	/*
4794 	 * Check for revoked fragments:
4795 	 *
4796 	 * if Previous sack - Had no frags then we can't have any revoked if
4797 	 * Previous sack - Had frag's then - If we now have frags aka
4798 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4799 	 * some of them. else - The peer revoked all ACKED fragments, since
4800 	 * we had some before and now we have NONE.
4801 	 */
4802 
4803 	if (num_seg) {
4804 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4805 		asoc->saw_sack_with_frags = 1;
4806 	} else if (asoc->saw_sack_with_frags) {
4807 		int cnt_revoked = 0;
4808 
4809 		/* Peer revoked all dg's marked or acked */
4810 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4811 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4812 				tp1->sent = SCTP_DATAGRAM_SENT;
4813 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4814 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4815 					    tp1->whoTo->flight_size,
4816 					    tp1->book_size,
4817 					    (uintptr_t) tp1->whoTo,
4818 					    tp1->rec.data.TSN_seq);
4819 				}
4820 				sctp_flight_size_increase(tp1);
4821 				sctp_total_flight_increase(stcb, tp1);
4822 				tp1->rec.data.chunk_was_revoked = 1;
4823 				/*
4824 				 * To ensure that this increase in
4825 				 * flightsize, which is artificial, does not
4826 				 * throttle the sender, we also increase the
4827 				 * cwnd artificially.
4828 				 */
4829 				tp1->whoTo->cwnd += tp1->book_size;
4830 				cnt_revoked++;
4831 			}
4832 		}
4833 		if (cnt_revoked) {
4834 			reneged_all = 1;
4835 		}
4836 		asoc->saw_sack_with_frags = 0;
4837 	}
4838 	if (num_nr_seg > 0)
4839 		asoc->saw_sack_with_nr_frags = 1;
4840 	else
4841 		asoc->saw_sack_with_nr_frags = 0;
4842 
4843 	/* JRS - Use the congestion control given in the CC module */
4844 	if (ecne_seen == 0) {
4845 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4846 			if (net->net_ack2 > 0) {
4847 				/*
4848 				 * Karn's rule applies to clearing error
4849 				 * count, this is optional.
4850 				 */
4851 				net->error_count = 0;
4852 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4853 					/* addr came good */
4854 					net->dest_state |= SCTP_ADDR_REACHABLE;
4855 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4856 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4857 				}
4858 				if (net == stcb->asoc.primary_destination) {
4859 					if (stcb->asoc.alternate) {
4860 						/*
4861 						 * release the alternate,
4862 						 * primary is good
4863 						 */
4864 						sctp_free_remote_addr(stcb->asoc.alternate);
4865 						stcb->asoc.alternate = NULL;
4866 					}
4867 				}
4868 				if (net->dest_state & SCTP_ADDR_PF) {
4869 					net->dest_state &= ~SCTP_ADDR_PF;
4870 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4871 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4872 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4873 					/* Done with this net */
4874 					net->net_ack = 0;
4875 				}
4876 				/* restore any doubled timers */
4877 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4878 				if (net->RTO < stcb->asoc.minrto) {
4879 					net->RTO = stcb->asoc.minrto;
4880 				}
4881 				if (net->RTO > stcb->asoc.maxrto) {
4882 					net->RTO = stcb->asoc.maxrto;
4883 				}
4884 			}
4885 		}
4886 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4887 	}
4888 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4889 		/* nothing left in-flight */
4890 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4891 			/* stop all timers */
4892 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4893 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4894 			net->flight_size = 0;
4895 			net->partial_bytes_acked = 0;
4896 		}
4897 		asoc->total_flight = 0;
4898 		asoc->total_flight_count = 0;
4899 	}
4900 	/**********************************/
4901 	/* Now what about shutdown issues */
4902 	/**********************************/
4903 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4904 		/* nothing left on sendqueue.. consider done */
4905 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4906 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4907 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4908 		}
4909 		asoc->peers_rwnd = a_rwnd;
4910 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4911 			/* SWS sender side engages */
4912 			asoc->peers_rwnd = 0;
4913 		}
4914 		/* clean up */
4915 		if ((asoc->stream_queue_cnt == 1) &&
4916 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4917 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4918 		    (asoc->locked_on_sending)
4919 		    ) {
4920 			struct sctp_stream_queue_pending *sp;
4921 
4922 			/*
4923 			 * I may be in a state where we got all across.. but
4924 			 * cannot write more due to a shutdown... we abort
4925 			 * since the user did not indicate EOR in this case.
4926 			 */
4927 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4928 			    sctp_streamhead);
4929 			if ((sp) && (sp->length == 0)) {
4930 				asoc->locked_on_sending = NULL;
4931 				if (sp->msg_is_complete) {
4932 					asoc->stream_queue_cnt--;
4933 				} else {
4934 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4935 					asoc->stream_queue_cnt--;
4936 				}
4937 			}
4938 		}
4939 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4940 		    (asoc->stream_queue_cnt == 0)) {
4941 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4942 				/* Need to abort here */
4943 				struct mbuf *oper;
4944 
4945 		abort_out_now:
4946 				*abort_now = 1;
4947 				/* XXX */
4948 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4949 				    0, M_NOWAIT, 1, MT_DATA);
4950 				if (oper) {
4951 					struct sctp_paramhdr *ph;
4952 
4953 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4954 					ph = mtod(oper, struct sctp_paramhdr *);
4955 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4956 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4957 				}
4958 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4959 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4960 				return;
4961 			} else {
4962 				struct sctp_nets *netp;
4963 
4964 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4965 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4966 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4967 				}
4968 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4969 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4970 				sctp_stop_timers_for_shutdown(stcb);
4971 				if (asoc->alternate) {
4972 					netp = asoc->alternate;
4973 				} else {
4974 					netp = asoc->primary_destination;
4975 				}
4976 				sctp_send_shutdown(stcb, netp);
4977 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4978 				    stcb->sctp_ep, stcb, netp);
4979 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4980 				    stcb->sctp_ep, stcb, netp);
4981 			}
4982 			return;
4983 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4984 		    (asoc->stream_queue_cnt == 0)) {
4985 			struct sctp_nets *netp;
4986 
4987 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4988 				goto abort_out_now;
4989 			}
4990 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4991 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4992 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4993 			sctp_stop_timers_for_shutdown(stcb);
4994 			if (asoc->alternate) {
4995 				netp = asoc->alternate;
4996 			} else {
4997 				netp = asoc->primary_destination;
4998 			}
4999 			sctp_send_shutdown_ack(stcb, netp);
5000 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5001 			    stcb->sctp_ep, stcb, netp);
5002 			return;
5003 		}
5004 	}
5005 	/*
5006 	 * Now here we are going to recycle net_ack for a different use...
5007 	 * HEADS UP.
5008 	 */
5009 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5010 		net->net_ack = 0;
5011 	}
5012 
5013 	/*
5014 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5015 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5016 	 * automatically ensure that.
5017 	 */
5018 	if ((asoc->sctp_cmt_on_off > 0) &&
5019 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5020 	    (cmt_dac_flag == 0)) {
5021 		this_sack_lowest_newack = cum_ack;
5022 	}
5023 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5024 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5025 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5026 	}
5027 	/* JRS - Use the congestion control given in the CC module */
5028 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5029 
5030 	/* Now are we exiting loss recovery ? */
5031 	if (will_exit_fast_recovery) {
5032 		/* Ok, we must exit fast recovery */
5033 		asoc->fast_retran_loss_recovery = 0;
5034 	}
5035 	if ((asoc->sat_t3_loss_recovery) &&
5036 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5037 		/* end satellite t3 loss recovery */
5038 		asoc->sat_t3_loss_recovery = 0;
5039 	}
5040 	/*
5041 	 * CMT Fast recovery
5042 	 */
5043 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5044 		if (net->will_exit_fast_recovery) {
5045 			/* Ok, we must exit fast recovery */
5046 			net->fast_retran_loss_recovery = 0;
5047 		}
5048 	}
5049 
5050 	/* Adjust and set the new rwnd value */
5051 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5052 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5053 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5054 	}
5055 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5056 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5057 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5058 		/* SWS sender side engages */
5059 		asoc->peers_rwnd = 0;
5060 	}
5061 	if (asoc->peers_rwnd > old_rwnd) {
5062 		win_probe_recovery = 1;
5063 	}
5064 	/*
5065 	 * Now we must setup so we have a timer up for anyone with
5066 	 * outstanding data.
5067 	 */
5068 	done_once = 0;
5069 again:
5070 	j = 0;
5071 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5072 		if (win_probe_recovery && (net->window_probe)) {
5073 			win_probe_recovered = 1;
5074 			/*-
5075 			 * Find first chunk that was used with
5076 			 * window probe and clear the event. Put
5077 			 * it back into the send queue as if has
5078 			 * not been sent.
5079 			 */
5080 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5081 				if (tp1->window_probe) {
5082 					sctp_window_probe_recovery(stcb, asoc, tp1);
5083 					break;
5084 				}
5085 			}
5086 		}
5087 		if (net->flight_size) {
5088 			j++;
5089 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5090 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5091 				    stcb->sctp_ep, stcb, net);
5092 			}
5093 			if (net->window_probe) {
5094 				net->window_probe = 0;
5095 			}
5096 		} else {
5097 			if (net->window_probe) {
5098 				/*
5099 				 * In window probes we must assure a timer
5100 				 * is still running there
5101 				 */
5102 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5103 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5104 					    stcb->sctp_ep, stcb, net);
5105 
5106 				}
5107 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5108 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5109 				    stcb, net,
5110 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5111 			}
5112 		}
5113 	}
5114 	if ((j == 0) &&
5115 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5116 	    (asoc->sent_queue_retran_cnt == 0) &&
5117 	    (win_probe_recovered == 0) &&
5118 	    (done_once == 0)) {
5119 		/*
5120 		 * huh, this should not happen unless all packets are
5121 		 * PR-SCTP and marked to skip of course.
5122 		 */
5123 		if (sctp_fs_audit(asoc)) {
5124 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5125 				net->flight_size = 0;
5126 			}
5127 			asoc->total_flight = 0;
5128 			asoc->total_flight_count = 0;
5129 			asoc->sent_queue_retran_cnt = 0;
5130 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5131 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5132 					sctp_flight_size_increase(tp1);
5133 					sctp_total_flight_increase(stcb, tp1);
5134 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5135 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5136 				}
5137 			}
5138 		}
5139 		done_once = 1;
5140 		goto again;
5141 	}
5142 	/*********************************************/
5143 	/* Here we perform PR-SCTP procedures        */
5144 	/* (section 4.2)                             */
5145 	/*********************************************/
5146 	/* C1. update advancedPeerAckPoint */
5147 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5148 		asoc->advanced_peer_ack_point = cum_ack;
5149 	}
5150 	/* C2. try to further move advancedPeerAckPoint ahead */
5151 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5152 		struct sctp_tmit_chunk *lchk;
5153 		uint32_t old_adv_peer_ack_point;
5154 
5155 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5156 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5157 		/* C3. See if we need to send a Fwd-TSN */
5158 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5159 			/*
5160 			 * ISSUE with ECN, see FWD-TSN processing.
5161 			 */
5162 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5163 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5164 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5165 				    old_adv_peer_ack_point);
5166 			}
5167 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5168 				send_forward_tsn(stcb, asoc);
5169 			} else if (lchk) {
5170 				/* try to FR fwd-tsn's that get lost too */
5171 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5172 					send_forward_tsn(stcb, asoc);
5173 				}
5174 			}
5175 		}
5176 		if (lchk) {
5177 			/* Assure a timer is up */
5178 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5179 			    stcb->sctp_ep, stcb, lchk->whoTo);
5180 		}
5181 	}
5182 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5183 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5184 		    a_rwnd,
5185 		    stcb->asoc.peers_rwnd,
5186 		    stcb->asoc.total_flight,
5187 		    stcb->asoc.total_output_queue_size);
5188 	}
5189 }
5190 
5191 void
5192 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5193 {
5194 	/* Copy cum-ack */
5195 	uint32_t cum_ack, a_rwnd;
5196 
5197 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5198 	/* Arrange so a_rwnd does NOT change */
5199 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5200 
5201 	/* Now call the express sack handling */
5202 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5203 }
5204 
5205 static void
5206 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5207     struct sctp_stream_in *strmin)
5208 {
5209 	struct sctp_queued_to_read *ctl, *nctl;
5210 	struct sctp_association *asoc;
5211 	uint16_t tt;
5212 
5213 	asoc = &stcb->asoc;
5214 	tt = strmin->last_sequence_delivered;
5215 	/*
5216 	 * First deliver anything prior to and including the stream no that
5217 	 * came in
5218 	 */
5219 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5220 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5221 			/* this is deliverable now */
5222 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5223 			/* subtract pending on streams */
5224 			asoc->size_on_all_streams -= ctl->length;
5225 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5226 			/* deliver it to at least the delivery-q */
5227 			if (stcb->sctp_socket) {
5228 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5229 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5230 				    ctl,
5231 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5232 			}
5233 		} else {
5234 			/* no more delivery now. */
5235 			break;
5236 		}
5237 	}
5238 	/*
5239 	 * now we must deliver things in queue the normal way  if any are
5240 	 * now ready.
5241 	 */
5242 	tt = strmin->last_sequence_delivered + 1;
5243 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5244 		if (tt == ctl->sinfo_ssn) {
5245 			/* this is deliverable now */
5246 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5247 			/* subtract pending on streams */
5248 			asoc->size_on_all_streams -= ctl->length;
5249 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5250 			/* deliver it to at least the delivery-q */
5251 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5252 			if (stcb->sctp_socket) {
5253 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5254 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5255 				    ctl,
5256 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5257 
5258 			}
5259 			tt = strmin->last_sequence_delivered + 1;
5260 		} else {
5261 			break;
5262 		}
5263 	}
5264 }
5265 
5266 static void
5267 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5268     struct sctp_association *asoc,
5269     uint16_t stream, uint16_t seq)
5270 {
5271 	struct sctp_tmit_chunk *chk, *nchk;
5272 
5273 	/* For each one on here see if we need to toss it */
5274 	/*
5275 	 * For now large messages held on the reasmqueue that are complete
5276 	 * will be tossed too. We could in theory do more work to spin
5277 	 * through and stop after dumping one msg aka seeing the start of a
5278 	 * new msg at the head, and call the delivery function... to see if
5279 	 * it can be delivered... But for now we just dump everything on the
5280 	 * queue.
5281 	 */
5282 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5283 		/*
5284 		 * Do not toss it if on a different stream or marked for
5285 		 * unordered delivery in which case the stream sequence
5286 		 * number has no meaning.
5287 		 */
5288 		if ((chk->rec.data.stream_number != stream) ||
5289 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5290 			continue;
5291 		}
5292 		if (chk->rec.data.stream_seq == seq) {
5293 			/* It needs to be tossed */
5294 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5295 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5296 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5297 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5298 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5299 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5300 			}
5301 			asoc->size_on_reasm_queue -= chk->send_size;
5302 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5303 
5304 			/* Clear up any stream problem */
5305 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5306 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5307 				/*
5308 				 * We must dump forward this streams
5309 				 * sequence number if the chunk is not
5310 				 * unordered that is being skipped. There is
5311 				 * a chance that if the peer does not
5312 				 * include the last fragment in its FWD-TSN
5313 				 * we WILL have a problem here since you
5314 				 * would have a partial chunk in queue that
5315 				 * may not be deliverable. Also if a Partial
5316 				 * delivery API as started the user may get
5317 				 * a partial chunk. The next read returning
5318 				 * a new chunk... really ugly but I see no
5319 				 * way around it! Maybe a notify??
5320 				 */
5321 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5322 			}
5323 			if (chk->data) {
5324 				sctp_m_freem(chk->data);
5325 				chk->data = NULL;
5326 			}
5327 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5328 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5329 			/*
5330 			 * If the stream_seq is > than the purging one, we
5331 			 * are done
5332 			 */
5333 			break;
5334 		}
5335 	}
5336 }
5337 
5338 
5339 void
5340 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5341     struct sctp_forward_tsn_chunk *fwd,
5342     int *abort_flag, struct mbuf *m, int offset)
5343 {
5344 	/* The pr-sctp fwd tsn */
5345 	/*
5346 	 * here we will perform all the data receiver side steps for
5347 	 * processing FwdTSN, as required in by pr-sctp draft:
5348 	 *
5349 	 * Assume we get FwdTSN(x):
5350 	 *
5351 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5352 	 * others we have 3) examine and update re-ordering queue on
5353 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5354 	 * report where we are.
5355 	 */
5356 	struct sctp_association *asoc;
5357 	uint32_t new_cum_tsn, gap;
5358 	unsigned int i, fwd_sz, m_size;
5359 	uint32_t str_seq;
5360 	struct sctp_stream_in *strm;
5361 	struct sctp_tmit_chunk *chk, *nchk;
5362 	struct sctp_queued_to_read *ctl, *sv;
5363 
5364 	asoc = &stcb->asoc;
5365 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5366 		SCTPDBG(SCTP_DEBUG_INDATA1,
5367 		    "Bad size too small/big fwd-tsn\n");
5368 		return;
5369 	}
5370 	m_size = (stcb->asoc.mapping_array_size << 3);
5371 	/*************************************************************/
5372 	/* 1. Here we update local cumTSN and shift the bitmap array */
5373 	/*************************************************************/
5374 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5375 
5376 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5377 		/* Already got there ... */
5378 		return;
5379 	}
5380 	/*
5381 	 * now we know the new TSN is more advanced, let's find the actual
5382 	 * gap
5383 	 */
5384 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5385 	asoc->cumulative_tsn = new_cum_tsn;
5386 	if (gap >= m_size) {
5387 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5388 			struct mbuf *oper;
5389 
5390 			/*
5391 			 * out of range (of single byte chunks in the rwnd I
5392 			 * give out). This must be an attacker.
5393 			 */
5394 			*abort_flag = 1;
5395 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5396 			    0, M_NOWAIT, 1, MT_DATA);
5397 			if (oper) {
5398 				struct sctp_paramhdr *ph;
5399 				uint32_t *ippp;
5400 
5401 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5402 				    (sizeof(uint32_t) * 3);
5403 				ph = mtod(oper, struct sctp_paramhdr *);
5404 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5405 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5406 				ippp = (uint32_t *) (ph + 1);
5407 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5408 				ippp++;
5409 				*ippp = asoc->highest_tsn_inside_map;
5410 				ippp++;
5411 				*ippp = new_cum_tsn;
5412 			}
5413 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5414 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5415 			return;
5416 		}
5417 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5418 
5419 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5420 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5421 		asoc->highest_tsn_inside_map = new_cum_tsn;
5422 
5423 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5424 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5425 
5426 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5427 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5428 		}
5429 	} else {
5430 		SCTP_TCB_LOCK_ASSERT(stcb);
5431 		for (i = 0; i <= gap; i++) {
5432 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5433 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5434 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5435 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5436 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5437 				}
5438 			}
5439 		}
5440 	}
5441 	/*************************************************************/
5442 	/* 2. Clear up re-assembly queue                             */
5443 	/*************************************************************/
5444 	/*
5445 	 * First service it if pd-api is up, just in case we can progress it
5446 	 * forward
5447 	 */
5448 	if (asoc->fragmented_delivery_inprogress) {
5449 		sctp_service_reassembly(stcb, asoc);
5450 	}
5451 	/* For each one on here see if we need to toss it */
5452 	/*
5453 	 * For now large messages held on the reasmqueue that are complete
5454 	 * will be tossed too. We could in theory do more work to spin
5455 	 * through and stop after dumping one msg aka seeing the start of a
5456 	 * new msg at the head, and call the delivery function... to see if
5457 	 * it can be delivered... But for now we just dump everything on the
5458 	 * queue.
5459 	 */
5460 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5461 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5462 			/* It needs to be tossed */
5463 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5464 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5465 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5466 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5467 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5468 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5469 			}
5470 			asoc->size_on_reasm_queue -= chk->send_size;
5471 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5472 
5473 			/* Clear up any stream problem */
5474 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5475 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5476 				/*
5477 				 * We must dump forward this streams
5478 				 * sequence number if the chunk is not
5479 				 * unordered that is being skipped. There is
5480 				 * a chance that if the peer does not
5481 				 * include the last fragment in its FWD-TSN
5482 				 * we WILL have a problem here since you
5483 				 * would have a partial chunk in queue that
5484 				 * may not be deliverable. Also if a Partial
5485 				 * delivery API as started the user may get
5486 				 * a partial chunk. The next read returning
5487 				 * a new chunk... really ugly but I see no
5488 				 * way around it! Maybe a notify??
5489 				 */
5490 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5491 			}
5492 			if (chk->data) {
5493 				sctp_m_freem(chk->data);
5494 				chk->data = NULL;
5495 			}
5496 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5497 		} else {
5498 			/*
5499 			 * Ok we have gone beyond the end of the fwd-tsn's
5500 			 * mark.
5501 			 */
5502 			break;
5503 		}
5504 	}
5505 	/*******************************************************/
5506 	/* 3. Update the PR-stream re-ordering queues and fix  */
5507 	/* delivery issues as needed.                       */
5508 	/*******************************************************/
5509 	fwd_sz -= sizeof(*fwd);
5510 	if (m && fwd_sz) {
5511 		/* New method. */
5512 		unsigned int num_str;
5513 		struct sctp_strseq *stseq, strseqbuf;
5514 
5515 		offset += sizeof(*fwd);
5516 
5517 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5518 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5519 		for (i = 0; i < num_str; i++) {
5520 			uint16_t st;
5521 
5522 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5523 			    sizeof(struct sctp_strseq),
5524 			    (uint8_t *) & strseqbuf);
5525 			offset += sizeof(struct sctp_strseq);
5526 			if (stseq == NULL) {
5527 				break;
5528 			}
5529 			/* Convert */
5530 			st = ntohs(stseq->stream);
5531 			stseq->stream = st;
5532 			st = ntohs(stseq->sequence);
5533 			stseq->sequence = st;
5534 
5535 			/* now process */
5536 
5537 			/*
5538 			 * Ok we now look for the stream/seq on the read
5539 			 * queue where its not all delivered. If we find it
5540 			 * we transmute the read entry into a PDI_ABORTED.
5541 			 */
5542 			if (stseq->stream >= asoc->streamincnt) {
5543 				/* screwed up streams, stop!  */
5544 				break;
5545 			}
5546 			if ((asoc->str_of_pdapi == stseq->stream) &&
5547 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5548 				/*
5549 				 * If this is the one we were partially
5550 				 * delivering now then we no longer are.
5551 				 * Note this will change with the reassembly
5552 				 * re-write.
5553 				 */
5554 				asoc->fragmented_delivery_inprogress = 0;
5555 			}
5556 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5557 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5558 				if ((ctl->sinfo_stream == stseq->stream) &&
5559 				    (ctl->sinfo_ssn == stseq->sequence)) {
5560 					str_seq = (stseq->stream << 16) | stseq->sequence;
5561 					ctl->end_added = 1;
5562 					ctl->pdapi_aborted = 1;
5563 					sv = stcb->asoc.control_pdapi;
5564 					stcb->asoc.control_pdapi = ctl;
5565 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5566 					    stcb,
5567 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5568 					    (void *)&str_seq,
5569 					    SCTP_SO_NOT_LOCKED);
5570 					stcb->asoc.control_pdapi = sv;
5571 					break;
5572 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5573 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5574 					/* We are past our victim SSN */
5575 					break;
5576 				}
5577 			}
5578 			strm = &asoc->strmin[stseq->stream];
5579 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5580 				/* Update the sequence number */
5581 				strm->last_sequence_delivered = stseq->sequence;
5582 			}
5583 			/* now kick the stream the new way */
5584 			/* sa_ignore NO_NULL_CHK */
5585 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5586 		}
5587 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5588 	}
5589 	/*
5590 	 * Now slide thing forward.
5591 	 */
5592 	sctp_slide_mapping_arrays(stcb);
5593 
5594 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5595 		/* now lets kick out and check for more fragmented delivery */
5596 		/* sa_ignore NO_NULL_CHK */
5597 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5598 	}
5599 }
5600