xref: /freebsd/sys/netinet/sctp_indata.c (revision 641a6cfb86023499caafe26a4d821a0b885cf00b)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send, that is, and will be sending it) for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-to-many socket,
73 	 * since sb_cc is the count that everyone has put up. When we
74 	 * re-write sctp_soreceive we will fix this so that ONLY this
75 	 * association's data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * take out what has NOT been put on the socket queue and that we
92 	 * still hold for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to control overhead, reduce it
107 	 * to 1, even if it is 0: SWS avoidance engaged.
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
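
/*
 * A hedged userland sketch of the arithmetic above, with made-up numbers:
 * full window when nothing is queued; otherwise socket space minus the
 * reassembly- and stream-queue bytes (plus a per-chunk mbuf overhead),
 * then the control overhead, clamped to 1 so SWS avoidance stays engaged.
 * MODEL_MSIZE and every value below are assumptions for illustration,
 * not the kernel's real state.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdio.h>

#define MODEL_MSIZE 256			/* assumed per-mbuf overhead */

static uint32_t
sub_floor0(uint32_t a, uint32_t b)	/* like sctp_sbspace_sub() */
{
	return ((a > b) ? (a - b) : 0);
}

int
main(void)
{
	uint32_t space = 65536;				/* room in so_rcv */
	uint32_t reasm_bytes = 4096, reasm_cnt = 4;	/* reasm queue */
	uint32_t strm_bytes = 2048, strm_cnt = 2;	/* stream queues */
	uint32_t ctrl_len = 512;	/* my_rwnd_control_len analogue */
	uint32_t calc;

	calc = sub_floor0(space, reasm_bytes + reasm_cnt * MODEL_MSIZE);
	calc = sub_floor0(calc, strm_bytes + strm_cnt * MODEL_MSIZE);
	if (calc > 0) {
		calc = sub_floor0(calc, ctrl_len);
		if (calc < ctrl_len)	/* SWS guard: advertise 1, not 0 */
			calc = 1;
	}
	printf("advertised rwnd: %u\n", (unsigned)calc);
	return (0);
}
#endif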
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
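
/*
 * The readq constructors here store the DATA chunk flags in the upper
 * byte of sinfo_flags via (flags << 8). A hedged sketch of reading that
 * packing back; the 0x04 chunk-flag value mirrors SCTP_DATA_UNORDERED
 * but is a local assumption here, not taken from this file.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <assert.h>

int
main(void)
{
	uint8_t chunk_flags = 0x04;	/* unordered bit of a DATA chunk */
	uint16_t sinfo_flags = (uint16_t)(chunk_flags << 8);

	/* The chunk flags now sit in the upper byte of sinfo_flags. */
	assert((sinfo_flags >> 8) == chunk_flags);
	assert(sinfo_flags == 0x0400);
	return (0);
}
#endif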
159 
160 
161 /*
162  * Build out our readq entry based on the incoming packet.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
228 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 		cmh->cmsg_level = IPPROTO_SCTP;
255 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 		cmh->cmsg_type = SCTP_RCVINFO;
257 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 		rcvinfo->rcv_context = sinfo->sinfo_context;
265 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
268 	}
269 	if (provide_nxt) {
270 		cmh->cmsg_level = IPPROTO_SCTP;
271 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 		cmh->cmsg_type = SCTP_NXTINFO;
273 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 		nxtinfo->nxt_flags = 0;
276 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
278 		}
279 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
281 		}
282 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
284 		}
285 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
290 	}
291 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 		cmh->cmsg_level = IPPROTO_SCTP;
293 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
294 		if (use_extended) {
295 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 			cmh->cmsg_type = SCTP_EXTRCV;
297 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
299 		} else {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 			cmh->cmsg_type = SCTP_SNDRCV;
302 			*outinfo = *sinfo;
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
304 		}
305 	}
306 	return (ret);
307 }
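
/*
 * The function above lays out back-to-back cmsghdrs (SCTP_RCVINFO, an
 * optional SCTP_NXTINFO, then SCTP_SNDRCV or SCTP_EXTRCV). A hedged
 * userland sketch of walking that layout after recvmsg(); it assumes
 * msg is a struct msghdr whose msg_control buffer was filled in, and
 * handles only two of the cmsg types emitted here.
 */
#if 0	/* illustrative sketch, not compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>

static void
dump_sctp_cmsgs(struct msghdr *msg)
{
	struct cmsghdr *cmh;

	for (cmh = CMSG_FIRSTHDR(msg); cmh != NULL;
	    cmh = CMSG_NXTHDR(msg, cmh)) {
		if (cmh->cmsg_level != IPPROTO_SCTP)
			continue;
		switch (cmh->cmsg_type) {
		case SCTP_RCVINFO: {
			struct sctp_rcvinfo *ri;

			ri = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
			printf("sid %u ssn %u tsn %u\n",
			    (unsigned)ri->rcv_sid, (unsigned)ri->rcv_ssn,
			    (unsigned)ri->rcv_tsn);
			break;
		}
		case SCTP_NXTINFO: {
			struct sctp_nxtinfo *ni;

			ni = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
			printf("next: sid %u len %u\n",
			    (unsigned)ni->nxt_sid, (unsigned)ni->nxt_length);
			break;
		}
		default:
			break;	/* SCTP_SNDRCV/SCTP_EXTRCV not shown */
		}
	}
}
#endif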
308 
309 
310 static void
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
312 {
313 	uint32_t gap, i, cumackp1;
314 	int fnd = 0;
315 
316 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
317 		return;
318 	}
319 	cumackp1 = asoc->cumulative_tsn + 1;
320 	if (SCTP_TSN_GT(cumackp1, tsn)) {
321 		/*
322 		 * this tsn is behind the cum ack and thus we don't need to
323 		 * worry about it being moved from one mapping array to the other.
324 		 */
325 		return;
326 	}
327 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
328 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 		sctp_print_mapping_array(asoc);
331 #ifdef INVARIANTS
332 		panic("Things are really messed up now!!");
333 #endif
334 	}
335 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 		asoc->highest_tsn_inside_nr_map = tsn;
339 	}
340 	if (tsn == asoc->highest_tsn_inside_map) {
341 		/* We must back down to see what the new highest is */
342 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 				asoc->highest_tsn_inside_map = i;
346 				fnd = 1;
347 				break;
348 			}
349 		}
350 		if (!fnd) {
351 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
352 		}
353 	}
354 }
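
/*
 * SCTP_CALC_TSN_TO_GAP and the mapping-array macros used above reduce to
 * a wrap-safe 32-bit offset plus bit operations on a byte array. A hedged
 * model; the helper bodies are simplified stand-ins, not the kernel
 * definitions.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <string.h>
#include <assert.h>

static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base)
{
	return (tsn - base);	/* uint32_t subtraction wraps correctly */
}

static int
tsn_present(const uint8_t *map, uint32_t gap)
{
	return ((map[gap >> 3] >> (gap & 0x07)) & 0x01);
}

static void
tsn_set(uint8_t *map, uint32_t gap)
{
	map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
}

int
main(void)
{
	uint8_t map[64];
	uint32_t base = 0xfffffff0U;	/* base just below the wrap */
	uint32_t tsn = 0x00000005U;	/* TSN just after the wrap */
	uint32_t gap = tsn_to_gap(tsn, base);

	memset(map, 0, sizeof(map));
	assert(gap == 21);		/* 0x05 - 0xfffffff0 mod 2^32 */
	tsn_set(map, gap);
	assert(tsn_present(map, gap));
	return (0);
}
#endif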
355 
356 
357 /*
358  * We are currently delivering from the reassembly queue. We must continue
359  * to deliver until we either: 1) run out of space, 2) run out of sequential
360  * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
361  */
362 static void
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
364 {
365 	struct sctp_tmit_chunk *chk, *nchk;
366 	uint16_t nxt_todel;
367 	uint16_t stream_no;
368 	int end = 0;
369 	int cntDel;
370 	struct sctp_queued_to_read *control, *ctl, *nctl;
371 
372 	if (stcb == NULL)
373 		return;
374 
375 	cntDel = stream_no = 0;
376 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 		/* socket above is long gone or going.. */
380 abandon:
381 		asoc->fragmented_delivery_inprogress = 0;
382 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 			asoc->size_on_reasm_queue -= chk->send_size;
385 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
386 			/*
387 			 * Lose the data pointer, since it's in the socket
388 			 * buffer
389 			 */
390 			if (chk->data) {
391 				sctp_m_freem(chk->data);
392 				chk->data = NULL;
393 			}
394 			/* Now free the address and data */
395 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 			/* sa_ignore FREED_MEMORY */
397 		}
398 		return;
399 	}
400 	SCTP_TCB_LOCK_ASSERT(stcb);
401 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 			/* Can't deliver more :< */
404 			return;
405 		}
406 		stream_no = chk->rec.data.stream_number;
407 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 		if (nxt_todel != chk->rec.data.stream_seq &&
409 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
410 			/*
411 			 * Not the next sequence to deliver in its stream and
412 			 * the chunk is not unordered, so we must wait.
413 			 */
414 			return;
415 		}
416 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
417 
418 			control = sctp_build_readq_entry_chk(stcb, chk);
419 			if (control == NULL) {
420 				/* out of memory? */
421 				return;
422 			}
423 			/* save it off for our future deliveries */
424 			stcb->asoc.control_pdapi = control;
425 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
426 				end = 1;
427 			else
428 				end = 0;
429 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 			sctp_add_to_readq(stcb->sctp_ep,
431 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
432 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
433 			cntDel++;
434 		} else {
435 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
436 				end = 1;
437 			else
438 				end = 0;
439 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 			    stcb->asoc.control_pdapi,
442 			    chk->data, end, chk->rec.data.TSN_seq,
443 			    &stcb->sctp_socket->so_rcv)) {
444 				/*
445 				 * something is very wrong, either
446 				 * control_pdapi is NULL, or the tail_mbuf
447 				 * is corrupt, or there is an EOM already on
448 				 * the mbuf chain.
449 				 */
450 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
451 					goto abandon;
452 				} else {
453 #ifdef INVARIANTS
454 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 						panic("This should not happen control_pdapi NULL?");
456 					}
457 					/* if we did not panic, it was an EOM */
458 					panic("Bad chunking ??");
459 #else
460 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
462 					}
463 					SCTP_PRINTF("Bad chunking ??\n");
464 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
465 
466 #endif
467 					goto abandon;
468 				}
469 			}
470 			cntDel++;
471 		}
472 		/* pull it off; we delivered it */
473 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 			asoc->fragmented_delivery_inprogress = 0;
476 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 				asoc->strmin[stream_no].last_sequence_delivered++;
478 			}
479 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
481 			}
482 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
483 			/*
484 			 * turn the flag back on since we just delivered
485 			 * yet another one.
486 			 */
487 			asoc->fragmented_delivery_inprogress = 1;
488 		}
489 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
493 
494 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->size_on_reasm_queue -= chk->send_size;
496 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 		/* free up the chk */
498 		chk->data = NULL;
499 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
500 
501 		if (asoc->fragmented_delivery_inprogress == 0) {
502 			/*
503 			 * Now let's see if we can deliver the next one on
504 			 * the stream
505 			 */
506 			struct sctp_stream_in *strm;
507 
508 			strm = &asoc->strmin[stream_no];
509 			nxt_todel = strm->last_sequence_delivered + 1;
510 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 				/* Deliver more if we can. */
512 				if (nxt_todel == ctl->sinfo_ssn) {
513 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 					asoc->size_on_all_streams -= ctl->length;
515 					sctp_ucount_decr(asoc->cnt_on_all_streams);
516 					strm->last_sequence_delivered++;
517 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 					sctp_add_to_readq(stcb->sctp_ep, stcb,
519 					    ctl,
520 					    &stcb->sctp_socket->so_rcv, 1,
521 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
522 				} else {
523 					break;
524 				}
525 				nxt_todel = strm->last_sequence_delivered + 1;
526 			}
527 			break;
528 		}
529 	}
530 }
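
/*
 * A hedged model of the delivery loop's stop conditions above, run over a
 * tiny invented fragment array: stop on a TSN gap or on a LAST fragment
 * (the out-of-socket-space case is not modeled). struct frag and the
 * values below are illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdio.h>

struct frag {
	uint32_t tsn;
	int	 is_last;	/* stands in for SCTP_DATA_LAST_FRAG */
};

int
main(void)
{
	struct frag q[] = { {10, 0}, {11, 0}, {12, 1}, {14, 0} };
	uint32_t last_delivered = 9;
	int i;

	for (i = 0; i < 4; i++) {
		if (q[i].tsn != last_delivered + 1)
			break;		/* out of sequential TSNs */
		printf("deliver tsn %u\n", (unsigned)q[i].tsn);
		last_delivered = q[i].tsn;
		if (q[i].is_last)
			break;		/* hit the LAST_FRAG flag */
	}
	/* TSN 14 is never delivered here: 13 is missing. */
	return (0);
}
#endif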
531 
532 /*
533  * Queue the chunk either right into the socket buffer if it is the next one
534  * to go OR put it in the correct place in the delivery queue.  If we do
535  * append to the so_buf, keep doing so until we are out of order. One big
536  * question still remains, what to do when the socket buffer is FULL??
537  */
538 static void
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540     struct sctp_queued_to_read *control, int *abort_flag)
541 {
542 	/*
543 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 	 * all the data in one stream this could happen quite rapidly. One
545 	 * could use the TSN to keep track of things, but this scheme breaks
546 	 * down in the other type of stream usage that could occur. Send a
547 	 * single msg to stream 0, send 4 billion messages to stream 1, now
548 	 * send a message to stream 0. You have a situation where the TSN
549 	 * has wrapped but not in the stream. Is this worth worrying about,
550 	 * or should we just change our queue sort at the bottom to be by
551 	 * TSN?
552 	 *
553 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 	 * assignment this could happen... and I don't see how this would be
556 	 * a violation. So for now I am undecided and will leave the sort by
557 	 * SSN alone. Maybe a hybrid approach is the answer.
558 	 *
559 	 */
560 	struct sctp_stream_in *strm;
561 	struct sctp_queued_to_read *at;
562 	int queue_needed;
563 	uint16_t nxt_todel;
564 	struct mbuf *oper;
565 
566 	queue_needed = 1;
567 	asoc->size_on_all_streams += control->length;
568 	sctp_ucount_incr(asoc->cnt_on_all_streams);
569 	strm = &asoc->strmin[control->sinfo_stream];
570 	nxt_todel = strm->last_sequence_delivered + 1;
571 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
572 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
573 	}
574 	SCTPDBG(SCTP_DEBUG_INDATA1,
575 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
576 	    (uint32_t) control->sinfo_stream,
577 	    (uint32_t) strm->last_sequence_delivered,
578 	    (uint32_t) nxt_todel);
579 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
580 		/* The incoming sseq is behind where we last delivered? */
581 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
582 		    control->sinfo_ssn, strm->last_sequence_delivered);
583 protocol_error:
584 		/*
585 		 * throw it in the stream so it gets cleaned up in
586 		 * association destruction
587 		 */
588 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
589 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
590 		    0, M_DONTWAIT, 1, MT_DATA);
591 		if (oper) {
592 			struct sctp_paramhdr *ph;
593 			uint32_t *ippp;
594 
595 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
596 			    (sizeof(uint32_t) * 3);
597 			ph = mtod(oper, struct sctp_paramhdr *);
598 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
599 			ph->param_length = htons(SCTP_BUF_LEN(oper));
600 			ippp = (uint32_t *) (ph + 1);
601 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
602 			ippp++;
603 			*ippp = control->sinfo_tsn;
604 			ippp++;
605 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
606 		}
607 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
608 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
609 		*abort_flag = 1;
610 		return;
611 
612 	}
613 	if (nxt_todel == control->sinfo_ssn) {
614 		/* can be delivered right away? */
615 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
617 		}
618 		/* EY it won't be queued if it could be delivered directly */
619 		queue_needed = 0;
620 		asoc->size_on_all_streams -= control->length;
621 		sctp_ucount_decr(asoc->cnt_on_all_streams);
622 		strm->last_sequence_delivered++;
623 
624 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 		sctp_add_to_readq(stcb->sctp_ep, stcb,
626 		    control,
627 		    &stcb->sctp_socket->so_rcv, 1,
628 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
630 			/* all delivered */
631 			nxt_todel = strm->last_sequence_delivered + 1;
632 			if (nxt_todel == control->sinfo_ssn) {
633 				TAILQ_REMOVE(&strm->inqueue, control, next);
634 				asoc->size_on_all_streams -= control->length;
635 				sctp_ucount_decr(asoc->cnt_on_all_streams);
636 				strm->last_sequence_delivered++;
637 				/*
638 				 * We ignore the return of deliver_data here
639 				 * since we always can hold the chunk on the
640 				 * d-queue. And we have a finite number that
641 				 * can be delivered from the strq.
642 				 */
643 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 					sctp_log_strm_del(control, NULL,
645 					    SCTP_STR_LOG_FROM_IMMED_DEL);
646 				}
647 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 				sctp_add_to_readq(stcb->sctp_ep, stcb,
649 				    control,
650 				    &stcb->sctp_socket->so_rcv, 1,
651 				    SCTP_READ_LOCK_NOT_HELD,
652 				    SCTP_SO_NOT_LOCKED);
653 				continue;
654 			}
655 			break;
656 		}
657 	}
658 	if (queue_needed) {
659 		/*
660 		 * Ok, we did not deliver this guy, find the correct place
661 		 * to put it on the queue.
662 		 */
663 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
664 			goto protocol_error;
665 		}
666 		if (TAILQ_EMPTY(&strm->inqueue)) {
667 			/* Empty queue */
668 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
669 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
670 			}
671 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
672 		} else {
673 			TAILQ_FOREACH(at, &strm->inqueue, next) {
674 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
675 					/*
676 					 * one in queue is bigger than the
677 					 * new one, insert before this one
678 					 */
679 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
680 						sctp_log_strm_del(control, at,
681 						    SCTP_STR_LOG_FROM_INSERT_MD);
682 					}
683 					TAILQ_INSERT_BEFORE(at, control, next);
684 					break;
685 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
686 					/*
687 					 * Gak, He sent me a duplicate str
688 					 * seq number
689 					 */
690 					/*
691 					 * foo bar, I guess I will just free
692 					 * this new guy, should we abort
693 					 * too? FIX ME MAYBE? Or it COULD be
694 					 * that the SSNs have wrapped.
695 					 * Maybe I should compare to TSN
696 					 * somehow... sigh; for now just blow
697 					 * away the chunk!
698 					 */
699 
700 					if (control->data)
701 						sctp_m_freem(control->data);
702 					control->data = NULL;
703 					asoc->size_on_all_streams -= control->length;
704 					sctp_ucount_decr(asoc->cnt_on_all_streams);
705 					if (control->whoFrom) {
706 						sctp_free_remote_addr(control->whoFrom);
707 						control->whoFrom = NULL;
708 					}
709 					sctp_free_a_readq(stcb, control);
710 					return;
711 				} else {
712 					if (TAILQ_NEXT(at, next) == NULL) {
713 						/*
714 						 * We are at the end, insert
715 						 * it after this one
716 						 */
717 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
718 							sctp_log_strm_del(control, at,
719 							    SCTP_STR_LOG_FROM_INSERT_TL);
720 						}
721 						TAILQ_INSERT_AFTER(&strm->inqueue,
722 						    at, control, next);
723 						break;
724 					}
725 				}
726 			}
727 		}
728 	}
729 }
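
/*
 * The sorted insert above leans on SCTP_SSN_GT/SCTP_SSN_GE being wrap-safe
 * serial comparisons of 16-bit SSNs. A hedged model in the RFC 1982 style;
 * this ssn_gt() is an assumption standing in for the real macro in
 * sctp_constants.h.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <assert.h>

static int
ssn_gt(uint16_t a, uint16_t b)
{
	/* a > b in serial-number arithmetic */
	return (((int16_t)(a - b)) > 0);
}

int
main(void)
{
	assert(ssn_gt(2, 1));		/* plain case */
	assert(ssn_gt(0, 65535));	/* 0 follows 65535 across the wrap */
	assert(!ssn_gt(65535, 0));
	return (0);
}
#endif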
730 
731 /*
732  * Returns two things: the total size of the deliverable parts of the first
733  * fragmented message on the reassembly queue (via t_size), and 1 if all of
734  * the message is ready or 0 if the message is still incomplete.
735  */
736 static int
737 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
738 {
739 	struct sctp_tmit_chunk *chk;
740 	uint32_t tsn;
741 
742 	*t_size = 0;
743 	chk = TAILQ_FIRST(&asoc->reasmqueue);
744 	if (chk == NULL) {
745 		/* nothing on the queue */
746 		return (0);
747 	}
748 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
749 		/* Not a first on the queue */
750 		return (0);
751 	}
752 	tsn = chk->rec.data.TSN_seq;
753 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
754 		if (tsn != chk->rec.data.TSN_seq) {
755 			return (0);
756 		}
757 		*t_size += chk->send_size;
758 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
759 			return (1);
760 		}
761 		tsn++;
762 	}
763 	return (0);
764 }
765 
766 static void
767 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
768 {
769 	struct sctp_tmit_chunk *chk;
770 	uint16_t nxt_todel;
771 	uint32_t tsize, pd_point;
772 
773 doit_again:
774 	chk = TAILQ_FIRST(&asoc->reasmqueue);
775 	if (chk == NULL) {
776 		/* Huh? */
777 		asoc->size_on_reasm_queue = 0;
778 		asoc->cnt_on_reasm_queue = 0;
779 		return;
780 	}
781 	if (asoc->fragmented_delivery_inprogress == 0) {
782 		nxt_todel =
783 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
784 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
785 		    (nxt_todel == chk->rec.data.stream_seq ||
786 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
787 			/*
788 			 * Yep, the first one is here and it's ok to deliver,
789 			 * but should we?
790 			 */
791 			if (stcb->sctp_socket) {
792 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
793 				    stcb->sctp_ep->partial_delivery_point);
794 			} else {
795 				pd_point = stcb->sctp_ep->partial_delivery_point;
796 			}
797 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
798 
799 				/*
800 				 * Yes, we set up to start reception by
801 				 * backing down the TSN, just in case we
802 				 * can't deliver.
803 				 */
804 				asoc->fragmented_delivery_inprogress = 1;
805 				asoc->tsn_last_delivered =
806 				    chk->rec.data.TSN_seq - 1;
807 				asoc->str_of_pdapi =
808 				    chk->rec.data.stream_number;
809 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
810 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
811 				asoc->fragment_flags = chk->rec.data.rcv_flags;
812 				sctp_service_reassembly(stcb, asoc);
813 			}
814 		}
815 	} else {
816 		/*
817 		 * Service re-assembly will deliver stream data queued at
818 		 * the end of fragmented delivery, but it won't know to go
819 		 * back and call itself again... we do that here with the
820 		 * goto doit_again.
821 		 */
822 		sctp_service_reassembly(stcb, asoc);
823 		if (asoc->fragmented_delivery_inprogress == 0) {
824 			/*
825 			 * finished our Fragmented delivery, could be more
826 			 * waiting?
827 			 */
828 			goto doit_again;
829 		}
830 	}
831 }
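
/*
 * The partial-delivery decision above, reduced to its inputs: deliver when
 * the whole message is present, or when the deliverable prefix reaches
 * pd_point = min(socket buffer limit, partial_delivery_point). Every
 * number below is invented for illustration.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdio.h>

static uint32_t
umin(uint32_t a, uint32_t b)
{
	return ((a < b) ? a : b);
}

int
main(void)
{
	uint32_t sb_limit = 65536;	/* SCTP_SB_LIMIT_RCV analogue */
	uint32_t pd_thresh = 4096;	/* partial_delivery_point */
	uint32_t pd_point = umin(sb_limit, pd_thresh);
	uint32_t tsize = 6000;		/* deliverable bytes on the queue */
	int all_here = 0;		/* message not yet complete */

	if (all_here || tsize >= pd_point)
		printf("start partial delivery (%u >= %u)\n",
		    (unsigned)tsize, (unsigned)pd_point);
	return (0);
}
#endif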
832 
833 /*
834  * Dump onto the re-assembly queue, in its proper place. After dumping on the
835  * queue, see if anything can be delivered. If so pull it off (or as much as
836  * we can). If we run out of space then we must dump what we can and set the
837  * appropriate flag to say we queued what we could.
838  */
839 static void
840 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
841     struct sctp_tmit_chunk *chk, int *abort_flag)
842 {
843 	struct mbuf *oper;
844 	uint32_t cum_ackp1, prev_tsn, post_tsn;
845 	struct sctp_tmit_chunk *at, *prev, *next;
846 
847 	prev = next = NULL;
848 	cum_ackp1 = asoc->tsn_last_delivered + 1;
849 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
850 		/* This is the first one on the queue */
851 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
852 		/*
853 		 * we do not check for delivery of anything when only one
854 		 * fragment is here
855 		 */
856 		asoc->size_on_reasm_queue = chk->send_size;
857 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
858 		if (chk->rec.data.TSN_seq == cum_ackp1) {
859 			if (asoc->fragmented_delivery_inprogress == 0 &&
860 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
861 			    SCTP_DATA_FIRST_FRAG) {
862 				/*
863 				 * An empty queue, no delivery inprogress,
864 				 * we hit the next one and it does NOT have
865 				 * a FIRST fragment mark.
866 				 */
867 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
868 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
869 				    0, M_DONTWAIT, 1, MT_DATA);
870 
871 				if (oper) {
872 					struct sctp_paramhdr *ph;
873 					uint32_t *ippp;
874 
875 					SCTP_BUF_LEN(oper) =
876 					    sizeof(struct sctp_paramhdr) +
877 					    (sizeof(uint32_t) * 3);
878 					ph = mtod(oper, struct sctp_paramhdr *);
879 					ph->param_type =
880 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
881 					ph->param_length = htons(SCTP_BUF_LEN(oper));
882 					ippp = (uint32_t *) (ph + 1);
883 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
884 					ippp++;
885 					*ippp = chk->rec.data.TSN_seq;
886 					ippp++;
887 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
888 
889 				}
890 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
891 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
892 				*abort_flag = 1;
893 			} else if (asoc->fragmented_delivery_inprogress &&
894 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
895 				/*
896 				 * We are doing a partial delivery and the
897 				 * NEXT chunk MUST be either the LAST or
898 				 * MIDDLE fragment NOT a FIRST
899 				 */
900 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
901 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
902 				    0, M_DONTWAIT, 1, MT_DATA);
903 				if (oper) {
904 					struct sctp_paramhdr *ph;
905 					uint32_t *ippp;
906 
907 					SCTP_BUF_LEN(oper) =
908 					    sizeof(struct sctp_paramhdr) +
909 					    (3 * sizeof(uint32_t));
910 					ph = mtod(oper, struct sctp_paramhdr *);
911 					ph->param_type =
912 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
913 					ph->param_length = htons(SCTP_BUF_LEN(oper));
914 					ippp = (uint32_t *) (ph + 1);
915 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
916 					ippp++;
917 					*ippp = chk->rec.data.TSN_seq;
918 					ippp++;
919 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
920 				}
921 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
922 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
923 				*abort_flag = 1;
924 			} else if (asoc->fragmented_delivery_inprogress) {
925 				/*
926 				 * Here we are ok with a MIDDLE or LAST
927 				 * piece
928 				 */
929 				if (chk->rec.data.stream_number !=
930 				    asoc->str_of_pdapi) {
931 					/* Got to be the right STR No */
932 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
933 					    chk->rec.data.stream_number,
934 					    asoc->str_of_pdapi);
935 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
936 					    0, M_DONTWAIT, 1, MT_DATA);
937 					if (oper) {
938 						struct sctp_paramhdr *ph;
939 						uint32_t *ippp;
940 
941 						SCTP_BUF_LEN(oper) =
942 						    sizeof(struct sctp_paramhdr) +
943 						    (sizeof(uint32_t) * 3);
944 						ph = mtod(oper,
945 						    struct sctp_paramhdr *);
946 						ph->param_type =
947 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
948 						ph->param_length =
949 						    htons(SCTP_BUF_LEN(oper));
950 						ippp = (uint32_t *) (ph + 1);
951 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
952 						ippp++;
953 						*ippp = chk->rec.data.TSN_seq;
954 						ippp++;
955 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
956 					}
957 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
958 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
959 					*abort_flag = 1;
960 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
961 					    SCTP_DATA_UNORDERED &&
962 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
963 					/* Got to be the right STR Seq */
964 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
965 					    chk->rec.data.stream_seq,
966 					    asoc->ssn_of_pdapi);
967 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
968 					    0, M_DONTWAIT, 1, MT_DATA);
969 					if (oper) {
970 						struct sctp_paramhdr *ph;
971 						uint32_t *ippp;
972 
973 						SCTP_BUF_LEN(oper) =
974 						    sizeof(struct sctp_paramhdr) +
975 						    (3 * sizeof(uint32_t));
976 						ph = mtod(oper,
977 						    struct sctp_paramhdr *);
978 						ph->param_type =
979 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
980 						ph->param_length =
981 						    htons(SCTP_BUF_LEN(oper));
982 						ippp = (uint32_t *) (ph + 1);
983 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
984 						ippp++;
985 						*ippp = chk->rec.data.TSN_seq;
986 						ippp++;
987 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
988 
989 					}
990 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
991 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
992 					*abort_flag = 1;
993 				}
994 			}
995 		}
996 		return;
997 	}
998 	/* Find its place */
999 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1000 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1001 			/*
1002 			 * one in queue is bigger than the new one, insert
1003 			 * before this one
1004 			 */
1005 			/* A check */
1006 			asoc->size_on_reasm_queue += chk->send_size;
1007 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1008 			next = at;
1009 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1010 			break;
1011 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1012 			/* Gak, He sent me a duplicate str seq number */
1013 			/*
1014 			 * foo bar, I guess I will just free this new guy,
1015 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1016 			 * that the SSN's have wrapped. Maybe I should
1017 			 * that the SSNs have wrapped. Maybe I should
1018 			 * compare to TSN somehow... sigh; for now just blow
1019 			 */
1020 			if (chk->data) {
1021 				sctp_m_freem(chk->data);
1022 				chk->data = NULL;
1023 			}
1024 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1025 			return;
1026 		} else {
1027 			prev = at;
1028 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1029 				/*
1030 				 * We are at the end, insert it after this
1031 				 * one
1032 				 */
1033 				/* check it first */
1034 				asoc->size_on_reasm_queue += chk->send_size;
1035 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1036 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1037 				break;
1038 			}
1039 		}
1040 	}
1041 	/* Now the audits */
1042 	if (prev) {
1043 		prev_tsn = chk->rec.data.TSN_seq - 1;
1044 		if (prev_tsn == prev->rec.data.TSN_seq) {
1045 			/*
1046 			 * Ok the one I am dropping onto the end is the
1047 			 * NEXT. A bit of validation here.
1048 			 */
1049 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1050 			    SCTP_DATA_FIRST_FRAG ||
1051 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1052 			    SCTP_DATA_MIDDLE_FRAG) {
1053 				/*
1054 				 * Insert chk MUST be a MIDDLE or LAST
1055 				 * fragment
1056 				 */
1057 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1058 				    SCTP_DATA_FIRST_FRAG) {
1059 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
1060 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1061 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1062 					    0, M_DONTWAIT, 1, MT_DATA);
1063 					if (oper) {
1064 						struct sctp_paramhdr *ph;
1065 						uint32_t *ippp;
1066 
1067 						SCTP_BUF_LEN(oper) =
1068 						    sizeof(struct sctp_paramhdr) +
1069 						    (3 * sizeof(uint32_t));
1070 						ph = mtod(oper,
1071 						    struct sctp_paramhdr *);
1072 						ph->param_type =
1073 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1074 						ph->param_length =
1075 						    htons(SCTP_BUF_LEN(oper));
1076 						ippp = (uint32_t *) (ph + 1);
1077 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1078 						ippp++;
1079 						*ippp = chk->rec.data.TSN_seq;
1080 						ippp++;
1081 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1082 
1083 					}
1084 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1085 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1086 					*abort_flag = 1;
1087 					return;
1088 				}
1089 				if (chk->rec.data.stream_number !=
1090 				    prev->rec.data.stream_number) {
1091 					/*
1092 					 * Huh, need the correct STR here,
1093 					 * they must be the same.
1094 					 */
1095 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1096 					    chk->rec.data.stream_number,
1097 					    prev->rec.data.stream_number);
1098 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1099 					    0, M_DONTWAIT, 1, MT_DATA);
1100 					if (oper) {
1101 						struct sctp_paramhdr *ph;
1102 						uint32_t *ippp;
1103 
1104 						SCTP_BUF_LEN(oper) =
1105 						    sizeof(struct sctp_paramhdr) +
1106 						    (3 * sizeof(uint32_t));
1107 						ph = mtod(oper,
1108 						    struct sctp_paramhdr *);
1109 						ph->param_type =
1110 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1111 						ph->param_length =
1112 						    htons(SCTP_BUF_LEN(oper));
1113 						ippp = (uint32_t *) (ph + 1);
1114 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1115 						ippp++;
1116 						*ippp = chk->rec.data.TSN_seq;
1117 						ippp++;
1118 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1119 					}
1120 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1121 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1122 					*abort_flag = 1;
1123 					return;
1124 				}
1125 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1126 				    chk->rec.data.stream_seq !=
1127 				    prev->rec.data.stream_seq) {
1128 					/*
1129 					 * Huh, need the correct STR here,
1130 					 * they must be the same.
1131 					 */
1132 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1133 					    chk->rec.data.stream_seq,
1134 					    prev->rec.data.stream_seq);
1135 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1136 					    0, M_DONTWAIT, 1, MT_DATA);
1137 					if (oper) {
1138 						struct sctp_paramhdr *ph;
1139 						uint32_t *ippp;
1140 
1141 						SCTP_BUF_LEN(oper) =
1142 						    sizeof(struct sctp_paramhdr) +
1143 						    (3 * sizeof(uint32_t));
1144 						ph = mtod(oper,
1145 						    struct sctp_paramhdr *);
1146 						ph->param_type =
1147 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1148 						ph->param_length =
1149 						    htons(SCTP_BUF_LEN(oper));
1150 						ippp = (uint32_t *) (ph + 1);
1151 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1152 						ippp++;
1153 						*ippp = chk->rec.data.TSN_seq;
1154 						ippp++;
1155 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1156 					}
1157 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1158 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1159 					*abort_flag = 1;
1160 					return;
1161 				}
1162 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1163 			    SCTP_DATA_LAST_FRAG) {
1164 				/* Insert chk MUST be a FIRST */
1165 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1166 				    SCTP_DATA_FIRST_FRAG) {
1167 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
1168 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1169 					    0, M_DONTWAIT, 1, MT_DATA);
1170 					if (oper) {
1171 						struct sctp_paramhdr *ph;
1172 						uint32_t *ippp;
1173 
1174 						SCTP_BUF_LEN(oper) =
1175 						    sizeof(struct sctp_paramhdr) +
1176 						    (3 * sizeof(uint32_t));
1177 						ph = mtod(oper,
1178 						    struct sctp_paramhdr *);
1179 						ph->param_type =
1180 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1181 						ph->param_length =
1182 						    htons(SCTP_BUF_LEN(oper));
1183 						ippp = (uint32_t *) (ph + 1);
1184 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1185 						ippp++;
1186 						*ippp = chk->rec.data.TSN_seq;
1187 						ippp++;
1188 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1189 
1190 					}
1191 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1192 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1193 					*abort_flag = 1;
1194 					return;
1195 				}
1196 			}
1197 		}
1198 	}
1199 	if (next) {
1200 		post_tsn = chk->rec.data.TSN_seq + 1;
1201 		if (post_tsn == next->rec.data.TSN_seq) {
1202 			/*
1203 			 * Ok the one I am inserting ahead of is my NEXT
1204 			 * one. A bit of validation here.
1205 			 */
1206 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1207 				/* Insert chk MUST be a last fragment */
1208 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1209 				    != SCTP_DATA_LAST_FRAG) {
1210 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1211 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
1212 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1213 					    0, M_DONTWAIT, 1, MT_DATA);
1214 					if (oper) {
1215 						struct sctp_paramhdr *ph;
1216 						uint32_t *ippp;
1217 
1218 						SCTP_BUF_LEN(oper) =
1219 						    sizeof(struct sctp_paramhdr) +
1220 						    (3 * sizeof(uint32_t));
1221 						ph = mtod(oper,
1222 						    struct sctp_paramhdr *);
1223 						ph->param_type =
1224 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1225 						ph->param_length =
1226 						    htons(SCTP_BUF_LEN(oper));
1227 						ippp = (uint32_t *) (ph + 1);
1228 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1229 						ippp++;
1230 						*ippp = chk->rec.data.TSN_seq;
1231 						ippp++;
1232 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1233 					}
1234 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1235 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1236 					*abort_flag = 1;
1237 					return;
1238 				}
1239 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1240 				    SCTP_DATA_MIDDLE_FRAG ||
1241 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1242 			    SCTP_DATA_LAST_FRAG) {
1243 				/*
1244 				 * Insert chk CAN be MIDDLE or FIRST NOT
1245 				 * LAST
1246 				 */
1247 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1248 				    SCTP_DATA_LAST_FRAG) {
1249 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1250 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1251 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1252 					    0, M_DONTWAIT, 1, MT_DATA);
1253 					if (oper) {
1254 						struct sctp_paramhdr *ph;
1255 						uint32_t *ippp;
1256 
1257 						SCTP_BUF_LEN(oper) =
1258 						    sizeof(struct sctp_paramhdr) +
1259 						    (3 * sizeof(uint32_t));
1260 						ph = mtod(oper,
1261 						    struct sctp_paramhdr *);
1262 						ph->param_type =
1263 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 						ph->param_length =
1265 						    htons(SCTP_BUF_LEN(oper));
1266 						ippp = (uint32_t *) (ph + 1);
1267 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 						ippp++;
1269 						*ippp = chk->rec.data.TSN_seq;
1270 						ippp++;
1271 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1272 
1273 					}
1274 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1275 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1276 					*abort_flag = 1;
1277 					return;
1278 				}
1279 				if (chk->rec.data.stream_number !=
1280 				    next->rec.data.stream_number) {
1281 					/*
1282 					 * Huh, need the correct STR here,
1283 					 * they must be the same.
1284 					 */
1285 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1286 					    chk->rec.data.stream_number,
1287 					    next->rec.data.stream_number);
1288 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1289 					    0, M_DONTWAIT, 1, MT_DATA);
1290 					if (oper) {
1291 						struct sctp_paramhdr *ph;
1292 						uint32_t *ippp;
1293 
1294 						SCTP_BUF_LEN(oper) =
1295 						    sizeof(struct sctp_paramhdr) +
1296 						    (3 * sizeof(uint32_t));
1297 						ph = mtod(oper,
1298 						    struct sctp_paramhdr *);
1299 						ph->param_type =
1300 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1301 						ph->param_length =
1302 						    htons(SCTP_BUF_LEN(oper));
1303 						ippp = (uint32_t *) (ph + 1);
1304 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1305 						ippp++;
1306 						*ippp = chk->rec.data.TSN_seq;
1307 						ippp++;
1308 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1309 
1310 					}
1311 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1312 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1313 					*abort_flag = 1;
1314 					return;
1315 				}
1316 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1317 				    chk->rec.data.stream_seq !=
1318 				    next->rec.data.stream_seq) {
1319 					/*
1320 					 * Huh, need the correct STR here,
1321 					 * they must be the same.
1322 					 */
1323 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1324 					    chk->rec.data.stream_seq,
1325 					    next->rec.data.stream_seq);
1326 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1327 					    0, M_DONTWAIT, 1, MT_DATA);
1328 					if (oper) {
1329 						struct sctp_paramhdr *ph;
1330 						uint32_t *ippp;
1331 
1332 						SCTP_BUF_LEN(oper) =
1333 						    sizeof(struct sctp_paramhdr) +
1334 						    (3 * sizeof(uint32_t));
1335 						ph = mtod(oper,
1336 						    struct sctp_paramhdr *);
1337 						ph->param_type =
1338 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1339 						ph->param_length =
1340 						    htons(SCTP_BUF_LEN(oper));
1341 						ippp = (uint32_t *) (ph + 1);
1342 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1343 						ippp++;
1344 						*ippp = chk->rec.data.TSN_seq;
1345 						ippp++;
1346 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1347 					}
1348 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1349 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1350 					*abort_flag = 1;
1351 					return;
1352 				}
1353 			}
1354 		}
1355 	}
1356 	/* Do we need to do some delivery? check */
1357 	sctp_deliver_reasm_check(stcb, asoc);
1358 }
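
/*
 * The prev/next audits above enforce one adjacency rule: after a LAST
 * fragment a new message must begin, otherwise the next TSN must continue
 * the current one. A hedged predicate; the enum folds out the unfragmented
 * (FIRST|LAST) case for brevity and is not the kernel's flag encoding.
 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>

enum frag { FIRST, MIDDLE, LAST };

/* May fragment b sit at TSN(a) + 1? */
static int
may_follow(enum frag a, enum frag b)
{
	if (a == LAST)
		return (b == FIRST);	/* a new message must start */
	return (b != FIRST);		/* continuation: MIDDLE or LAST */
}

int
main(void)
{
	assert(may_follow(FIRST, MIDDLE));
	assert(may_follow(MIDDLE, LAST));
	assert(may_follow(LAST, FIRST));
	assert(!may_follow(MIDDLE, FIRST));	/* the LOC_6-style abort */
	assert(!may_follow(LAST, MIDDLE));	/* the LOC_9-style abort */
	return (0);
}
#endif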
1359 
1360 /*
1361  * This is an unfortunate routine. It checks to make sure an evil guy is not
1362  * stuffing us full of bad packet fragments. A broken peer could also do this,
1363  * but that is doubtful. It is too bad I must worry about evil crackers; sigh
1364  * :< more cycles.
1365  */
1366 static int
1367 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1368     uint32_t TSN_seq)
1369 {
1370 	struct sctp_tmit_chunk *at;
1371 	uint32_t tsn_est;
1372 
1373 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1374 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1375 			/* is it one bigger? */
1376 			tsn_est = at->rec.data.TSN_seq + 1;
1377 			if (tsn_est == TSN_seq) {
1378 				/* yep. It better be a last then */
1379 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1380 				    SCTP_DATA_LAST_FRAG) {
1381 					/*
1382 					 * Ok this guy belongs next to a guy
1383 					 * that is NOT last, it should be a
1384 					 * middle/last, not a complete
1385 					 * chunk.
1386 					 */
1387 					return (1);
1388 				} else {
1389 					/*
1390 					 * This guy is ok since it's a LAST
1391 					 * and the new chunk is a fully
1392 					 * self-contained one.
1393 					 */
1394 					return (0);
1395 				}
1396 			}
1397 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1398 			/* Software error since I have a dup? */
1399 			return (1);
1400 		} else {
1401 			/*
1402 			 * Ok, 'at' is larger than new chunk but does it
1403 			 * need to be right before it?
1404 			 */
1405 			tsn_est = TSN_seq + 1;
1406 			if (tsn_est == at->rec.data.TSN_seq) {
1407 				/* Yep, It better be a first */
1408 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1409 				    SCTP_DATA_FIRST_FRAG) {
1410 					return (1);
1411 				} else {
1412 					return (0);
1413 				}
1414 			}
1415 		}
1416 	}
1417 	return (0);
1418 }
1419 
1420 
1421 static int
1422 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1423     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1424     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1425     int *break_flag, int last_chunk)
1426 {
1427 	/* Process a data chunk */
1429 	struct sctp_tmit_chunk *chk;
1430 	uint32_t tsn, gap;
1431 	struct mbuf *dmbuf;
1432 	int the_len;
1433 	int need_reasm_check = 0;
1434 	uint16_t strmno, strmseq;
1435 	struct mbuf *oper;
1436 	struct sctp_queued_to_read *control;
1437 	int ordered;
1438 	uint32_t protocol_id;
1439 	uint8_t chunk_flags;
1440 	struct sctp_stream_reset_list *liste;
1441 
1442 	chk = NULL;
1443 	tsn = ntohl(ch->dp.tsn);
1444 	chunk_flags = ch->ch.chunk_flags;
1445 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1446 		asoc->send_sack = 1;
1447 	}
1448 	protocol_id = ch->dp.protocol_id;
1449 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1450 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1451 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1452 	}
1453 	if (stcb == NULL) {
1454 		return (0);
1455 	}
1456 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1457 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1458 		/* It is a duplicate */
1459 		SCTP_STAT_INCR(sctps_recvdupdata);
1460 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1461 			/* Record a dup for the next outbound sack */
1462 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1463 			asoc->numduptsns++;
1464 		}
1465 		asoc->send_sack = 1;
1466 		return (0);
1467 	}
1468 	/* Calculate the number of TSNs between the base and this TSN */
1469 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1470 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1471 		/* Can't hold the bit in the mapping at max array, toss it */
1472 		return (0);
1473 	}
1474 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1475 		SCTP_TCB_LOCK_ASSERT(stcb);
1476 		if (sctp_expand_mapping_array(asoc, gap)) {
1477 			/* Can't expand, drop it */
1478 			return (0);
1479 		}
1480 	}
1481 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1482 		*high_tsn = tsn;
1483 	}
1484 	/* See if we have received this one already */
1485 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1486 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1487 		SCTP_STAT_INCR(sctps_recvdupdata);
1488 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1489 			/* Record a dup for the next outbound sack */
1490 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1491 			asoc->numduptsns++;
1492 		}
1493 		asoc->send_sack = 1;
1494 		return (0);
1495 	}
1496 	/*
1497 	 * Check to see about the GONE flag, duplicates would cause a sack
1498 	 * to be sent up above
1499 	 */
1500 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1501 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1502 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1503 	    ) {
1504 		/*
1505 		 * wait a minute, this guy is gone, there is no longer a
1506 		 * receiver. Send peer an ABORT!
1507 		 */
1508 		struct mbuf *op_err;
1509 
1510 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1511 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1512 		*abort_flag = 1;
1513 		return (0);
1514 	}
1515 	/*
1516 	 * Now before going further we see if there is room. If NOT then we
1517 	 * MAY let one through only IF this TSN is the one we are waiting
1518 	 * for on a partial delivery API.
1519 	 */
1520 
1521 	/* now do the tests */
1522 	if (((asoc->cnt_on_all_streams +
1523 	    asoc->cnt_on_reasm_queue +
1524 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1525 	    (((int)asoc->my_rwnd) <= 0)) {
1526 		/*
1527 		 * When we have NO room in the rwnd we check to make sure
1528 		 * the reader is doing its job...
1529 		 */
1530 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1531 			/* some to read, wake-up */
1532 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1533 			struct socket *so;
1534 
1535 			so = SCTP_INP_SO(stcb->sctp_ep);
1536 			atomic_add_int(&stcb->asoc.refcnt, 1);
1537 			SCTP_TCB_UNLOCK(stcb);
1538 			SCTP_SOCKET_LOCK(so, 1);
1539 			SCTP_TCB_LOCK(stcb);
1540 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1541 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1542 				/* assoc was freed while we were unlocked */
1543 				SCTP_SOCKET_UNLOCK(so, 1);
1544 				return (0);
1545 			}
1546 #endif
1547 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1548 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1549 			SCTP_SOCKET_UNLOCK(so, 1);
1550 #endif
1551 		}
1552 		/* now is it in the mapping array of what we have accepted? */
1553 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1554 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1555 			/* Nope, not in the valid range; dump it */
1556 			sctp_set_rwnd(stcb, asoc);
1557 			if ((asoc->cnt_on_all_streams +
1558 			    asoc->cnt_on_reasm_queue +
1559 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1560 				SCTP_STAT_INCR(sctps_datadropchklmt);
1561 			} else {
1562 				SCTP_STAT_INCR(sctps_datadroprwnd);
1563 			}
1564 			*break_flag = 1;
1565 			return (0);
1566 		}
1567 	}
1568 	strmno = ntohs(ch->dp.stream_id);
1569 	if (strmno >= asoc->streamincnt) {
1570 		struct sctp_paramhdr *phdr;
1571 		struct mbuf *mb;
1572 
1573 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1574 		    0, M_DONTWAIT, 1, MT_DATA);
1575 		if (mb != NULL) {
1576 			/* add some space up front so prepend will work well */
1577 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1578 			phdr = mtod(mb, struct sctp_paramhdr *);
1579 			/*
1580 			 * Error causes are just params, and this one has
1581 			 * two back-to-back phdrs: one with the error type
1582 			 * and size, the other with the stream id and a rsvd
1583 			 */
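			/*
			 * For example, for stream id 5 the 8-byte cause is:
			 * { INVALID_STREAM, len 8 } followed by
			 * { 5 (already in network order), 0 }.
			 */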
1584 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1585 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1586 			phdr->param_length =
1587 			    htons(sizeof(struct sctp_paramhdr) * 2);
1588 			phdr++;
1589 			/* We insert the stream in the type field */
1590 			phdr->param_type = ch->dp.stream_id;
1591 			/* And set the length to 0 for the rsvd field */
1592 			phdr->param_length = 0;
1593 			sctp_queue_op_err(stcb, mb);
1594 		}
1595 		SCTP_STAT_INCR(sctps_badsid);
1596 		SCTP_TCB_LOCK_ASSERT(stcb);
1597 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1598 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1599 			asoc->highest_tsn_inside_nr_map = tsn;
1600 		}
1601 		if (tsn == (asoc->cumulative_tsn + 1)) {
1602 			/* Update cum-ack */
1603 			asoc->cumulative_tsn = tsn;
1604 		}
1605 		return (0);
1606 	}
1607 	/*
1608 	 * Before we continue, let's validate that we are not being fooled
1609 	 * by an evil attacker. We can only have 4k chunks given the TSN
1610 	 * spread allowed by the mapping array (512 * 8 bits), so there is
1611 	 * no way our stream sequence numbers could have wrapped. Of course
1612 	 * we only validate the FIRST fragment, so the bit must be set.
1613 	 */
1614 	strmseq = ntohs(ch->dp.stream_sequence);
1615 #ifdef SCTP_ASOCLOG_OF_TSNS
1616 	SCTP_TCB_LOCK_ASSERT(stcb);
1617 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1618 		asoc->tsn_in_at = 0;
1619 		asoc->tsn_in_wrapped = 1;
1620 	}
1621 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1622 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1623 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1624 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1625 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1626 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1627 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1628 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1629 	asoc->tsn_in_at++;
1630 #endif
1631 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1632 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1633 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1634 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1635 		/* The incoming sseq is behind where we last delivered? */
1636 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1637 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1638 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1639 		    0, M_DONTWAIT, 1, MT_DATA);
1640 		if (oper) {
1641 			struct sctp_paramhdr *ph;
1642 			uint32_t *ippp;
1643 
1644 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1645 			    (3 * sizeof(uint32_t));
1646 			ph = mtod(oper, struct sctp_paramhdr *);
1647 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1648 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1649 			ippp = (uint32_t *) (ph + 1);
1650 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1651 			ippp++;
1652 			*ippp = tsn;
1653 			ippp++;
1654 			*ippp = ((strmno << 16) | strmseq);
1655 
1656 		}
1657 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1658 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1659 		*abort_flag = 1;
1660 		return (0);
1661 	}
1662 	/************************************
1663 	 * From here down we may find ch-> invalid,
1664 	 * so it's a good idea NOT to use it.
1665 	 *************************************/
1666 
1667 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1668 	if (last_chunk == 0) {
1669 		dmbuf = SCTP_M_COPYM(*m,
1670 		    (offset + sizeof(struct sctp_data_chunk)),
1671 		    the_len, M_DONTWAIT);
1672 #ifdef SCTP_MBUF_LOGGING
1673 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1674 			struct mbuf *mat;
1675 
1676 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1677 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1678 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1679 				}
1680 			}
1681 		}
1682 #endif
1683 	} else {
1684 		/* We can steal the last chunk */
1685 		int l_len;
1686 
1687 		dmbuf = *m;
1688 		/* lop off the top part */
1689 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1690 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1691 			l_len = SCTP_BUF_LEN(dmbuf);
1692 		} else {
1693 			/*
1694 			 * Need to count up the size; hopefully we do not
1695 			 * hit this too often :-0
1696 			 */
1697 			struct mbuf *lat;
1698 
1699 			l_len = 0;
1700 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1701 				l_len += SCTP_BUF_LEN(lat);
1702 			}
1703 		}
1704 		if (l_len > the_len) {
1705 			/* Trim the extra bytes off the end too */
1706 			m_adj(dmbuf, -(l_len - the_len));
1707 		}
1708 	}
1709 	if (dmbuf == NULL) {
1710 		SCTP_STAT_INCR(sctps_nomem);
1711 		return (0);
1712 	}
1713 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1714 	    asoc->fragmented_delivery_inprogress == 0 &&
1715 	    TAILQ_EMPTY(&asoc->resetHead) &&
1716 	    ((ordered == 0) ||
1717 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1718 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1719 		/* Candidate for express delivery */
1720 		/*
1721 		 * It's not fragmented, no PD-API is up, nothing is in the
1722 		 * delivery queue, it's un-ordered OR ordered and the next to
1723 		 * deliver AND nothing else is stuck on the stream queue,
1724 		 * and there is room for it in the socket buffer. Let's just
1725 		 * stuff it up the buffer....
1726 		 */
1727 
1728 		/* It would be nice to avoid this copy if we could :< */
1729 		sctp_alloc_a_readq(stcb, control);
1730 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1731 		    protocol_id,
1732 		    stcb->asoc.context,
1733 		    strmno, strmseq,
1734 		    chunk_flags,
1735 		    dmbuf);
1736 		if (control == NULL) {
1737 			goto failed_express_del;
1738 		}
1739 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1740 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1741 			asoc->highest_tsn_inside_nr_map = tsn;
1742 		}
1743 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1744 		    control, &stcb->sctp_socket->so_rcv,
1745 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1746 
1747 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1748 			/* for ordered, bump what we delivered */
1749 			asoc->strmin[strmno].last_sequence_delivered++;
1750 		}
1751 		SCTP_STAT_INCR(sctps_recvexpress);
1752 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1753 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1754 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1755 		}
1756 		control = NULL;
1757 
1758 		goto finish_express_del;
1759 	}
1760 failed_express_del:
1761 	/* If we reach here this is a new chunk */
1762 	chk = NULL;
1763 	control = NULL;
1764 	/* Express for fragmented delivery? */
1765 	if ((asoc->fragmented_delivery_inprogress) &&
1766 	    (stcb->asoc.control_pdapi) &&
1767 	    (asoc->str_of_pdapi == strmno) &&
1768 	    (asoc->ssn_of_pdapi == strmseq)
1769 	    ) {
1770 		control = stcb->asoc.control_pdapi;
1771 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1772 			/* Can't be another first? */
1773 			goto failed_pdapi_express_del;
1774 		}
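		/*
		 * If this TSN immediately follows the last fragment handed
		 * to the PD-API reader, we can extend that message in place.
		 */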
1775 		if (tsn == (control->sinfo_tsn + 1)) {
1776 			/* Yep, we can add it on */
1777 			int end = 0;
1778 
1779 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1780 				end = 1;
1781 			}
1782 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1783 			    tsn,
1784 			    &stcb->sctp_socket->so_rcv)) {
1785 				SCTP_PRINTF("Append fails end:%d\n", end);
1786 				goto failed_pdapi_express_del;
1787 			}
1788 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1789 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1790 				asoc->highest_tsn_inside_nr_map = tsn;
1791 			}
1792 			SCTP_STAT_INCR(sctps_recvexpressm);
1793 			control->sinfo_tsn = tsn;
1794 			asoc->tsn_last_delivered = tsn;
1795 			asoc->fragment_flags = chunk_flags;
1796 			asoc->tsn_of_pdapi_last_delivered = tsn;
1797 			asoc->last_flags_delivered = chunk_flags;
1798 			asoc->last_strm_seq_delivered = strmseq;
1799 			asoc->last_strm_no_delivered = strmno;
1800 			if (end) {
1801 				/* clean up the flags and such */
1802 				asoc->fragmented_delivery_inprogress = 0;
1803 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1804 					asoc->strmin[strmno].last_sequence_delivered++;
1805 				}
1806 				stcb->asoc.control_pdapi = NULL;
1807 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1808 					/*
1809 					 * There could be another message
1810 					 * ready
1811 					 */
1812 					need_reasm_check = 1;
1813 				}
1814 			}
1815 			control = NULL;
1816 			goto finish_express_del;
1817 		}
1818 	}
1819 failed_pdapi_express_del:
1820 	control = NULL;
1821 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1822 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1824 			asoc->highest_tsn_inside_nr_map = tsn;
1825 		}
1826 	} else {
1827 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1828 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1829 			asoc->highest_tsn_inside_map = tsn;
1830 		}
1831 	}
1832 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1833 		sctp_alloc_a_chunk(stcb, chk);
1834 		if (chk == NULL) {
1835 			/* No memory so we drop the chunk */
1836 			SCTP_STAT_INCR(sctps_nomem);
1837 			if (last_chunk == 0) {
1838 				/* we copied it, free the copy */
1839 				sctp_m_freem(dmbuf);
1840 			}
1841 			return (0);
1842 		}
1843 		chk->rec.data.TSN_seq = tsn;
1844 		chk->no_fr_allowed = 0;
1845 		chk->rec.data.stream_seq = strmseq;
1846 		chk->rec.data.stream_number = strmno;
1847 		chk->rec.data.payloadtype = protocol_id;
1848 		chk->rec.data.context = stcb->asoc.context;
1849 		chk->rec.data.doing_fast_retransmit = 0;
1850 		chk->rec.data.rcv_flags = chunk_flags;
1851 		chk->asoc = asoc;
1852 		chk->send_size = the_len;
1853 		chk->whoTo = net;
1854 		atomic_add_int(&net->ref_count, 1);
1855 		chk->data = dmbuf;
1856 	} else {
1857 		sctp_alloc_a_readq(stcb, control);
1858 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1859 		    protocol_id,
1860 		    stcb->asoc.context,
1861 		    strmno, strmseq,
1862 		    chunk_flags,
1863 		    dmbuf);
1864 		if (control == NULL) {
1865 			/* No memory so we drop the chunk */
1866 			SCTP_STAT_INCR(sctps_nomem);
1867 			if (last_chunk == 0) {
1868 				/* we copied it, free the copy */
1869 				sctp_m_freem(dmbuf);
1870 			}
1871 			return (0);
1872 		}
1873 		control->length = the_len;
1874 	}
1875 
1876 	/* Mark it as received */
1877 	/* Now queue it where it belongs */
1878 	if (control != NULL) {
1879 		/* First a sanity check */
1880 		if (asoc->fragmented_delivery_inprogress) {
1881 			/*
1882 			 * Ok, we have a fragmented delivery in progress; if
1883 			 * this chunk is next to deliver OR belongs, in our
1884 			 * view, to the reassembly queue, the peer is evil or
1885 			 * broken.
1886 			 */
1887 			uint32_t estimate_tsn;
1888 
1889 			estimate_tsn = asoc->tsn_last_delivered + 1;
1890 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1891 			    (estimate_tsn == control->sinfo_tsn)) {
1892 				/* Evil/Broken peer */
1893 				sctp_m_freem(control->data);
1894 				control->data = NULL;
1895 				if (control->whoFrom) {
1896 					sctp_free_remote_addr(control->whoFrom);
1897 					control->whoFrom = NULL;
1898 				}
1899 				sctp_free_a_readq(stcb, control);
1900 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1901 				    0, M_DONTWAIT, 1, MT_DATA);
1902 				if (oper) {
1903 					struct sctp_paramhdr *ph;
1904 					uint32_t *ippp;
1905 
1906 					SCTP_BUF_LEN(oper) =
1907 					    sizeof(struct sctp_paramhdr) +
1908 					    (3 * sizeof(uint32_t));
1909 					ph = mtod(oper, struct sctp_paramhdr *);
1910 					ph->param_type =
1911 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1912 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1913 					ippp = (uint32_t *) (ph + 1);
1914 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1915 					ippp++;
1916 					*ippp = tsn;
1917 					ippp++;
1918 					*ippp = ((strmno << 16) | strmseq);
1919 				}
1920 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1921 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1922 				*abort_flag = 1;
1923 				return (0);
1924 			} else {
1925 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1926 					sctp_m_freem(control->data);
1927 					control->data = NULL;
1928 					if (control->whoFrom) {
1929 						sctp_free_remote_addr(control->whoFrom);
1930 						control->whoFrom = NULL;
1931 					}
1932 					sctp_free_a_readq(stcb, control);
1933 
1934 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935 					    0, M_DONTWAIT, 1, MT_DATA);
1936 					if (oper) {
1937 						struct sctp_paramhdr *ph;
1938 						uint32_t *ippp;
1939 
1940 						SCTP_BUF_LEN(oper) =
1941 						    sizeof(struct sctp_paramhdr) +
1942 						    (3 * sizeof(uint32_t));
1943 						ph = mtod(oper,
1944 						    struct sctp_paramhdr *);
1945 						ph->param_type =
1946 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1947 						ph->param_length =
1948 						    htons(SCTP_BUF_LEN(oper));
1949 						ippp = (uint32_t *) (ph + 1);
1950 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1951 						ippp++;
1952 						*ippp = tsn;
1953 						ippp++;
1954 						*ippp = ((strmno << 16) | strmseq);
1955 					}
1956 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1957 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1958 					*abort_flag = 1;
1959 					return (0);
1960 				}
1961 			}
1962 		} else {
1963 			/* No PDAPI running */
1964 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1965 				/*
1966 				 * Reassembly queue is NOT empty; validate
1967 				 * that this tsn does not need to be in the
1968 				 * reassembly queue. If it does, then our
1969 				 * peer is broken or evil.
1970 				 */
1971 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1972 					sctp_m_freem(control->data);
1973 					control->data = NULL;
1974 					if (control->whoFrom) {
1975 						sctp_free_remote_addr(control->whoFrom);
1976 						control->whoFrom = NULL;
1977 					}
1978 					sctp_free_a_readq(stcb, control);
1979 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1980 					    0, M_DONTWAIT, 1, MT_DATA);
1981 					if (oper) {
1982 						struct sctp_paramhdr *ph;
1983 						uint32_t *ippp;
1984 
1985 						SCTP_BUF_LEN(oper) =
1986 						    sizeof(struct sctp_paramhdr) +
1987 						    (3 * sizeof(uint32_t));
1988 						ph = mtod(oper,
1989 						    struct sctp_paramhdr *);
1990 						ph->param_type =
1991 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1992 						ph->param_length =
1993 						    htons(SCTP_BUF_LEN(oper));
1994 						ippp = (uint32_t *) (ph + 1);
1995 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1996 						ippp++;
1997 						*ippp = tsn;
1998 						ippp++;
1999 						*ippp = ((strmno << 16) | strmseq);
2000 					}
2001 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2002 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2003 					*abort_flag = 1;
2004 					return (0);
2005 				}
2006 			}
2007 		}
2008 		/* ok, if we reach here we have passed the sanity checks */
2009 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2010 			/* queue directly into socket buffer */
2011 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2012 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2013 			    control,
2014 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2015 		} else {
2016 			/*
2017 			 * Special check for when streams are resetting. We
2018 			 * could be smarter about this and check the actual
2019 			 * stream to see if it is not being reset.. that way
2020 			 * we would not create a HOLB between streams being
2021 			 * reset and those not being reset.
2022 			 *
2023 			 * We take complete messages that have a stream reset
2024 			 * intervening (aka the TSN is after where our
2025 			 * cum-ack needs to be) off and put them on the
2026 			 * pending_reply_queue. The reassembly ones we do
2027 			 * not have to worry about, since they are all sorted
2028 			 * and processed in TSN order. It is only the
2029 			 * singletons we must worry about.
2030 			 */
2031 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2032 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2033 				/*
2034 				 * yep, it's past where we need to reset... go
2035 				 * ahead and queue it.
2036 				 */
2037 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2038 					/* first one on */
2039 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2040 				} else {
2041 					struct sctp_queued_to_read *ctlOn,
2042 					                   *nctlOn;
2043 					unsigned char inserted = 0;
2044 
2045 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2046 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2047 							continue;
2048 						} else {
2049 							/* found it */
2050 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2051 							inserted = 1;
2052 							break;
2053 						}
2054 					}
2055 					if (inserted == 0) {
2056 						/*
2057 						 * Not inserted by the
2058 						 * loop above, so it goes
2059 						 * at the end of the queue.
2060 						 */
2061 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2062 					}
2063 				}
2064 			} else {
2065 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2066 				if (*abort_flag) {
2067 					return (0);
2068 				}
2069 			}
2070 		}
2071 	} else {
2072 		/* Into the re-assembly queue */
2073 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2074 		if (*abort_flag) {
2075 			/*
2076 			 * the assoc is now gone and chk was put onto the
2077 			 * reasm queue, which has all been freed.
2078 			 */
2079 			*m = NULL;
2080 			return (0);
2081 		}
2082 	}
2083 finish_express_del:
2084 	if (tsn == (asoc->cumulative_tsn + 1)) {
2085 		/* Update cum-ack */
2086 		asoc->cumulative_tsn = tsn;
2087 	}
2088 	if (last_chunk) {
2089 		*m = NULL;
2090 	}
2091 	if (ordered) {
2092 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2093 	} else {
2094 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2095 	}
2096 	SCTP_STAT_INCR(sctps_recvdata);
2097 	/* Set it present please */
2098 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2099 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2100 	}
2101 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2102 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2103 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2104 	}
2105 	/* check the special flag for stream resets */
2106 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2107 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2108 		/*
2109 		 * We have finished working through the backlogged TSNs; now
2110 		 * it is time to reset streams. 1: call the reset function,
2111 		 * 2: free pending_reply space, 3: distribute any chunks in
2112 		 * the pending_reply_queue.
2113 		 */
2114 		struct sctp_queued_to_read *ctl, *nctl;
2115 
2116 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2117 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2118 		SCTP_FREE(liste, SCTP_M_STRESET);
2119 		/* sa_ignore FREED_MEMORY */
2120 		liste = TAILQ_FIRST(&asoc->resetHead);
2121 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2122 			/* All can be removed */
2123 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2124 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2125 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2126 				if (*abort_flag) {
2127 					return (0);
2128 				}
2129 			}
2130 		} else {
2131 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2132 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2133 					break;
2134 				}
2135 				/*
2136 				 * If ctl->sinfo_tsn is <= liste->tsn we can
2137 				 * process it, which is the negation of the
2138 				 * break test above (ctl->sinfo_tsn > liste->tsn).
2139 				 */
2140 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2141 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2142 				if (*abort_flag) {
2143 					return (0);
2144 				}
2145 			}
2146 		}
2147 		/*
2148 		 * Now service reassembly to pick up anything that has been
2149 		 * held on the reassembly queue.
2150 		 */
2151 		sctp_deliver_reasm_check(stcb, asoc);
2152 		need_reasm_check = 0;
2153 	}
2154 	if (need_reasm_check) {
2155 		/* Another one waits ? */
2156 		sctp_deliver_reasm_check(stcb, asoc);
2157 	}
2158 	return (1);
2159 }
2160 
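/*
 * sctp_map_lookup_tab[val] is the number of consecutive one-bits in val
 * starting at bit 0 (e.g. tab[0x07] = 3, tab[0xff] = 8). It lets
 * sctp_slide_mapping_arrays() count received TSNs a byte at a time.
 */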
2161 int8_t sctp_map_lookup_tab[256] = {
2162 	0, 1, 0, 2, 0, 1, 0, 3,
2163 	0, 1, 0, 2, 0, 1, 0, 4,
2164 	0, 1, 0, 2, 0, 1, 0, 3,
2165 	0, 1, 0, 2, 0, 1, 0, 5,
2166 	0, 1, 0, 2, 0, 1, 0, 3,
2167 	0, 1, 0, 2, 0, 1, 0, 4,
2168 	0, 1, 0, 2, 0, 1, 0, 3,
2169 	0, 1, 0, 2, 0, 1, 0, 6,
2170 	0, 1, 0, 2, 0, 1, 0, 3,
2171 	0, 1, 0, 2, 0, 1, 0, 4,
2172 	0, 1, 0, 2, 0, 1, 0, 3,
2173 	0, 1, 0, 2, 0, 1, 0, 5,
2174 	0, 1, 0, 2, 0, 1, 0, 3,
2175 	0, 1, 0, 2, 0, 1, 0, 4,
2176 	0, 1, 0, 2, 0, 1, 0, 3,
2177 	0, 1, 0, 2, 0, 1, 0, 7,
2178 	0, 1, 0, 2, 0, 1, 0, 3,
2179 	0, 1, 0, 2, 0, 1, 0, 4,
2180 	0, 1, 0, 2, 0, 1, 0, 3,
2181 	0, 1, 0, 2, 0, 1, 0, 5,
2182 	0, 1, 0, 2, 0, 1, 0, 3,
2183 	0, 1, 0, 2, 0, 1, 0, 4,
2184 	0, 1, 0, 2, 0, 1, 0, 3,
2185 	0, 1, 0, 2, 0, 1, 0, 6,
2186 	0, 1, 0, 2, 0, 1, 0, 3,
2187 	0, 1, 0, 2, 0, 1, 0, 4,
2188 	0, 1, 0, 2, 0, 1, 0, 3,
2189 	0, 1, 0, 2, 0, 1, 0, 5,
2190 	0, 1, 0, 2, 0, 1, 0, 3,
2191 	0, 1, 0, 2, 0, 1, 0, 4,
2192 	0, 1, 0, 2, 0, 1, 0, 3,
2193 	0, 1, 0, 2, 0, 1, 0, 8
2194 };
2195 
2196 
2197 void
2198 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2199 {
2200 	/*
2201 	 * Now we also need to check the mapping array in a couple of ways.
2202 	 * 1) Did we move the cum-ack point?
2203 	 *
2204 	 * When you first glance at this you might think that all entries that
2205 	 * make up the position of the cum-ack would be in the nr-mapping
2206 	 * array only.. i.e. things up to the cum-ack are always
2207 	 * deliverable. That's true with one exception: when it's a fragmented
2208 	 * message we may not deliver the data until some threshold (or all
2209 	 * of it) is in place. So we must OR the nr_mapping_array and
2210 	 * mapping_array to get a true picture of the cum-ack.
2211 	 */
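	/*
	 * Example: with mapping_array_base_tsn = 100, nr_mapping_array[0] =
	 * 0x07 (TSNs 100-102 deliverable) and mapping_array[0] = 0x08 (TSN
	 * 103 still held for reassembly), the OR gives 0x0f and the cum-ack
	 * advances to 103.
	 */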
2212 	struct sctp_association *asoc;
2213 	int at;
2214 	uint8_t val;
2215 	int slide_from, slide_end, lgap, distance;
2216 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2217 
2218 	asoc = &stcb->asoc;
2219 
2220 	old_cumack = asoc->cumulative_tsn;
2221 	old_base = asoc->mapping_array_base_tsn;
2222 	old_highest = asoc->highest_tsn_inside_map;
2223 	/*
2224 	 * We could probably improve this a small bit by calculating the
2225 	 * offset of the current cum-ack as the starting point.
2226 	 */
2227 	at = 0;
2228 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2229 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2230 		if (val == 0xff) {
2231 			at += 8;
2232 		} else {
2233 			/* there is a 0 bit */
2234 			at += sctp_map_lookup_tab[val];
2235 			break;
2236 		}
2237 	}
2238 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2239 
2240 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2241 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2242 #ifdef INVARIANTS
2243 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2244 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2245 #else
2246 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2247 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2248 		sctp_print_mapping_array(asoc);
2249 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2250 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2251 		}
2252 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2253 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2254 #endif
2255 	}
2256 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2257 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2258 	} else {
2259 		highest_tsn = asoc->highest_tsn_inside_map;
2260 	}
2261 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2262 		/* The complete array was completed by a single FR */
2263 		/* highest becomes the cum-ack */
2264 		int clr;
2265 
2266 #ifdef INVARIANTS
2267 		unsigned int i;
2268 
2269 #endif
2270 
2271 		/* clear the array */
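		/* "at" is a bit count; round up to whole bytes */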
2272 		clr = ((at + 7) >> 3);
2273 		if (clr > asoc->mapping_array_size) {
2274 			clr = asoc->mapping_array_size;
2275 		}
2276 		memset(asoc->mapping_array, 0, clr);
2277 		memset(asoc->nr_mapping_array, 0, clr);
2278 #ifdef INVARIANTS
2279 		for (i = 0; i < asoc->mapping_array_size; i++) {
2280 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2281 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2282 				sctp_print_mapping_array(asoc);
2283 			}
2284 		}
2285 #endif
2286 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2287 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2288 	} else if (at >= 8) {
2289 		/* we can slide the mapping array down */
2290 		/* slide_from holds where we hit the first NON 0xff byte */
2291 
2292 		/*
2293 		 * now calculate the ceiling of the move using our highest
2294 		 * TSN value
2295 		 */
2296 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2297 		slide_end = (lgap >> 3);
2298 		if (slide_end < slide_from) {
2299 			sctp_print_mapping_array(asoc);
2300 #ifdef INVARIANTS
2301 			panic("impossible slide");
2302 #else
2303 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2304 			    lgap, slide_end, slide_from, at);
2305 			return;
2306 #endif
2307 		}
2308 		if (slide_end > asoc->mapping_array_size) {
2309 #ifdef INVARIANTS
2310 			panic("would overrun buffer");
2311 #else
2312 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2313 			    asoc->mapping_array_size, slide_end);
2314 			slide_end = asoc->mapping_array_size;
2315 #endif
2316 		}
2317 		distance = (slide_end - slide_from) + 1;
2318 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2319 			sctp_log_map(old_base, old_cumack, old_highest,
2320 			    SCTP_MAP_PREPARE_SLIDE);
2321 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2322 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2323 		}
2324 		if (distance + slide_from > asoc->mapping_array_size ||
2325 		    distance < 0) {
2326 			/*
2327 			 * Here we do NOT slide forward the array so that
2328 			 * hopefully when more data comes in to fill it up
2329 			 * we will be able to slide it forward. Really I
2330 			 * don't think this should happen :-0
2331 			 */
2332 
2333 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2334 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2335 				    (uint32_t) asoc->mapping_array_size,
2336 				    SCTP_MAP_SLIDE_NONE);
2337 			}
2338 		} else {
2339 			int ii;
2340 
2341 			for (ii = 0; ii < distance; ii++) {
2342 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2343 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2344 
2345 			}
2346 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2347 				asoc->mapping_array[ii] = 0;
2348 				asoc->nr_mapping_array[ii] = 0;
2349 			}
2350 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2351 				asoc->highest_tsn_inside_map += (slide_from << 3);
2352 			}
2353 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2354 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2355 			}
2356 			asoc->mapping_array_base_tsn += (slide_from << 3);
2357 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2358 				sctp_log_map(asoc->mapping_array_base_tsn,
2359 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2360 				    SCTP_MAP_SLIDE_RESULT);
2361 			}
2362 		}
2363 	}
2364 }
2365 
2366 void
2367 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2368 {
2369 	struct sctp_association *asoc;
2370 	uint32_t highest_tsn;
2371 
2372 	asoc = &stcb->asoc;
2373 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2374 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2375 	} else {
2376 		highest_tsn = asoc->highest_tsn_inside_map;
2377 	}
2378 
2379 	/*
2380 	 * Now we need to see if we need to queue a sack or just start the
2381 	 * timer (if allowed).
2382 	 */
2383 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2384 		/*
2385 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2386 		 * sure the SACK timer is off and instead send a SHUTDOWN
2387 		 * and a SACK.
2388 		 */
2389 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2390 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2391 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2392 		}
2393 		sctp_send_shutdown(stcb,
2394 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2395 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2396 	} else {
2397 		int is_a_gap;
2398 
2399 		/* is there a gap now ? */
2400 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2401 
2402 		/*
2403 		 * CMT DAC algorithm: increase number of packets received
2404 		 * since last ack
2405 		 */
2406 		stcb->asoc.cmt_dac_pkts_rcvd++;
2407 
2408 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2409 							 * SACK */
2410 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2411 							 * longer is one */
2412 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2413 		    (is_a_gap) ||	/* is still a gap */
2414 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2415 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2416 		    ) {
2417 
2418 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2419 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2420 			    (stcb->asoc.send_sack == 0) &&
2421 			    (stcb->asoc.numduptsns == 0) &&
2422 			    (stcb->asoc.delayed_ack) &&
2423 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2424 
2425 				/*
2426 				 * CMT DAC algorithm: with CMT, delay acks
2427 				 * even in the face of reordering.
2428 				 * Therefore, acks that do not have to be
2429 				 * sent for the above reasons will be
2430 				 * delayed. That is, acks that would have
2431 				 * been sent due to gap reports will be
2432 				 * delayed with DAC. Start the delayed ack
2433 				 * timer.
2435 				 */
2436 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2437 				    stcb->sctp_ep, stcb, NULL);
2438 			} else {
2439 				/*
2440 				 * Ok, we must build a SACK since the timer
2441 				 * is pending, we got our first packet, OR
2442 				 * there are gaps or duplicates.
2443 				 */
2444 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2445 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2446 			}
2447 		} else {
2448 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2449 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2450 				    stcb->sctp_ep, stcb, NULL);
2451 			}
2452 		}
2453 	}
2454 }
2455 
2456 void
2457 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2458 {
2459 	struct sctp_tmit_chunk *chk;
2460 	uint32_t tsize, pd_point;
2461 	uint16_t nxt_todel;
2462 
2463 	if (asoc->fragmented_delivery_inprogress) {
2464 		sctp_service_reassembly(stcb, asoc);
2465 	}
2466 	/* Can we proceed further, i.e. the PD-API is complete */
2467 	if (asoc->fragmented_delivery_inprogress) {
2468 		/* no */
2469 		return;
2470 	}
2471 	/*
2472 	 * Now is there some other chunk I can deliver from the reassembly
2473 	 * queue.
2474 	 */
2475 doit_again:
2476 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2477 	if (chk == NULL) {
2478 		asoc->size_on_reasm_queue = 0;
2479 		asoc->cnt_on_reasm_queue = 0;
2480 		return;
2481 	}
2482 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2483 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2484 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2485 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2486 		/*
2487 		 * Yep, the first one is here. We set up to start reception
2488 		 * by backing down the TSN just in case we can't deliver.
2489 		 */
2490 
2491 		/*
2492 		 * Before we start, though, either the whole message should
2493 		 * be here, or at least the partial delivery point (bounded
2494 		 * by the socket buffer limit) worth of it should be queued.
2495 		 */
2496 		if (stcb->sctp_socket) {
2497 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2498 			    stcb->sctp_ep->partial_delivery_point);
2499 		} else {
2500 			pd_point = stcb->sctp_ep->partial_delivery_point;
2501 		}
2502 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2503 			asoc->fragmented_delivery_inprogress = 1;
2504 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2505 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2506 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2507 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2508 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2509 			sctp_service_reassembly(stcb, asoc);
2510 			if (asoc->fragmented_delivery_inprogress == 0) {
2511 				goto doit_again;
2512 			}
2513 		}
2514 	}
2515 }
2516 
2517 int
2518 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2519     struct sockaddr *src, struct sockaddr *dst,
2520     struct sctphdr *sh, struct sctp_inpcb *inp,
2521     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2522     uint8_t use_mflowid, uint32_t mflowid,
2523     uint32_t vrf_id, uint16_t port)
2524 {
2525 	struct sctp_data_chunk *ch, chunk_buf;
2526 	struct sctp_association *asoc;
2527 	int num_chunks = 0;	/* number of control chunks processed */
2528 	int stop_proc = 0;
2529 	int chk_length, break_flag, last_chunk;
2530 	int abort_flag = 0, was_a_gap;
2531 	struct mbuf *m;
2532 	uint32_t highest_tsn;
2533 
2534 	/* set the rwnd */
2535 	sctp_set_rwnd(stcb, &stcb->asoc);
2536 
2537 	m = *mm;
2538 	SCTP_TCB_LOCK_ASSERT(stcb);
2539 	asoc = &stcb->asoc;
2540 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2541 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2542 	} else {
2543 		highest_tsn = asoc->highest_tsn_inside_map;
2544 	}
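	/*
	 * Remember whether a gap existed before this packet;
	 * sctp_sack_check() sends an immediate SACK when a gap that
	 * existed has now been filled.
	 */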
2545 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2546 	/*
2547 	 * setup where we got the last DATA packet from for any SACK that
2548 	 * may need to go out. Don't bump the net. This is done ONLY when a
2549 	 * chunk is assigned.
2550 	 */
2551 	asoc->last_data_chunk_from = net;
2552 
2553 	/*-
2554 	 * Now before we proceed we must figure out if this is a wasted
2555 	 * cluster... i.e. it is a small packet sent in and yet the driver
2556 	 * underneath allocated a full cluster for it. If so we must copy it
2557 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2558 	 * with cluster starvation. Note for __Panda__ we don't do this
2559 	 * since it has clusters all the way down to 64 bytes.
2560 	 */
2561 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2562 		/* we only handle mbufs that are singletons.. not chains */
2563 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2564 		if (m) {
2565 			/* ok, let's see if we can copy the data up */
2566 			caddr_t *from, *to;
2567 
2568 			/* get the pointers and copy */
2569 			to = mtod(m, caddr_t *);
2570 			from = mtod((*mm), caddr_t *);
2571 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2572 			/* copy the length and free up the old */
2573 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2574 			sctp_m_freem(*mm);
2575 			/* success, back copy */
2576 			*mm = m;
2577 		} else {
2578 			/* We are in trouble in the mbuf world .. yikes */
2579 			m = *mm;
2580 		}
2581 	}
2582 	/* get pointer to the first chunk header */
2583 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2584 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2585 	if (ch == NULL) {
2586 		return (1);
2587 	}
2588 	/*
2589 	 * process all DATA chunks...
2590 	 */
2591 	*high_tsn = asoc->cumulative_tsn;
2592 	break_flag = 0;
2593 	asoc->data_pkts_seen++;
2594 	while (stop_proc == 0) {
2595 		/* validate chunk length */
2596 		chk_length = ntohs(ch->ch.chunk_length);
2597 		if (length - *offset < chk_length) {
2598 			/* all done, mutilated chunk */
2599 			stop_proc = 1;
2600 			continue;
2601 		}
2602 		if (ch->ch.chunk_type == SCTP_DATA) {
2603 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2604 				/*
2605 				 * Need to send an abort since we had an
2606 				 * invalid data chunk.
2607 				 */
2608 				struct mbuf *op_err;
2609 
2610 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2611 				    0, M_DONTWAIT, 1, MT_DATA);
2612 
2613 				if (op_err) {
2614 					struct sctp_paramhdr *ph;
2615 					uint32_t *ippp;
2616 
2617 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2618 					    (2 * sizeof(uint32_t));
2619 					ph = mtod(op_err, struct sctp_paramhdr *);
2620 					ph->param_type =
2621 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2622 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2623 					ippp = (uint32_t *) (ph + 1);
2624 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2625 					ippp++;
2626 					*ippp = asoc->cumulative_tsn;
2627 
2628 				}
2629 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2630 				sctp_abort_association(inp, stcb, m, iphlen,
2631 				    src, dst, sh, op_err,
2632 				    use_mflowid, mflowid,
2633 				    vrf_id, port);
2634 				return (2);
2635 			}
2636 #ifdef SCTP_AUDITING_ENABLED
2637 			sctp_audit_log(0xB1, 0);
2638 #endif
2639 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2640 				last_chunk = 1;
2641 			} else {
2642 				last_chunk = 0;
2643 			}
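			/*
			 * last_chunk lets sctp_process_a_data_chunk() steal
			 * the mbuf chain instead of copying the payload.
			 */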
2644 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2645 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2646 			    last_chunk)) {
2647 				num_chunks++;
2648 			}
2649 			if (abort_flag)
2650 				return (2);
2651 
2652 			if (break_flag) {
2653 				/*
2654 				 * Set because we ran out of rwnd space and
2655 				 * have no drop report space left.
2656 				 */
2657 				stop_proc = 1;
2658 				continue;
2659 			}
2660 		} else {
2661 			/* not a data chunk in the data region */
2662 			switch (ch->ch.chunk_type) {
2663 			case SCTP_INITIATION:
2664 			case SCTP_INITIATION_ACK:
2665 			case SCTP_SELECTIVE_ACK:
2666 			case SCTP_NR_SELECTIVE_ACK:
2667 			case SCTP_HEARTBEAT_REQUEST:
2668 			case SCTP_HEARTBEAT_ACK:
2669 			case SCTP_ABORT_ASSOCIATION:
2670 			case SCTP_SHUTDOWN:
2671 			case SCTP_SHUTDOWN_ACK:
2672 			case SCTP_OPERATION_ERROR:
2673 			case SCTP_COOKIE_ECHO:
2674 			case SCTP_COOKIE_ACK:
2675 			case SCTP_ECN_ECHO:
2676 			case SCTP_ECN_CWR:
2677 			case SCTP_SHUTDOWN_COMPLETE:
2678 			case SCTP_AUTHENTICATION:
2679 			case SCTP_ASCONF_ACK:
2680 			case SCTP_PACKET_DROPPED:
2681 			case SCTP_STREAM_RESET:
2682 			case SCTP_FORWARD_CUM_TSN:
2683 			case SCTP_ASCONF:
2684 				/*
2685 				 * Now, what do we do with KNOWN chunks that
2686 				 * are NOT in the right place?
2687 				 *
2688 				 * For now, I do nothing but ignore them. We
2689 				 * may later want to add sysctl stuff to
2690 				 * switch out and do either an ABORT() or
2691 				 * possibly process them.
2692 				 */
2693 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2694 					struct mbuf *op_err;
2695 
2696 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2697 					sctp_abort_association(inp, stcb,
2698 					    m, iphlen,
2699 					    src, dst,
2700 					    sh, op_err,
2701 					    use_mflowid, mflowid,
2702 					    vrf_id, port);
2703 					return (2);
2704 				}
2705 				break;
2706 			default:
2707 				/* unknown chunk type, use bit rules */
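				/*
				 * Upper two bits of the chunk type (RFC
				 * 4960, section 3.2): 0x40 set means report
				 * the unrecognized chunk in an ERROR; 0x80
				 * set means skip it and keep processing,
				 * otherwise stop processing this packet.
				 */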
2708 				if (ch->ch.chunk_type & 0x40) {
2709 					/* Add a error report to the queue */
2710 					struct mbuf *merr;
2711 					struct sctp_paramhdr *phd;
2712 
2713 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2714 					if (merr) {
2715 						phd = mtod(merr, struct sctp_paramhdr *);
2716 						/*
2717 						 * We cheat and use param
2718 						 * type since we did not
2719 						 * bother to define an error
2720 						 * cause struct. They are
2721 						 * the same basic format
2722 						 * with different names.
2723 						 */
2724 						phd->param_type =
2725 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2726 						phd->param_length =
2727 						    htons(chk_length + sizeof(*phd));
2728 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2729 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2730 						if (SCTP_BUF_NEXT(merr)) {
2731 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2732 								sctp_m_freem(merr);
2733 							} else {
2734 								sctp_queue_op_err(stcb, merr);
2735 							}
2736 						} else {
2737 							sctp_m_freem(merr);
2738 						}
2739 					}
2740 				}
2741 				if ((ch->ch.chunk_type & 0x80) == 0) {
2742 					/* discard the rest of this packet */
2743 					stop_proc = 1;
2744 				}	/* else skip this bad chunk and
2745 					 * continue... */
2746 				break;
2747 			}	/* switch of chunk type */
2748 		}
2749 		*offset += SCTP_SIZE32(chk_length);
2750 		if ((*offset >= length) || stop_proc) {
2751 			/* no more data left in the mbuf chain */
2752 			stop_proc = 1;
2753 			continue;
2754 		}
2755 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2756 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2757 		if (ch == NULL) {
2758 			*offset = length;
2759 			stop_proc = 1;
2760 			continue;
2761 		}
2762 	}
2763 	if (break_flag) {
2764 		/*
2765 		 * we need to report rwnd overrun drops.
2766 		 */
2767 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2768 	}
2769 	if (num_chunks) {
2770 		/*
2771 		 * Did we get data? If so, update the time for auto-close
2772 		 * and give the peer credit for being alive.
2773 		 */
2774 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2775 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2776 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2777 			    stcb->asoc.overall_error_count,
2778 			    0,
2779 			    SCTP_FROM_SCTP_INDATA,
2780 			    __LINE__);
2781 		}
2782 		stcb->asoc.overall_error_count = 0;
2783 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2784 	}
2785 	/* now service all of the reassm queue if needed */
2786 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2787 		sctp_service_queues(stcb, asoc);
2788 
2789 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2790 		/* Assure that we ack right away */
2791 		stcb->asoc.send_sack = 1;
2792 	}
2793 	/* Start a sack timer or QUEUE a SACK for sending */
2794 	sctp_sack_check(stcb, was_a_gap);
2795 	return (0);
2796 }
2797 
2798 static int
2799 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2800     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2801     int *num_frs,
2802     uint32_t * biggest_newly_acked_tsn,
2803     uint32_t * this_sack_lowest_newack,
2804     int *rto_ok)
2805 {
2806 	struct sctp_tmit_chunk *tp1;
2807 	unsigned int theTSN;
2808 	int j, wake_him = 0, circled = 0;
2809 
2810 	/* Recover the tp1 we last saw */
2811 	tp1 = *p_tp1;
2812 	if (tp1 == NULL) {
2813 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2814 	}
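	/*
	 * Gap Ack Block offsets are relative to the Cumulative TSN Ack
	 * (last_tsn), so this walk covers TSNs last_tsn + frag_strt
	 * through last_tsn + frag_end, inclusive.
	 */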
2815 	for (j = frag_strt; j <= frag_end; j++) {
2816 		theTSN = j + last_tsn;
2817 		while (tp1) {
2818 			if (tp1->rec.data.doing_fast_retransmit)
2819 				(*num_frs) += 1;
2820 
2821 			/*-
2822 			 * CMT: CUCv2 algorithm. For each TSN being
2823 			 * processed from the sent queue, track the
2824 			 * next expected pseudo-cumack, or
2825 			 * rtx_pseudo_cumack, if required. Separate
2826 			 * cumack trackers for first transmissions,
2827 			 * and retransmissions.
2828 			 */
2829 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2830 			    (tp1->snd_count == 1)) {
2831 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2832 				tp1->whoTo->find_pseudo_cumack = 0;
2833 			}
2834 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2835 			    (tp1->snd_count > 1)) {
2836 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2837 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2838 			}
2839 			if (tp1->rec.data.TSN_seq == theTSN) {
2840 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2841 					/*-
2842 					 * must be held until
2843 					 * cum-ack passes
2844 					 */
2845 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2846 						/*-
2847 						 * If it is less than RESEND, it is
2848 						 * now no longer in flight.
2849 						 * Higher values may already be set
2850 						 * via previous Gap Ack Blocks...
2851 						 * i.e. ACKED or RESEND.
2852 						 */
2853 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2854 						    *biggest_newly_acked_tsn)) {
2855 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2856 						}
2857 						/*-
2858 						 * CMT: SFR algo (and HTNA) - set
2859 						 * saw_newack to 1 for dest being
2860 						 * newly acked. update
2861 						 * this_sack_highest_newack if
2862 						 * appropriate.
2863 						 */
2864 						if (tp1->rec.data.chunk_was_revoked == 0)
2865 							tp1->whoTo->saw_newack = 1;
2866 
2867 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2868 						    tp1->whoTo->this_sack_highest_newack)) {
2869 							tp1->whoTo->this_sack_highest_newack =
2870 							    tp1->rec.data.TSN_seq;
2871 						}
2872 						/*-
2873 						 * CMT DAC algo: also update
2874 						 * this_sack_lowest_newack
2875 						 */
2876 						if (*this_sack_lowest_newack == 0) {
2877 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2878 								sctp_log_sack(*this_sack_lowest_newack,
2879 								    last_tsn,
2880 								    tp1->rec.data.TSN_seq,
2881 								    0,
2882 								    0,
2883 								    SCTP_LOG_TSN_ACKED);
2884 							}
2885 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2886 						}
2887 						/*-
2888 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2889 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2890 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2891 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2892 						 * Separate pseudo_cumack trackers for first transmissions and
2893 						 * retransmissions.
2894 						 */
2895 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2896 							if (tp1->rec.data.chunk_was_revoked == 0) {
2897 								tp1->whoTo->new_pseudo_cumack = 1;
2898 							}
2899 							tp1->whoTo->find_pseudo_cumack = 1;
2900 						}
2901 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2902 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2903 						}
2904 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2905 							if (tp1->rec.data.chunk_was_revoked == 0) {
2906 								tp1->whoTo->new_pseudo_cumack = 1;
2907 							}
2908 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2909 						}
2910 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2911 							sctp_log_sack(*biggest_newly_acked_tsn,
2912 							    last_tsn,
2913 							    tp1->rec.data.TSN_seq,
2914 							    frag_strt,
2915 							    frag_end,
2916 							    SCTP_LOG_TSN_ACKED);
2917 						}
2918 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2919 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2920 							    tp1->whoTo->flight_size,
2921 							    tp1->book_size,
2922 							    (uintptr_t) tp1->whoTo,
2923 							    tp1->rec.data.TSN_seq);
2924 						}
2925 						sctp_flight_size_decrease(tp1);
2926 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2927 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2928 							    tp1);
2929 						}
2930 						sctp_total_flight_decrease(stcb, tp1);
2931 
2932 						tp1->whoTo->net_ack += tp1->send_size;
2933 						if (tp1->snd_count < 2) {
2934 							/*-
2935 							 * True non-retransmitted chunk
2936 							 */
2937 							tp1->whoTo->net_ack2 += tp1->send_size;
2938 
2939 							/*-
2940 							 * update RTO too?
2941 							 */
2942 							if (tp1->do_rtt) {
2943 								if (*rto_ok) {
2944 									tp1->whoTo->RTO =
2945 									    sctp_calculate_rto(stcb,
2946 									    &stcb->asoc,
2947 									    tp1->whoTo,
2948 									    &tp1->sent_rcv_time,
2949 									    sctp_align_safe_nocopy,
2950 									    SCTP_RTT_FROM_DATA);
2951 									*rto_ok = 0;
2952 								}
2953 								if (tp1->whoTo->rto_needed == 0) {
2954 									tp1->whoTo->rto_needed = 1;
2955 								}
2956 								tp1->do_rtt = 0;
2957 							}
2958 						}
2959 					}
2960 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2961 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2962 						    stcb->asoc.this_sack_highest_gap)) {
2963 							stcb->asoc.this_sack_highest_gap =
2964 							    tp1->rec.data.TSN_seq;
2965 						}
2966 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2967 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2968 #ifdef SCTP_AUDITING_ENABLED
2969 							sctp_audit_log(0xB2,
2970 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2971 #endif
2972 						}
2973 					}
2974 					/*-
2975 					 * All chunks NOT UNSENT fall through here and are marked
2976 					 * (leave PR-SCTP ones that are to skip alone though)
2977 					 */
2978 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2979 						tp1->sent = SCTP_DATAGRAM_MARKED;
2980 
2981 					if (tp1->rec.data.chunk_was_revoked) {
2982 						/* deflate the cwnd */
2983 						tp1->whoTo->cwnd -= tp1->book_size;
2984 						tp1->rec.data.chunk_was_revoked = 0;
2985 					}
2986 					/* NR Sack code here */
2987 					if (nr_sacking) {
2988 						if (tp1->data) {
2989 							/*
2990 							 * sa_ignore
2991 							 * NO_NULL_CHK
2992 							 */
2993 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2994 							sctp_m_freem(tp1->data);
2995 							tp1->data = NULL;
2996 						}
2997 						wake_him++;
2998 					}
2999 				}
3000 				break;
3001 			}	/* if (tp1->TSN_seq == theTSN) */
3002 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3003 				break;
3004 			}
3005 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3006 			if ((tp1 == NULL) && (circled == 0)) {
3007 				circled++;
3008 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3009 			}
3010 		}		/* end while (tp1) */
3011 		if (tp1 == NULL) {
3012 			circled = 0;
3013 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3014 		}
3015 		/* In case the fragments were not in order we must reset */
3016 	}			/* end for (j = fragStart */
3017 	*p_tp1 = tp1;
3018 	return (wake_him);	/* Return value only used for nr-sack */
3019 }
3020 
3021 
3022 static int
3023 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3024     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3025     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3026     int num_seg, int num_nr_seg, int *rto_ok)
3027 {
3028 	struct sctp_gap_ack_block *frag, block;
3029 	struct sctp_tmit_chunk *tp1;
3030 	int i;
3031 	int num_frs = 0;
3032 	int chunk_freed;
3033 	int non_revocable;
3034 	uint16_t frag_strt, frag_end, prev_frag_end;
3035 
3036 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3037 	prev_frag_end = 0;
3038 	chunk_freed = 0;
3039 
3040 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
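		/*
		 * The first num_seg blocks are ordinary gap reports; the
		 * remaining num_nr_seg are NR gap reports, which begin a
		 * fresh in-order sequence of their own.
		 */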
3041 		if (i == num_seg) {
3042 			prev_frag_end = 0;
3043 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3044 		}
3045 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3046 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3047 		*offset += sizeof(block);
3048 		if (frag == NULL) {
3049 			return (chunk_freed);
3050 		}
3051 		frag_strt = ntohs(frag->start);
3052 		frag_end = ntohs(frag->end);
3053 
3054 		if (frag_strt > frag_end) {
3055 			/* This gap report is malformed, skip it. */
3056 			continue;
3057 		}
3058 		if (frag_strt <= prev_frag_end) {
3059 			/* This gap report is not in order, so restart. */
3060 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3061 		}
3062 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3063 			*biggest_tsn_acked = last_tsn + frag_end;
3064 		}
3065 		if (i < num_seg) {
3066 			non_revocable = 0;
3067 		} else {
3068 			non_revocable = 1;
3069 		}
3070 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3071 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3072 		    this_sack_lowest_newack, rto_ok)) {
3073 			chunk_freed = 1;
3074 		}
3075 		prev_frag_end = frag_end;
3076 	}
3077 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3078 		if (num_frs)
3079 			sctp_log_fr(*biggest_tsn_acked,
3080 			    *biggest_newly_acked_tsn,
3081 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3082 	}
3083 	return (chunk_freed);
3084 }
3085 
3086 static void
3087 sctp_check_for_revoked(struct sctp_tcb *stcb,
3088     struct sctp_association *asoc, uint32_t cumack,
3089     uint32_t biggest_tsn_acked)
3090 {
3091 	struct sctp_tmit_chunk *tp1;
3092 	int tot_revoked = 0;
3093 
3094 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3095 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3096 			/*
3097 			 * ok, this guy is either ACKED or MARKED. If it is
3098 			 * ACKED it has been previously acked but not this
3099 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3100 			 * again.
3101 			 */
3102 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3103 				break;
3104 			}
3105 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3106 				/* it has been revoked */
3107 				tp1->sent = SCTP_DATAGRAM_SENT;
3108 				tp1->rec.data.chunk_was_revoked = 1;
3109 				/*
3110 				 * We must add this stuff back in to assure
3111 				 * timers and such get started.
3112 				 */
3113 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3114 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3115 					    tp1->whoTo->flight_size,
3116 					    tp1->book_size,
3117 					    (uintptr_t) tp1->whoTo,
3118 					    tp1->rec.data.TSN_seq);
3119 				}
3120 				sctp_flight_size_increase(tp1);
3121 				sctp_total_flight_increase(stcb, tp1);
3122 				/*
3123 				 * We inflate the cwnd to compensate for our
3124 				 * artificial inflation of the flight_size.
3125 				 */
3126 				tp1->whoTo->cwnd += tp1->book_size;
3127 				tot_revoked++;
3128 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3129 					sctp_log_sack(asoc->last_acked_seq,
3130 					    cumack,
3131 					    tp1->rec.data.TSN_seq,
3132 					    0,
3133 					    0,
3134 					    SCTP_LOG_TSN_REVOKED);
3135 				}
3136 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3137 				/* it has been re-acked in this SACK */
3138 				tp1->sent = SCTP_DATAGRAM_ACKED;
3139 			}
3140 		}
3141 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3142 			break;
3143 	}
3144 }
3145 
3146 
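/*
 * Strike accounting, as a sketch: tp1->sent doubles as a miss counter
 * for an outstanding chunk.  Each qualifying SACK that reports TSNs
 * beyond a still-missing chunk increments it, and once it reaches
 * SCTP_DATAGRAM_RESEND the chunk is queued for fast retransmit below.
 * Under CMT DAC a chunk may be incremented twice by a single SACK, so
 * it can cross the threshold one SACK sooner.
 */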
3147 static void
3148 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3149     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3150 {
3151 	struct sctp_tmit_chunk *tp1;
3152 	int strike_flag = 0;
3153 	struct timeval now;
3154 	int tot_retrans = 0;
3155 	uint32_t sending_seq;
3156 	struct sctp_nets *net;
3157 	int num_dests_sacked = 0;
3158 
3159 	/*
3160 	 * Select the sending_seq: this is either the next thing ready to be
3161 	 * sent but not yet transmitted, or the next seq we will assign.
3162 	 */
3163 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3164 	if (tp1 == NULL) {
3165 		sending_seq = asoc->sending_seq;
3166 	} else {
3167 		sending_seq = tp1->rec.data.TSN_seq;
3168 	}
3169 
3170 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3171 	if ((asoc->sctp_cmt_on_off > 0) &&
3172 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3173 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3174 			if (net->saw_newack)
3175 				num_dests_sacked++;
3176 		}
3177 	}
3178 	if (stcb->asoc.peer_supports_prsctp) {
3179 		(void)SCTP_GETTIME_TIMEVAL(&now);
3180 	}
3181 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3182 		strike_flag = 0;
3183 		if (tp1->no_fr_allowed) {
3184 			/* this one had a timeout or something */
3185 			continue;
3186 		}
3187 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3188 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3189 				sctp_log_fr(biggest_tsn_newly_acked,
3190 				    tp1->rec.data.TSN_seq,
3191 				    tp1->sent,
3192 				    SCTP_FR_LOG_CHECK_STRIKE);
3193 		}
3194 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3195 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3196 			/* done */
3197 			break;
3198 		}
3199 		if (stcb->asoc.peer_supports_prsctp) {
3200 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3201 				/* Is it expired? */
3202 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3203 					/* Yes so drop it */
3204 					if (tp1->data != NULL) {
3205 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3206 						    SCTP_SO_NOT_LOCKED);
3207 					}
3208 					continue;
3209 				}
3210 			}
3211 		}
3212 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3213 			/* we are beyond the tsn in the sack */
3214 			break;
3215 		}
3216 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3217 			/* either a RESEND, ACKED, or MARKED */
3218 			/* skip */
3219 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3220 				/* Continue striking FWD-TSN chunks */
3221 				tp1->rec.data.fwd_tsn_cnt++;
3222 			}
3223 			continue;
3224 		}
3225 		/*
3226 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3227 		 */
3228 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3229 			/*
3230 			 * No new acks were received for data sent to this
3231 			 * dest. Therefore, according to the SFR algo for
3232 			 * CMT, no data sent to this dest can be marked for
3233 			 * FR using this SACK.
3234 			 */
3235 			continue;
3236 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3237 		    tp1->whoTo->this_sack_highest_newack)) {
3238 			/*
3239 			 * CMT: New acks were received for data sent to
3240 			 * this dest. But no new acks were seen for data
3241 			 * sent after tp1. Therefore, according to the SFR
3242 			 * algo for CMT, tp1 cannot be marked for FR using
3243 			 * this SACK. This step covers part of the DAC algo
3244 			 * and the HTNA algo as well.
3245 			 */
3246 			continue;
3247 		}
3248 		/*
3249 		 * Here we check to see if we have already done a FR
3250 		 * and if so we see if the biggest TSN we saw in the sack is
3251 		 * smaller than the recovery point. If so we don't strike
3252 		 * the tsn... otherwise we CAN strike the TSN.
3253 		 */
3254 		/*
3255 		 * @@@ JRI: Check for CMT if (accum_moved &&
3256 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3257 		 * 0)) {
3258 		 */
3259 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3260 			/*
3261 			 * Strike the TSN if in fast-recovery and cum-ack
3262 			 * moved.
3263 			 */
3264 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3265 				sctp_log_fr(biggest_tsn_newly_acked,
3266 				    tp1->rec.data.TSN_seq,
3267 				    tp1->sent,
3268 				    SCTP_FR_LOG_STRIKE_CHUNK);
3269 			}
3270 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3271 				tp1->sent++;
3272 			}
3273 			if ((asoc->sctp_cmt_on_off > 0) &&
3274 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3275 				/*
3276 				 * CMT DAC algorithm: If SACK flag is set to
3277 				 * 0, then lowest_newack test will not pass
3278 				 * because it would have been set to the
3279 				 * cumack earlier. If tp1 is not already to be
3280 				 * rtx'd, this is not a mixed sack, and tp1 is
3281 				 * not between two sacked TSNs, then mark it by
3282 				 * one more. NOTE that we are marking by one
3283 				 * additional time since the SACK DAC flag
3284 				 * indicates that two packets have been
3285 				 * received after this missing TSN.
3286 				 */
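				/*
				 * Worked example (illustrative): with DAC
				 * on, exactly one destination newly sacked,
				 * and tp1 below this_sack_lowest_newack,
				 * this one SACK bumps tp1->sent twice in
				 * total (once above, once here).
				 */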
3287 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3288 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3289 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3290 						sctp_log_fr(16 + num_dests_sacked,
3291 						    tp1->rec.data.TSN_seq,
3292 						    tp1->sent,
3293 						    SCTP_FR_LOG_STRIKE_CHUNK);
3294 					}
3295 					tp1->sent++;
3296 				}
3297 			}
3298 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3299 		    (asoc->sctp_cmt_on_off == 0)) {
3300 			/*
3301 			 * For those that have done a FR we must take
3302 			 * special consideration if we strike. I.e the
3303 			 * special consideration if we strike. I.e. the
3304 			 * sending_seq at the time we did the FR.
3305 			 */
3306 			if (
3307 #ifdef SCTP_FR_TO_ALTERNATE
3308 			/*
3309 			 * If FR's go to new networks, then we must only do
3310 			 * this for singly homed asoc's. However if the FR's
3311 			 * go to the same network (Armando's work) then it's
3312 			 * ok to FR multiple times.
3313 			 */
3314 			    (asoc->numnets < 2)
3315 #else
3316 			    (1)
3317 #endif
3318 			    ) {
3319 
3320 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3321 				    tp1->rec.data.fast_retran_tsn)) {
3322 					/*
3323 					 * Strike the TSN, since this ack is
3324 					 * beyond where things were when we
3325 					 * did a FR.
3326 					 */
3327 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3328 						sctp_log_fr(biggest_tsn_newly_acked,
3329 						    tp1->rec.data.TSN_seq,
3330 						    tp1->sent,
3331 						    SCTP_FR_LOG_STRIKE_CHUNK);
3332 					}
3333 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3334 						tp1->sent++;
3335 					}
3336 					strike_flag = 1;
3337 					if ((asoc->sctp_cmt_on_off > 0) &&
3338 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3339 						/*
3340 						 * CMT DAC algorithm: If
3341 						 * SACK flag is set to 0,
3342 						 * then lowest_newack test
3343 						 * will not pass because it
3344 						 * would have been set to
3345 						 * the cumack earlier. If tp1
3346 						 * is not already to be rtx'd,
3347 						 * this is not a mixed sack,
3348 						 * and tp1 is not between two
3349 						 * sacked TSNs, then mark by
3350 						 * one more. NOTE that we
3351 						 * are marking by one
3352 						 * additional time since the
3353 						 * SACK DAC flag indicates
3354 						 * that two packets have
3355 						 * been received after this
3356 						 * missing TSN.
3357 						 */
3358 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3359 						    (num_dests_sacked == 1) &&
3360 						    SCTP_TSN_GT(this_sack_lowest_newack,
3361 						    tp1->rec.data.TSN_seq)) {
3362 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3363 								sctp_log_fr(32 + num_dests_sacked,
3364 								    tp1->rec.data.TSN_seq,
3365 								    tp1->sent,
3366 								    SCTP_FR_LOG_STRIKE_CHUNK);
3367 							}
3368 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3369 								tp1->sent++;
3370 							}
3371 						}
3372 					}
3373 				}
3374 			}
3375 			/*
3376 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3377 			 * algo covers HTNA.
3378 			 */
3379 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3380 		    biggest_tsn_newly_acked)) {
3381 			/*
3382 			 * We don't strike these: this is the HTNA
3383 			 * algorithm, i.e. we don't strike if our TSN is
3384 			 * larger than the Highest TSN Newly Acked.
3385 			 */
3386 			;
3387 		} else {
3388 			/* Strike the TSN */
3389 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3390 				sctp_log_fr(biggest_tsn_newly_acked,
3391 				    tp1->rec.data.TSN_seq,
3392 				    tp1->sent,
3393 				    SCTP_FR_LOG_STRIKE_CHUNK);
3394 			}
3395 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3396 				tp1->sent++;
3397 			}
3398 			if ((asoc->sctp_cmt_on_off > 0) &&
3399 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3400 				/*
3401 				 * CMT DAC algorithm: If SACK flag is set to
3402 				 * 0, then lowest_newack test will not pass
3403 				 * because it would have been set to the
3404 				 * cumack earlier. If tp1 is not already to be
3405 				 * rtx'd, this is not a mixed sack, and tp1 is
3406 				 * not between two sacked TSNs, then mark by
3407 				 * one more. NOTE that we are marking by one
3408 				 * additional time since the SACK DAC flag
3409 				 * indicates that two packets have been
3410 				 * received after this missing TSN.
3411 				 */
3412 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3413 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3414 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3415 						sctp_log_fr(48 + num_dests_sacked,
3416 						    tp1->rec.data.TSN_seq,
3417 						    tp1->sent,
3418 						    SCTP_FR_LOG_STRIKE_CHUNK);
3419 					}
3420 					tp1->sent++;
3421 				}
3422 			}
3423 		}
3424 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3425 			struct sctp_nets *alt;
3426 
3427 			/* fix counts and things */
3428 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3429 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3430 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3431 				    tp1->book_size,
3432 				    (uintptr_t) tp1->whoTo,
3433 				    tp1->rec.data.TSN_seq);
3434 			}
3435 			if (tp1->whoTo) {
3436 				tp1->whoTo->net_ack++;
3437 				sctp_flight_size_decrease(tp1);
3438 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3439 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3440 					    tp1);
3441 				}
3442 			}
3443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3444 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3445 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3446 			}
3447 			/* add back to the rwnd */
3448 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3449 
3450 			/* remove from the total flight */
3451 			sctp_total_flight_decrease(stcb, tp1);
3452 
3453 			if ((stcb->asoc.peer_supports_prsctp) &&
3454 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3455 				/*
3456 				 * Has it been retransmitted tv_sec times? -
3457 				 * we store the retran count there.
3458 				 */
3459 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3460 					/* Yes, so drop it */
3461 					if (tp1->data != NULL) {
3462 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3463 						    SCTP_SO_NOT_LOCKED);
3464 					}
3465 					/* Make sure to flag we had a FR */
3466 					tp1->whoTo->net_ack++;
3467 					continue;
3468 				}
3469 			}
3470 			/*
3471 			 * SCTP_PRINTF("OK, we are now ready to FR this
3472 			 * guy\n");
3473 			 */
3474 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3475 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3476 				    0, SCTP_FR_MARKED);
3477 			}
3478 			if (strike_flag) {
3479 				/* This is a subsequent FR */
3480 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3481 			}
3482 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3483 			if (asoc->sctp_cmt_on_off > 0) {
3484 				/*
3485 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3486 				 * If CMT is being used, then pick dest with
3487 				 * largest ssthresh for any retransmission.
3488 				 */
3489 				tp1->no_fr_allowed = 1;
3490 				alt = tp1->whoTo;
3491 				/* sa_ignore NO_NULL_CHK */
3492 				if (asoc->sctp_cmt_pf > 0) {
3493 					/*
3494 					 * JRS 5/18/07 - If CMT PF is on,
3495 					 * use the PF version of
3496 					 * find_alt_net()
3497 					 */
3498 					alt = sctp_find_alternate_net(stcb, alt, 2);
3499 				} else {
3500 					/*
3501 					 * JRS 5/18/07 - If only CMT is on,
3502 					 * use the CMT version of
3503 					 * find_alt_net()
3504 					 */
3505 					/* sa_ignore NO_NULL_CHK */
3506 					alt = sctp_find_alternate_net(stcb, alt, 1);
3507 				}
3508 				if (alt == NULL) {
3509 					alt = tp1->whoTo;
3510 				}
3511 				/*
3512 				 * CUCv2: If a different dest is picked for
3513 				 * the retransmission, then new
3514 				 * (rtx-)pseudo_cumack needs to be tracked
3515 				 * for orig dest. Let CUCv2 track new (rtx-)
3516 				 * pseudo-cumack always.
3517 				 */
3518 				if (tp1->whoTo) {
3519 					tp1->whoTo->find_pseudo_cumack = 1;
3520 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3521 				}
3522 			} else {/* CMT is OFF */
3523 
3524 #ifdef SCTP_FR_TO_ALTERNATE
3525 				/* Can we find an alternate? */
3526 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3527 #else
3528 				/*
3529 				 * default behavior is to NOT retransmit
3530 				 * FR's to an alternate. Armando Caro's
3531 				 * paper details why.
3532 				 */
3533 				alt = tp1->whoTo;
3534 #endif
3535 			}
3536 
3537 			tp1->rec.data.doing_fast_retransmit = 1;
3538 			tot_retrans++;
3539 			/* mark the sending seq for possible subsequent FR's */
3540 			/*
3541 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3542 			 * (uint32_t)tp1->rec.data.TSN_seq);
3543 			 */
3544 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3545 				/*
3546 				 * If the send queue is empty then it's
3547 				 * the next sequence number that will be
3548 				 * assigned so we subtract one from this to
3549 				 * get the one we last sent.
3550 				 */
3551 				tp1->rec.data.fast_retran_tsn = sending_seq;
3552 			} else {
3553 				/*
3554 				 * If there are chunks on the send queue
3555 				 * (unsent data that has made it from the
3556 				 * stream queues but not out the door), we
3557 				 * take the first one (which will have the
3558 				 * lowest TSN) and subtract one to get the
3559 				 * one we last sent.
3560 				 */
3561 				struct sctp_tmit_chunk *ttt;
3562 
3563 				ttt = TAILQ_FIRST(&asoc->send_queue);
3564 				tp1->rec.data.fast_retran_tsn =
3565 				    ttt->rec.data.TSN_seq;
3566 			}
3567 
3568 			if (tp1->do_rtt) {
3569 				/*
3570 				 * this guy had an RTO calculation pending on
3571 				 * it, cancel it
3572 				 */
3573 				if ((tp1->whoTo != NULL) &&
3574 				    (tp1->whoTo->rto_needed == 0)) {
3575 					tp1->whoTo->rto_needed = 1;
3576 				}
3577 				tp1->do_rtt = 0;
3578 			}
3579 			if (alt != tp1->whoTo) {
3580 				/* yes, there is an alternate. */
3581 				sctp_free_remote_addr(tp1->whoTo);
3582 				/* sa_ignore FREED_MEMORY */
3583 				tp1->whoTo = alt;
3584 				atomic_add_int(&alt->ref_count, 1);
3585 			}
3586 		}
3587 	}
3588 }
3589 
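/*
 * Advancement example, as a sketch: if the sent_queue holds TSNs 10
 * (SKIP), 11 (SKIP), 12 (RESEND, sent reliably), the walk below moves
 * advanced_peer_ack_point to 11 and stops at 12.  The caller can then
 * send a FORWARD-TSN asking the peer to behave as if it had cum-acked
 * through 11.
 */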
3590 struct sctp_tmit_chunk *
3591 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3592     struct sctp_association *asoc)
3593 {
3594 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3595 	struct timeval now;
3596 	int now_filled = 0;
3597 
3598 	if (asoc->peer_supports_prsctp == 0) {
3599 		return (NULL);
3600 	}
3601 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3602 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3603 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3604 			/* no chance to advance, out of here */
3605 			break;
3606 		}
3607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3608 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3609 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3610 				    asoc->advanced_peer_ack_point,
3611 				    tp1->rec.data.TSN_seq, 0, 0);
3612 			}
3613 		}
3614 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3615 			/*
3616 			 * We can't fwd-tsn past any that are reliable, aka
3617 			 * retransmitted until the asoc fails.
3618 			 */
3619 			break;
3620 		}
3621 		if (!now_filled) {
3622 			(void)SCTP_GETTIME_TIMEVAL(&now);
3623 			now_filled = 1;
3624 		}
3625 		/*
3626 		 * Now we have a chunk which is marked for another
3627 		 * retransmission to a PR-stream but has maybe run out of its
3628 		 * chances already OR has been marked to skip now. Can we skip
3629 		 * it if it's a resend?
3630 		 */
3631 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3632 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3633 			/*
3634 			 * Now is this one marked for resend and its time is
3635 			 * now up?
3636 			 */
3637 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3638 				/* Yes so drop it */
3639 				if (tp1->data) {
3640 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3641 					    1, SCTP_SO_NOT_LOCKED);
3642 				}
3643 			} else {
3644 				/*
3645 				 * No, we are done when we hit one marked for
3646 				 * resend whose time has not expired.
3647 				 */
3648 				break;
3649 			}
3650 		}
3651 		/*
3652 		 * Ok, now if this chunk is marked to be dropped we can clean
3653 		 * up the chunk, advance our peer ack point, and check the
3654 		 * next chunk.
3655 		 */
3656 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3657 			/* the advanced PeerAckPoint goes forward */
3658 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3659 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3660 				a_adv = tp1;
3661 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3662 				/* No update but we do save the chk */
3663 				a_adv = tp1;
3664 			}
3665 		} else {
3666 			/*
3667 			 * If it is still in RESEND we can advance no
3668 			 * further
3669 			 */
3670 			break;
3671 		}
3672 	}
3673 	return (a_adv);
3674 }
3675 
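/*
 * Sanity audit of the flight-size bookkeeping: every chunk on the
 * sent_queue should be in flight (below RESEND), marked for resend, or
 * acked/above; anything "in between" means the counters have drifted.
 * Under INVARIANTS this panics; otherwise the caller rebuilds
 * flight_size and total_flight from the queue.
 */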
3676 static int
3677 sctp_fs_audit(struct sctp_association *asoc)
3678 {
3679 	struct sctp_tmit_chunk *chk;
3680 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3681 	int entry_flight, entry_cnt, ret;
3682 
3683 	entry_flight = asoc->total_flight;
3684 	entry_cnt = asoc->total_flight_count;
3685 	ret = 0;
3686 
3687 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3688 		return (0);
3689 
3690 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3691 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3692 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3693 			    chk->rec.data.TSN_seq,
3694 			    chk->send_size,
3695 			    chk->snd_count);
3696 			inflight++;
3697 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3698 			resend++;
3699 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3700 			inbetween++;
3701 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3702 			above++;
3703 		} else {
3704 			acked++;
3705 		}
3706 	}
3707 
3708 	if ((inflight > 0) || (inbetween > 0)) {
3709 #ifdef INVARIANTS
3710 		panic("Flight size-express incorrect? \n");
3711 #else
3712 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3713 		    entry_flight, entry_cnt);
3714 
3715 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3716 		    inflight, inbetween, resend, above, acked);
3717 		ret = 1;
3718 #endif
3719 	}
3720 	return (ret);
3721 }
3722 
3723 
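/*
 * Window-probe recovery, sketched: once the peer's rwnd reopens after
 * a zero-window probe, the probe chunk is pulled out of flight and
 * re-marked SCTP_DATAGRAM_RESEND so it goes out again under normal
 * congestion and flight accounting.
 */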
3724 static void
3725 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3726     struct sctp_association *asoc,
3727     struct sctp_tmit_chunk *tp1)
3728 {
3729 	tp1->window_probe = 0;
3730 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3731 		/* TSNs skipped; we do NOT move back. */
3732 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3733 		    tp1->whoTo->flight_size,
3734 		    tp1->book_size,
3735 		    (uintptr_t) tp1->whoTo,
3736 		    tp1->rec.data.TSN_seq);
3737 		return;
3738 	}
3739 	/* First setup this by shrinking flight */
3740 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3741 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3742 		    tp1);
3743 	}
3744 	sctp_flight_size_decrease(tp1);
3745 	sctp_total_flight_decrease(stcb, tp1);
3746 	/* Now mark for resend */
3747 	tp1->sent = SCTP_DATAGRAM_RESEND;
3748 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3749 
3750 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3751 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3752 		    tp1->whoTo->flight_size,
3753 		    tp1->book_size,
3754 		    (uintptr_t) tp1->whoTo,
3755 		    tp1->rec.data.TSN_seq);
3756 	}
3757 }
3758 
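/*
 * Express SACK path: taken for a SACK that moves only the cumulative
 * ack and carries no gap-ack blocks, so all of the striking and
 * revocation machinery of sctp_handle_sack() can be skipped and
 * everything at or below cumack released directly.
 */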
3759 void
3760 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3761     uint32_t rwnd, int *abort_now, int ecne_seen)
3762 {
3763 	struct sctp_nets *net;
3764 	struct sctp_association *asoc;
3765 	struct sctp_tmit_chunk *tp1, *tp2;
3766 	uint32_t old_rwnd;
3767 	int win_probe_recovery = 0;
3768 	int win_probe_recovered = 0;
3769 	int j, done_once = 0;
3770 	int rto_ok = 1;
3771 
3772 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3773 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3774 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3775 	}
3776 	SCTP_TCB_LOCK_ASSERT(stcb);
3777 #ifdef SCTP_ASOCLOG_OF_TSNS
3778 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3779 	stcb->asoc.cumack_log_at++;
3780 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3781 		stcb->asoc.cumack_log_at = 0;
3782 	}
3783 #endif
3784 	asoc = &stcb->asoc;
3785 	old_rwnd = asoc->peers_rwnd;
3786 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3787 		/* old ack */
3788 		return;
3789 	} else if (asoc->last_acked_seq == cumack) {
3790 		/* Window update sack */
3791 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3792 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3793 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3794 			/* SWS sender side engages */
3795 			asoc->peers_rwnd = 0;
3796 		}
3797 		if (asoc->peers_rwnd > old_rwnd) {
3798 			goto again;
3799 		}
3800 		return;
3801 	}
3802 	/* First setup for CC stuff */
3803 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3804 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3805 			/* Drag along the window_tsn for cwr's */
3806 			net->cwr_window_tsn = cumack;
3807 		}
3808 		net->prev_cwnd = net->cwnd;
3809 		net->net_ack = 0;
3810 		net->net_ack2 = 0;
3811 
3812 		/*
3813 		 * CMT: Reset CUC and Fast recovery algo variables before
3814 		 * SACK processing
3815 		 */
3816 		net->new_pseudo_cumack = 0;
3817 		net->will_exit_fast_recovery = 0;
3818 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3819 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3820 		}
3821 	}
3822 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3823 		uint32_t send_s;
3824 
3825 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3826 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3827 			    sctpchunk_listhead);
3828 			send_s = tp1->rec.data.TSN_seq + 1;
3829 		} else {
3830 			send_s = asoc->sending_seq;
3831 		}
3832 		if (SCTP_TSN_GE(cumack, send_s)) {
3833 #ifndef INVARIANTS
3834 			struct mbuf *oper;
3835 
3836 #endif
3837 #ifdef INVARIANTS
3838 			panic("Impossible sack 1");
3839 #else
3840 
3841 			*abort_now = 1;
3842 			/* XXX */
3843 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3844 			    0, M_DONTWAIT, 1, MT_DATA);
3845 			if (oper) {
3846 				struct sctp_paramhdr *ph;
3847 				uint32_t *ippp;
3848 
3849 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3850 				    sizeof(uint32_t);
3851 				ph = mtod(oper, struct sctp_paramhdr *);
3852 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3853 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3854 				ippp = (uint32_t *) (ph + 1);
3855 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3856 			}
3857 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3858 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3859 			return;
3860 #endif
3861 		}
3862 	}
3863 	asoc->this_sack_highest_gap = cumack;
3864 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3865 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3866 		    stcb->asoc.overall_error_count,
3867 		    0,
3868 		    SCTP_FROM_SCTP_INDATA,
3869 		    __LINE__);
3870 	}
3871 	stcb->asoc.overall_error_count = 0;
3872 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3873 		/* process the new consecutive TSN first */
3874 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3875 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3876 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3877 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3878 				}
3879 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3880 					/*
3881 					 * If it is less than ACKED, it is
3882 					 * now no longer in flight. Higher
3883 					 * values may occur during marking.
3884 					 */
3885 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3886 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3887 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3888 							    tp1->whoTo->flight_size,
3889 							    tp1->book_size,
3890 							    (uintptr_t) tp1->whoTo,
3891 							    tp1->rec.data.TSN_seq);
3892 						}
3893 						sctp_flight_size_decrease(tp1);
3894 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3895 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3896 							    tp1);
3897 						}
3898 						/* sa_ignore NO_NULL_CHK */
3899 						sctp_total_flight_decrease(stcb, tp1);
3900 					}
3901 					tp1->whoTo->net_ack += tp1->send_size;
3902 					if (tp1->snd_count < 2) {
3903 						/*
3904 						 * True non-retransmitted
3905 						 * chunk
3906 						 */
3907 						tp1->whoTo->net_ack2 +=
3908 						    tp1->send_size;
3909 
3910 						/* update RTO too? */
3911 						if (tp1->do_rtt) {
3912 							if (rto_ok) {
3913 								tp1->whoTo->RTO =
3914 								/* sa_ignore NO_NULL_CHK */
3919 								    sctp_calculate_rto(stcb,
3920 								    asoc, tp1->whoTo,
3921 								    &tp1->sent_rcv_time,
3922 								    sctp_align_safe_nocopy,
3923 								    SCTP_RTT_FROM_DATA);
3924 								rto_ok = 0;
3925 							}
3926 							if (tp1->whoTo->rto_needed == 0) {
3927 								tp1->whoTo->rto_needed = 1;
3928 							}
3929 							tp1->do_rtt = 0;
3930 						}
3931 					}
3932 					/*
3933 					 * CMT: CUCv2 algorithm. From the
3934 					 * cumack'd TSNs, for each TSN being
3935 					 * acked for the first time, set the
3936 					 * following variables for the
3937 					 * corresp destination.
3938 					 * new_pseudo_cumack will trigger a
3939 					 * cwnd update.
3940 					 * find_(rtx_)pseudo_cumack will
3941 					 * trigger search for the next
3942 					 * expected (rtx-)pseudo-cumack.
3943 					 */
3944 					tp1->whoTo->new_pseudo_cumack = 1;
3945 					tp1->whoTo->find_pseudo_cumack = 1;
3946 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3947 
3948 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3949 						/* sa_ignore NO_NULL_CHK */
3950 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3951 					}
3952 				}
3953 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3954 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3955 				}
3956 				if (tp1->rec.data.chunk_was_revoked) {
3957 					/* deflate the cwnd */
3958 					tp1->whoTo->cwnd -= tp1->book_size;
3959 					tp1->rec.data.chunk_was_revoked = 0;
3960 				}
3961 				tp1->sent = SCTP_DATAGRAM_ACKED;
3962 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3963 				if (tp1->data) {
3964 					/* sa_ignore NO_NULL_CHK */
3965 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3966 					sctp_m_freem(tp1->data);
3967 					tp1->data = NULL;
3968 				}
3969 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3970 					sctp_log_sack(asoc->last_acked_seq,
3971 					    cumack,
3972 					    tp1->rec.data.TSN_seq,
3973 					    0,
3974 					    0,
3975 					    SCTP_LOG_FREE_SENT);
3976 				}
3977 				asoc->sent_queue_cnt--;
3978 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3979 			} else {
3980 				break;
3981 			}
3982 		}
3983 
3984 	}
3985 	/* sa_ignore NO_NULL_CHK */
3986 	if (stcb->sctp_socket) {
3987 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3988 		struct socket *so;
3989 
3990 #endif
3991 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3992 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3993 			/* sa_ignore NO_NULL_CHK */
3994 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3995 		}
3996 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3997 		so = SCTP_INP_SO(stcb->sctp_ep);
3998 		atomic_add_int(&stcb->asoc.refcnt, 1);
3999 		SCTP_TCB_UNLOCK(stcb);
4000 		SCTP_SOCKET_LOCK(so, 1);
4001 		SCTP_TCB_LOCK(stcb);
4002 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4003 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4004 			/* assoc was freed while we were unlocked */
4005 			SCTP_SOCKET_UNLOCK(so, 1);
4006 			return;
4007 		}
4008 #endif
4009 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4010 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4011 		SCTP_SOCKET_UNLOCK(so, 1);
4012 #endif
4013 	} else {
4014 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4015 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4016 		}
4017 	}
4018 
4019 	/* JRS - Use the congestion control given in the CC module */
4020 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4021 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4022 			if (net->net_ack2 > 0) {
4023 				/*
4024 				 * Karn's rule applies to clearing error
4025 				 * count, this is optional.
4026 				 * count; this is optional.
4027 				net->error_count = 0;
4028 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4029 					/* addr came good */
4030 					net->dest_state |= SCTP_ADDR_REACHABLE;
4031 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4032 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4033 				}
4034 				if (net == stcb->asoc.primary_destination) {
4035 					if (stcb->asoc.alternate) {
4036 						/*
4037 						 * release the alternate,
4038 						 * primary is good
4039 						 */
4040 						sctp_free_remote_addr(stcb->asoc.alternate);
4041 						stcb->asoc.alternate = NULL;
4042 					}
4043 				}
4044 				if (net->dest_state & SCTP_ADDR_PF) {
4045 					net->dest_state &= ~SCTP_ADDR_PF;
4046 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4047 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4048 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4049 					/* Done with this net */
4050 					net->net_ack = 0;
4051 				}
4052 				/* restore any doubled timers */
4053 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4054 				if (net->RTO < stcb->asoc.minrto) {
4055 					net->RTO = stcb->asoc.minrto;
4056 				}
4057 				if (net->RTO > stcb->asoc.maxrto) {
4058 					net->RTO = stcb->asoc.maxrto;
4059 				}
4060 			}
4061 		}
4062 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4063 	}
4064 	asoc->last_acked_seq = cumack;
4065 
4066 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4067 		/* nothing left in-flight */
4068 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4069 			net->flight_size = 0;
4070 			net->partial_bytes_acked = 0;
4071 		}
4072 		asoc->total_flight = 0;
4073 		asoc->total_flight_count = 0;
4074 	}
4075 	/* RWND update */
4076 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4077 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4078 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4079 		/* SWS sender side engages */
4080 		asoc->peers_rwnd = 0;
4081 	}
4082 	if (asoc->peers_rwnd > old_rwnd) {
4083 		win_probe_recovery = 1;
4084 	}
4085 	/* Now assure a timer is running wherever data is queued */
4086 again:
4087 	j = 0;
4088 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4089 		int to_ticks;
4090 
4091 		if (win_probe_recovery && (net->window_probe)) {
4092 			win_probe_recovered = 1;
4093 			/*
4094 			 * Find the first chunk that was used with a window
4095 			 * probe and move it back for sending.
4096 			 */
4097 			/* sa_ignore FREED_MEMORY */
4098 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4099 				if (tp1->window_probe) {
4100 					/* move back to data send queue */
4101 					sctp_window_probe_recovery(stcb, asoc, tp1);
4102 					break;
4103 				}
4104 			}
4105 		}
4106 		if (net->RTO == 0) {
4107 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4108 		} else {
4109 			to_ticks = MSEC_TO_TICKS(net->RTO);
4110 		}
4111 		if (net->flight_size) {
4112 			j++;
4113 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4114 			    sctp_timeout_handler, &net->rxt_timer);
4115 			if (net->window_probe) {
4116 				net->window_probe = 0;
4117 			}
4118 		} else {
4119 			if (net->window_probe) {
4120 				/*
4121 				 * In window probes we must assure a timer
4122 				 * is still running there
4123 				 */
4124 				net->window_probe = 0;
4125 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4126 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4127 					    sctp_timeout_handler, &net->rxt_timer);
4128 				}
4129 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4130 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4131 				    stcb, net,
4132 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4133 			}
4134 		}
4135 	}
4136 	if ((j == 0) &&
4137 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4138 	    (asoc->sent_queue_retran_cnt == 0) &&
4139 	    (win_probe_recovered == 0) &&
4140 	    (done_once == 0)) {
4141 		/*
4142 		 * huh, this should not happen unless all packets are
4143 		 * PR-SCTP and marked to skip, of course.
4144 		 */
4145 		if (sctp_fs_audit(asoc)) {
4146 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4147 				net->flight_size = 0;
4148 			}
4149 			asoc->total_flight = 0;
4150 			asoc->total_flight_count = 0;
4151 			asoc->sent_queue_retran_cnt = 0;
4152 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4153 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4154 					sctp_flight_size_increase(tp1);
4155 					sctp_total_flight_increase(stcb, tp1);
4156 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4157 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4158 				}
4159 			}
4160 		}
4161 		done_once = 1;
4162 		goto again;
4163 	}
4164 	/**********************************/
4165 	/* Now what about shutdown issues */
4166 	/**********************************/
4167 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4168 		/* nothing left on sendqueue.. consider done */
4169 		/* clean up */
4170 		if ((asoc->stream_queue_cnt == 1) &&
4171 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4172 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4173 		    (asoc->locked_on_sending)
4174 		    ) {
4175 			struct sctp_stream_queue_pending *sp;
4176 
4177 			/*
4178 			 * We may be in a state where we got it all across but
4179 			 * cannot write more due to a shutdown... we abort
4180 			 * since the user did not indicate EOR in this case.
4181 			 * The sp will be cleaned during free of the asoc.
4182 			 */
4183 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4184 			    sctp_streamhead);
4185 			if ((sp) && (sp->length == 0)) {
4186 				/* Let cleanup code purge it */
4187 				if (sp->msg_is_complete) {
4188 					asoc->stream_queue_cnt--;
4189 				} else {
4190 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4191 					asoc->locked_on_sending = NULL;
4192 					asoc->stream_queue_cnt--;
4193 				}
4194 			}
4195 		}
4196 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4197 		    (asoc->stream_queue_cnt == 0)) {
4198 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4199 				/* Need to abort here */
4200 				struct mbuf *oper;
4201 
4202 		abort_out_now:
4203 				*abort_now = 1;
4204 				/* XXX */
4205 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4206 				    0, M_DONTWAIT, 1, MT_DATA);
4207 				if (oper) {
4208 					struct sctp_paramhdr *ph;
4209 					uint32_t *ippp;
4210 
4211 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4212 					    sizeof(uint32_t);
4213 					ph = mtod(oper, struct sctp_paramhdr *);
4214 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4215 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4216 					ippp = (uint32_t *) (ph + 1);
4217 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4218 				}
4219 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4220 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4221 			} else {
4222 				struct sctp_nets *netp;
4223 
4224 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4225 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4226 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4227 				}
4228 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4229 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4230 				sctp_stop_timers_for_shutdown(stcb);
4231 				if (asoc->alternate) {
4232 					netp = asoc->alternate;
4233 				} else {
4234 					netp = asoc->primary_destination;
4235 				}
4236 				sctp_send_shutdown(stcb, netp);
4237 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4238 				    stcb->sctp_ep, stcb, netp);
4239 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4240 				    stcb->sctp_ep, stcb, netp);
4241 			}
4242 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4243 		    (asoc->stream_queue_cnt == 0)) {
4244 			struct sctp_nets *netp;
4245 
4246 			if (asoc->alternate) {
4247 				netp = asoc->alternate;
4248 			} else {
4249 				netp = asoc->primary_destination;
4250 			}
4251 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4252 				goto abort_out_now;
4253 			}
4254 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4255 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4256 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4257 			sctp_send_shutdown_ack(stcb, netp);
4258 			sctp_stop_timers_for_shutdown(stcb);
4259 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4260 			    stcb->sctp_ep, stcb, netp);
4261 		}
4262 	}
4263 	/*********************************************/
4264 	/* Here we perform PR-SCTP procedures        */
4265 	/* (section 4.2)                             */
4266 	/*********************************************/
4267 	/* C1. update advancedPeerAckPoint */
4268 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4269 		asoc->advanced_peer_ack_point = cumack;
4270 	}
4271 	/* PR-SCTP issues need to be addressed too */
4272 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4273 		struct sctp_tmit_chunk *lchk;
4274 		uint32_t old_adv_peer_ack_point;
4275 
4276 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4277 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4278 		/* C3. See if we need to send a Fwd-TSN */
4279 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4280 			/*
4281 			 * ISSUE with ECN, see FWD-TSN processing.
4282 			 */
4283 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4284 				send_forward_tsn(stcb, asoc);
4285 			} else if (lchk) {
4286 				/* try to FR fwd-tsn's that get lost too */
4287 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4288 					send_forward_tsn(stcb, asoc);
4289 				}
4290 			}
4291 		}
4292 		if (lchk) {
4293 			/* Assure a timer is up */
4294 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4295 			    stcb->sctp_ep, stcb, lchk->whoTo);
4296 		}
4297 	}
4298 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4299 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4300 		    rwnd,
4301 		    stcb->asoc.peers_rwnd,
4302 		    stcb->asoc.total_flight,
4303 		    stcb->asoc.total_output_queue_size);
4304 	}
4305 }
4306 
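/*
 * A note on the TSN comparisons used throughout: SCTP_TSN_GT/GE
 * implement circular (serial-number style) comparison on the 32-bit
 * TSN space, so, for example, TSN 5 compares greater than TSN
 * 0xfffffffe across a wrap.
 */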
4307 void
4308 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4309     struct sctp_tcb *stcb,
4310     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4311     int *abort_now, uint8_t flags,
4312     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4313 {
4314 	struct sctp_association *asoc;
4315 	struct sctp_tmit_chunk *tp1, *tp2;
4316 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4317 	uint16_t wake_him = 0;
4318 	uint32_t send_s = 0;
4319 	long j;
4320 	int accum_moved = 0;
4321 	int will_exit_fast_recovery = 0;
4322 	uint32_t a_rwnd, old_rwnd;
4323 	int win_probe_recovery = 0;
4324 	int win_probe_recovered = 0;
4325 	struct sctp_nets *net = NULL;
4326 	int done_once;
4327 	int rto_ok = 1;
4328 	uint8_t reneged_all = 0;
4329 	uint8_t cmt_dac_flag;
4330 
4331 	/*
4332 	 * we take any chance we can to service our queues since we cannot
4333 	 * get awoken when the socket is read from :<
4334 	 * be awakened when the socket is read from :<
4335 	/*
4336 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4337 	 * old sack, if so discard. 2) If there is nothing left in the send
4338 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4339 	 * too, update any rwnd change and verify no timers are running.
4340 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4341 	 * moved process these first and note that it moved. 4) Process any
4342 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4343 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4344 	 * sync up flightsizes and things, stop all timers and also check
4345 	 * for shutdown_pending state. If so then go ahead and send off the
4346 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4347 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4348 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4349 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4350 	 * if in shutdown_recv state.
4351 	 */
4352 	SCTP_TCB_LOCK_ASSERT(stcb);
4353 	/* CMT DAC algo */
4354 	this_sack_lowest_newack = 0;
4355 	SCTP_STAT_INCR(sctps_slowpath_sack);
4356 	last_tsn = cum_ack;
4357 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4358 #ifdef SCTP_ASOCLOG_OF_TSNS
4359 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4360 	stcb->asoc.cumack_log_at++;
4361 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4362 		stcb->asoc.cumack_log_at = 0;
4363 	}
4364 #endif
4365 	a_rwnd = rwnd;
4366 
4367 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4368 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4369 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4370 	}
4371 	old_rwnd = stcb->asoc.peers_rwnd;
4372 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4373 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4374 		    stcb->asoc.overall_error_count,
4375 		    0,
4376 		    SCTP_FROM_SCTP_INDATA,
4377 		    __LINE__);
4378 	}
4379 	stcb->asoc.overall_error_count = 0;
4380 	asoc = &stcb->asoc;
4381 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4382 		sctp_log_sack(asoc->last_acked_seq,
4383 		    cum_ack,
4384 		    0,
4385 		    num_seg,
4386 		    num_dup,
4387 		    SCTP_LOG_NEW_SACK);
4388 	}
4389 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4390 		uint16_t i;
4391 		uint32_t *dupdata, dblock;
4392 
4393 		for (i = 0; i < num_dup; i++) {
4394 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4395 			    sizeof(uint32_t), (uint8_t *) & dblock);
4396 			if (dupdata == NULL) {
4397 				break;
4398 			}
4399 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4400 		}
4401 	}
4402 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4403 		/* reality check */
4404 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4405 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4406 			    sctpchunk_listhead);
4407 			send_s = tp1->rec.data.TSN_seq + 1;
4408 		} else {
4409 			tp1 = NULL;
4410 			send_s = asoc->sending_seq;
4411 		}
4412 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4413 			struct mbuf *oper;
4414 
4415 			/*
4416 			 * no way, we have not even sent this TSN out yet.
4417 			 * Peer is hopelessly messed up with us.
4418 			 */
4419 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4420 			    cum_ack, send_s);
4421 			if (tp1) {
4422 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4423 				    tp1->rec.data.TSN_seq, tp1);
4424 			}
4425 	hopeless_peer:
4426 			*abort_now = 1;
4427 			/* XXX */
4428 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4429 			    0, M_DONTWAIT, 1, MT_DATA);
4430 			if (oper) {
4431 				struct sctp_paramhdr *ph;
4432 				uint32_t *ippp;
4433 
4434 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4435 				    sizeof(uint32_t);
4436 				ph = mtod(oper, struct sctp_paramhdr *);
4437 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4438 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4439 				ippp = (uint32_t *) (ph + 1);
4440 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4441 			}
4442 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4443 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4444 			return;
4445 		}
4446 	}
4447 	/**********************/
4448 	/* 1) check the range */
4449 	/**********************/
4450 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4451 		/* acking something behind */
4452 		return;
4453 	}
4454 	/* update the Rwnd of the peer */
4455 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4456 	    TAILQ_EMPTY(&asoc->send_queue) &&
4457 	    (asoc->stream_queue_cnt == 0)) {
4458 		/* nothing left on send/sent and strmq */
4459 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4460 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4461 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4462 		}
4463 		asoc->peers_rwnd = a_rwnd;
4464 		if (asoc->sent_queue_retran_cnt) {
4465 			asoc->sent_queue_retran_cnt = 0;
4466 		}
4467 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4468 			/* SWS sender side engages */
4469 			asoc->peers_rwnd = 0;
4470 		}
4471 		/* stop any timers */
4472 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4473 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4474 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4475 			net->partial_bytes_acked = 0;
4476 			net->flight_size = 0;
4477 		}
4478 		asoc->total_flight = 0;
4479 		asoc->total_flight_count = 0;
4480 		return;
4481 	}
4482 	/*
4483 	 * We init net_ack and net_ack2 to 0. These are used to track two
4484 	 * things: the total byte count acked is tracked in net_ack AND
4485 	 * net_ack2 is used to track the total bytes acked that are
4486 	 * unambiguous and were never retransmitted. We track these on a per
4487 	 * destination address basis.
4488 	 */
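	/*
	 * For example, a chunk cum-acked on its first transmission adds
	 * its send_size to both net_ack and net_ack2, while one that was
	 * ever retransmitted (snd_count >= 2) counts only toward net_ack,
	 * since its RTT sample would be ambiguous (Karn's rule).
	 */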
4489 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4490 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4491 			/* Drag along the window_tsn for cwr's */
4492 			net->cwr_window_tsn = cum_ack;
4493 		}
4494 		net->prev_cwnd = net->cwnd;
4495 		net->net_ack = 0;
4496 		net->net_ack2 = 0;
4497 
4498 		/*
4499 		 * CMT: Reset CUC and Fast recovery algo variables before
4500 		 * SACK processing
4501 		 */
4502 		net->new_pseudo_cumack = 0;
4503 		net->will_exit_fast_recovery = 0;
4504 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4505 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4506 		}
4507 	}
4508 	/* process the new consecutive TSN first */
4509 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4510 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4511 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4512 				accum_moved = 1;
4513 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4514 					/*
4515 					 * If it is less than ACKED, it is
4516 					 * now no longer in flight. Higher
4517 					 * values may occur during marking.
4518 					 */
4519 					if ((tp1->whoTo->dest_state &
4520 					    SCTP_ADDR_UNCONFIRMED) &&
4521 					    (tp1->snd_count < 2)) {
4522 						/*
4523 						 * If there was no retran
4524 						 * and the address is
4525 						 * un-confirmed and we sent
4526 						 * there and are now
4527 						 * sacked... it's confirmed,
4528 						 * mark it so.
4529 						 */
4530 						tp1->whoTo->dest_state &=
4531 						    ~SCTP_ADDR_UNCONFIRMED;
4532 					}
4533 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4534 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4535 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4536 							    tp1->whoTo->flight_size,
4537 							    tp1->book_size,
4538 							    (uintptr_t) tp1->whoTo,
4539 							    tp1->rec.data.TSN_seq);
4540 						}
4541 						sctp_flight_size_decrease(tp1);
4542 						sctp_total_flight_decrease(stcb, tp1);
4543 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4544 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4545 							    tp1);
4546 						}
4547 					}
4548 					tp1->whoTo->net_ack += tp1->send_size;
4549 
4550 					/* CMT SFR and DAC algos */
4551 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4552 					tp1->whoTo->saw_newack = 1;
4553 
4554 					if (tp1->snd_count < 2) {
4555 						/*
4556 						 * True non-retransmitted
4557 						 * chunk
4558 						 */
4559 						tp1->whoTo->net_ack2 +=
4560 						    tp1->send_size;
4561 
4562 						/* update RTO too? */
4563 						if (tp1->do_rtt) {
4564 							if (rto_ok) {
4565 								tp1->whoTo->RTO =
4566 								    sctp_calculate_rto(stcb,
4567 								    asoc, tp1->whoTo,
4568 								    &tp1->sent_rcv_time,
4569 								    sctp_align_safe_nocopy,
4570 								    SCTP_RTT_FROM_DATA);
4571 								rto_ok = 0;
4572 							}
4573 							if (tp1->whoTo->rto_needed == 0) {
4574 								tp1->whoTo->rto_needed = 1;
4575 							}
4576 							tp1->do_rtt = 0;
4577 						}
4578 					}
4579 					/*
4580 					 * CMT: CUCv2 algorithm. From the
4581 					 * cumack'd TSNs, for each TSN being
4582 					 * acked for the first time, set the
4583 					 * following variables for the
4584 					 * corresp destination.
4585 					 * new_pseudo_cumack will trigger a
4586 					 * cwnd update.
4587 					 * find_(rtx_)pseudo_cumack will
4588 					 * trigger search for the next
4589 					 * expected (rtx-)pseudo-cumack.
4590 					 */
4591 					tp1->whoTo->new_pseudo_cumack = 1;
4592 					tp1->whoTo->find_pseudo_cumack = 1;
4593 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4594 
4595 
4596 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4597 						sctp_log_sack(asoc->last_acked_seq,
4598 						    cum_ack,
4599 						    tp1->rec.data.TSN_seq,
4600 						    0,
4601 						    0,
4602 						    SCTP_LOG_TSN_ACKED);
4603 					}
4604 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4605 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4606 					}
4607 				}
4608 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4609 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4610 #ifdef SCTP_AUDITING_ENABLED
4611 					sctp_audit_log(0xB3,
4612 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4613 #endif
4614 				}
4615 				if (tp1->rec.data.chunk_was_revoked) {
4616 					/* deflate the cwnd */
4617 					tp1->whoTo->cwnd -= tp1->book_size;
4618 					tp1->rec.data.chunk_was_revoked = 0;
4619 				}
4620 				tp1->sent = SCTP_DATAGRAM_ACKED;
4621 			}
4622 		} else {
4623 			break;
4624 		}
4625 	}
4626 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4627 	/* always set this up to cum-ack */
4628 	asoc->this_sack_highest_gap = last_tsn;
4629 
4630 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4631 
4632 		/*
4633 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4634 		 * to be greater than the cumack. Also reset saw_newack to 0
4635 		 * for all dests.
4636 		 */
4637 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4638 			net->saw_newack = 0;
4639 			net->this_sack_highest_newack = last_tsn;
4640 		}
4641 
4642 		/*
4643 		 * this_sack_highest_gap will increase while handling NEW
4644 		 * segments; this_sack_highest_newack will increase while
4645 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4646 		 * used for CMT DAC algo. saw_newack will also change.
4647 		 */
4648 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4649 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4650 		    num_seg, num_nr_seg, &rto_ok)) {
4651 			wake_him++;
4652 		}
4653 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4654 			/*
4655 			 * validate the biggest_tsn_acked in the gap acks if
4656 			 * strict adherence is wanted.
4657 			 */
4658 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4659 				/*
4660 				 * peer is either confused or we are under
4661 				 * attack. We must abort.
4662 				 */
4663 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4664 				    biggest_tsn_acked, send_s);
4665 				goto hopeless_peer;
4666 			}
4667 		}
4668 	}
4669 	/*******************************************/
4670 	/* cancel ALL T3-send timer if accum moved */
4671 	/*******************************************/
4672 	if (asoc->sctp_cmt_on_off > 0) {
4673 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4674 			if (net->new_pseudo_cumack)
4675 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4676 				    stcb, net,
4677 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4678 
4679 		}
4680 	} else {
4681 		if (accum_moved) {
4682 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4683 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4684 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4685 			}
4686 		}
4687 	}
4688 	/********************************************/
4689 	/* drop the acked chunks from the sentqueue */
4690 	/********************************************/
4691 	asoc->last_acked_seq = cum_ack;
4692 
4693 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4694 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4695 			break;
4696 		}
4697 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4698 			/* no more sent on list */
4699 			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4700 			    tp1->sent);
4701 		}
4702 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4703 		if (tp1->pr_sctp_on) {
4704 			if (asoc->pr_sctp_cnt != 0)
4705 				asoc->pr_sctp_cnt--;
4706 		}
4707 		asoc->sent_queue_cnt--;
4708 		if (tp1->data) {
4709 			/* sa_ignore NO_NULL_CHK */
4710 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4711 			sctp_m_freem(tp1->data);
4712 			tp1->data = NULL;
4713 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4714 				asoc->sent_queue_cnt_removeable--;
4715 			}
4716 		}
4717 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4718 			sctp_log_sack(asoc->last_acked_seq,
4719 			    cum_ack,
4720 			    tp1->rec.data.TSN_seq,
4721 			    0,
4722 			    0,
4723 			    SCTP_LOG_FREE_SENT);
4724 		}
4725 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4726 		wake_him++;
4727 	}
4728 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4729 #ifdef INVARIANTS
4730 		panic("Warning flight size is positive and should be 0");
4731 #else
4732 		SCTP_PRINTF("Warning, flight size is %d but should be 0\n",
4733 		    asoc->total_flight);
4734 #endif
4735 		asoc->total_flight = 0;
4736 	}
4737 	/* sa_ignore NO_NULL_CHK */
4738 	if ((wake_him) && (stcb->sctp_socket)) {
4739 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4740 		struct socket *so;
4741 
4742 #endif
4743 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4744 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4745 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4746 		}
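		/*
		 * Lock-order note (a sketch of the reasoning behind the
		 * Apple/SCTP_SO_LOCK_TESTING dance below): the socket lock
		 * must be taken before the TCB lock, so the code pins the
		 * association with a refcount, drops the TCB lock, takes
		 * the socket lock, re-takes the TCB lock, and then
		 * re-checks that the association was not closed while it
		 * was unlocked.
		 */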
4747 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4748 		so = SCTP_INP_SO(stcb->sctp_ep);
4749 		atomic_add_int(&stcb->asoc.refcnt, 1);
4750 		SCTP_TCB_UNLOCK(stcb);
4751 		SCTP_SOCKET_LOCK(so, 1);
4752 		SCTP_TCB_LOCK(stcb);
4753 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4754 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4755 			/* assoc was freed while we were unlocked */
4756 			SCTP_SOCKET_UNLOCK(so, 1);
4757 			return;
4758 		}
4759 #endif
4760 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4761 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4762 		SCTP_SOCKET_UNLOCK(so, 1);
4763 #endif
4764 	} else {
4765 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4766 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4767 		}
4768 	}
4769 
4770 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4771 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4772 			/* Setup so we will exit RFC2582 fast recovery */
4773 			will_exit_fast_recovery = 1;
4774 		}
4775 	}
4776 	/*
4777 	 * Check for revoked fragments:
4778 	 *
4779 	 * If the previous SACK had no frags, then nothing can have been
4780 	 * revoked. If the previous SACK had frags, then: if we now have
4781 	 * frags (num_seg > 0), call sctp_check_for_revoked() to tell if the
4782 	 * peer revoked some of them; else the peer revoked all ACKED
4783 	 * fragments, since we had some before and now we have NONE.
4784 	 */
4785 
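	/*
	 * Illustrative sketch (not compiled): a chunk counts as "revoked"
	 * when a TSN that an earlier SACK reported in a gap block is no
	 * longer covered by the current SACK. tsn_in_gap_reports() is a
	 * hypothetical helper, not part of this file; in essence:
	 */
#if 0
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if ((tp1->sent == SCTP_DATAGRAM_ACKED) &&
		    SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack) &&
		    !tsn_in_gap_reports(tp1->rec.data.TSN_seq)) {
			/* previously gap-acked, now missing: revoked */
			tp1->sent = SCTP_DATAGRAM_SENT;
			tp1->rec.data.chunk_was_revoked = 1;
		}
	}
#endif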
4786 	if (num_seg) {
4787 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4788 		asoc->saw_sack_with_frags = 1;
4789 	} else if (asoc->saw_sack_with_frags) {
4790 		int cnt_revoked = 0;
4791 
4792 		/* Peer revoked all datagrams previously marked or acked */
4793 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4794 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4795 				tp1->sent = SCTP_DATAGRAM_SENT;
4796 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4797 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4798 					    tp1->whoTo->flight_size,
4799 					    tp1->book_size,
4800 					    (uintptr_t) tp1->whoTo,
4801 					    tp1->rec.data.TSN_seq);
4802 				}
4803 				sctp_flight_size_increase(tp1);
4804 				sctp_total_flight_increase(stcb, tp1);
4805 				tp1->rec.data.chunk_was_revoked = 1;
4806 				/*
4807 				 * To ensure that this increase in
4808 				 * flightsize, which is artificial, does not
4809 				 * throttle the sender, we also increase the
4810 				 * cwnd artificially.
4811 				 */
4812 				tp1->whoTo->cwnd += tp1->book_size;
4813 				cnt_revoked++;
4814 			}
4815 		}
4816 		if (cnt_revoked) {
4817 			reneged_all = 1;
4818 		}
4819 		asoc->saw_sack_with_frags = 0;
4820 	}
4821 	if (num_nr_seg > 0)
4822 		asoc->saw_sack_with_nr_frags = 1;
4823 	else
4824 		asoc->saw_sack_with_nr_frags = 0;
4825 
4826 	/* JRS - Use the congestion control given in the CC module */
4827 	if (ecne_seen == 0) {
4828 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4829 			if (net->net_ack2 > 0) {
4830 				/*
4831 				 * Karn's rule applies to clearing the error
4832 				 * count; this is optional.
4833 				 */
4834 				net->error_count = 0;
4835 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4836 					/* addr came good */
4837 					net->dest_state |= SCTP_ADDR_REACHABLE;
4838 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4839 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4840 				}
4841 				if (net == stcb->asoc.primary_destination) {
4842 					if (stcb->asoc.alternate) {
4843 						/*
4844 						 * release the alternate,
4845 						 * primary is good
4846 						 */
4847 						sctp_free_remote_addr(stcb->asoc.alternate);
4848 						stcb->asoc.alternate = NULL;
4849 					}
4850 				}
4851 				if (net->dest_state & SCTP_ADDR_PF) {
4852 					net->dest_state &= ~SCTP_ADDR_PF;
4853 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4854 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4855 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4856 					/* Done with this net */
4857 					net->net_ack = 0;
4858 				}
4859 				/* restore any doubled timers */
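				/*
				 * A sketch of the arithmetic (assuming the
				 * usual Van Jacobson fixed-point encoding,
				 * where lastsa holds the smoothed RTT
				 * scaled by 2^SCTP_RTT_SHIFT and lastsv
				 * holds the variance term): this recomputes
				 * RTO = SRTT + 4*RTTVAR as in RFC 4960,
				 * clamped to [minrto, maxrto] below.
				 */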
4860 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4861 				if (net->RTO < stcb->asoc.minrto) {
4862 					net->RTO = stcb->asoc.minrto;
4863 				}
4864 				if (net->RTO > stcb->asoc.maxrto) {
4865 					net->RTO = stcb->asoc.maxrto;
4866 				}
4867 			}
4868 		}
4869 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4870 	}
4871 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4872 		/* nothing left in-flight */
4873 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4874 			/* stop all timers */
4875 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4876 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4877 			net->flight_size = 0;
4878 			net->partial_bytes_acked = 0;
4879 		}
4880 		asoc->total_flight = 0;
4881 		asoc->total_flight_count = 0;
4882 	}
4883 	/**********************************/
4884 	/* Now what about shutdown issues */
4885 	/**********************************/
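	/*
	 * A rough summary of what follows (per RFC 4960, Section 9.2): once
	 * nothing is left queued, an endpoint in SHUTDOWN-PENDING sends a
	 * SHUTDOWN and one in SHUTDOWN-RECEIVED sends a SHUTDOWN-ACK; a
	 * partially sent message left on a stream forces an abort instead.
	 */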
4886 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4887 		/* nothing left on the send queue; consider it done */
4888 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4889 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4890 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4891 		}
4892 		asoc->peers_rwnd = a_rwnd;
4893 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4894 			/* SWS sender side engages */
4895 			asoc->peers_rwnd = 0;
4896 		}
4897 		/* clean up */
4898 		if ((asoc->stream_queue_cnt == 1) &&
4899 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4900 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4901 		    (asoc->locked_on_sending)
4902 		    ) {
4903 			struct sctp_stream_queue_pending *sp;
4904 
4905 			/*
4906 			 * We may be in a state where all the data got across
4907 			 * but we cannot write more due to a shutdown; we abort
4908 			 * since the user did not indicate EOR in this case.
4909 			 */
4910 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4911 			    sctp_streamhead);
4912 			if ((sp) && (sp->length == 0)) {
4913 				asoc->locked_on_sending = NULL;
4914 				if (sp->msg_is_complete) {
4915 					asoc->stream_queue_cnt--;
4916 				} else {
4917 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4918 					asoc->stream_queue_cnt--;
4919 				}
4920 			}
4921 		}
4922 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4923 		    (asoc->stream_queue_cnt == 0)) {
4924 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4925 				/* Need to abort here */
4926 				struct mbuf *oper;
4927 
4928 		abort_out_now:
4929 				*abort_now = 1;
4930 				/* XXX */
4931 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4932 				    0, M_DONTWAIT, 1, MT_DATA);
4933 				if (oper) {
4934 					struct sctp_paramhdr *ph;
4935 					uint32_t *ippp;
4936 
4937 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4938 					    sizeof(uint32_t);
4939 					ph = mtod(oper, struct sctp_paramhdr *);
4940 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4941 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4942 					ippp = (uint32_t *) (ph + 1);
4943 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4944 				}
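				/*
				 * The resulting error cause is laid out as
				 * a 2-byte cause code, a 2-byte length (8
				 * here: the parameter header plus one
				 * uint32_t), and a 4-byte location code,
				 * all in network byte order.
				 */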
4945 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4946 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4947 				return;
4948 			} else {
4949 				struct sctp_nets *netp;
4950 
4951 				if (asoc->alternate) {
4952 					netp = asoc->alternate;
4953 				} else {
4954 					netp = asoc->primary_destination;
4955 				}
4956 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4957 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4958 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4959 				}
4960 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4961 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4962 				sctp_stop_timers_for_shutdown(stcb);
4963 				sctp_send_shutdown(stcb, netp);
4964 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4965 				    stcb->sctp_ep, stcb, netp);
4966 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4967 				    stcb->sctp_ep, stcb, netp);
4968 			}
4969 			return;
4970 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4971 		    (asoc->stream_queue_cnt == 0)) {
4972 			struct sctp_nets *netp;
4973 
4974 			if (asoc->alternate) {
4975 				netp = asoc->alternate;
4976 			} else {
4977 				netp = asoc->primary_destination;
4978 			}
4979 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4980 				goto abort_out_now;
4981 			}
4982 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4983 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4984 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4985 			sctp_send_shutdown_ack(stcb, netp);
4986 			sctp_stop_timers_for_shutdown(stcb);
4987 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4988 			    stcb->sctp_ep, stcb, netp);
4989 			return;
4990 		}
4991 	}
4992 	/*
4993 	 * Now here we are going to recycle net_ack for a different use...
4994 	 * HEADS UP.
4995 	 */
4996 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4997 		net->net_ack = 0;
4998 	}
4999 
5000 	/*
5001 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5002 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5003 	 * automatically ensure that.
5004 	 */
5005 	if ((asoc->sctp_cmt_on_off > 0) &&
5006 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5007 	    (cmt_dac_flag == 0)) {
5008 		this_sack_lowest_newack = cum_ack;
5009 	}
5010 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5011 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5012 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5013 	}
5014 	/* JRS - Use the congestion control given in the CC module */
5015 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5016 
5017 	/* Now are we exiting loss recovery ? */
5018 	if (will_exit_fast_recovery) {
5019 		/* Ok, we must exit fast recovery */
5020 		asoc->fast_retran_loss_recovery = 0;
5021 	}
5022 	if ((asoc->sat_t3_loss_recovery) &&
5023 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5024 		/* end satellite t3 loss recovery */
5025 		asoc->sat_t3_loss_recovery = 0;
5026 	}
5027 	/*
5028 	 * CMT Fast recovery
5029 	 */
5030 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5031 		if (net->will_exit_fast_recovery) {
5032 			/* Ok, we must exit fast recovery */
5033 			net->fast_retran_loss_recovery = 0;
5034 		}
5035 	}
5036 
5037 	/* Adjust and set the new rwnd value */
5038 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5039 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5040 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5041 	}
5042 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5043 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5044 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5045 		/* SWS sender side engages */
5046 		asoc->peers_rwnd = 0;
5047 	}
5048 	if (asoc->peers_rwnd > old_rwnd) {
5049 		win_probe_recovery = 1;
5050 	}
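	/*
	 * Worked example with illustrative numbers: if the SACK advertises
	 * a_rwnd = 64000 while total_flight = 12000 and 10 chunks are in
	 * flight with sctp_peer_chunk_oh = 256, then peers_rwnd becomes
	 * 64000 - (12000 + 10 * 256) = 49440. A result below
	 * sctp_sws_sender is treated as 0 to avoid silly window syndrome,
	 * and a window that grew past old_rwnd triggers window probe
	 * recovery.
	 */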
5051 	/*
5052 	 * Now we must set things up so we have a timer running for anyone
5053 	 * with outstanding data.
5054 	 */
5055 	done_once = 0;
5056 again:
5057 	j = 0;
5058 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5059 		if (win_probe_recovery && (net->window_probe)) {
5060 			win_probe_recovered = 1;
5061 			/*-
5062 			 * Find the first chunk that was used for a
5063 			 * window probe and clear the event. Put it
5064 			 * back into the send queue as if it has not
5065 			 * been sent.
5066 			 */
5067 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5068 				if (tp1->window_probe) {
5069 					sctp_window_probe_recovery(stcb, asoc, tp1);
5070 					break;
5071 				}
5072 			}
5073 		}
5074 		if (net->flight_size) {
5075 			j++;
5076 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5077 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5078 				    stcb->sctp_ep, stcb, net);
5079 			}
5080 			if (net->window_probe) {
5081 				net->window_probe = 0;
5082 			}
5083 		} else {
5084 			if (net->window_probe) {
5085 				/*
5086 				 * For window probes we must ensure that a
5087 				 * timer is still running there.
5088 				 */
5089 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5090 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5091 					    stcb->sctp_ep, stcb, net);
5092 
5093 				}
5094 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5095 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5096 				    stcb, net,
5097 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5098 			}
5099 		}
5100 	}
5101 	if ((j == 0) &&
5102 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5103 	    (asoc->sent_queue_retran_cnt == 0) &&
5104 	    (win_probe_recovered == 0) &&
5105 	    (done_once == 0)) {
5106 		/*
5107 		 * Huh, this should not happen unless all packets are
5108 		 * PR-SCTP and marked to be skipped, of course.
5109 		 */
5110 		if (sctp_fs_audit(asoc)) {
5111 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5112 				net->flight_size = 0;
5113 			}
5114 			asoc->total_flight = 0;
5115 			asoc->total_flight_count = 0;
5116 			asoc->sent_queue_retran_cnt = 0;
5117 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5118 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5119 					sctp_flight_size_increase(tp1);
5120 					sctp_total_flight_increase(stcb, tp1);
5121 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5122 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5123 				}
5124 			}
5125 		}
5126 		done_once = 1;
5127 		goto again;
5128 	}
5129 	/*********************************************/
5130 	/* Here we perform PR-SCTP procedures        */
5131 	/* (section 4.2)                             */
5132 	/*********************************************/
5133 	/* C1. update advancedPeerAckPoint */
5134 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5135 		asoc->advanced_peer_ack_point = cum_ack;
5136 	}
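	/*
	 * Illustrative example for C2/C3: with cum_ack = 1000 and abandoned
	 * PR-SCTP TSNs 1001-1003 still on the sent queue,
	 * sctp_try_advance_peer_ack_point() can move advancedPeerAckPoint
	 * to 1003; since 1003 > 1000, a FORWARD-TSN is sent so the peer
	 * skips over the abandoned TSNs.
	 */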
5137 	/* C2. try to further move advancedPeerAckPoint ahead */
5138 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5139 		struct sctp_tmit_chunk *lchk;
5140 		uint32_t old_adv_peer_ack_point;
5141 
5142 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5143 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5144 		/* C3. See if we need to send a Fwd-TSN */
5145 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5146 			/*
5147 			 * ISSUE with ECN, see FWD-TSN processing.
5148 			 */
5149 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5150 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5151 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5152 				    old_adv_peer_ack_point);
5153 			}
5154 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5155 				send_forward_tsn(stcb, asoc);
5156 			} else if (lchk) {
5157 				/* try to FR fwd-tsn's that get lost too */
5158 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5159 					send_forward_tsn(stcb, asoc);
5160 				}
5161 			}
5162 		}
5163 		if (lchk) {
5164 			/* Assure a timer is up */
5165 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5166 			    stcb->sctp_ep, stcb, lchk->whoTo);
5167 		}
5168 	}
5169 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5170 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5171 		    a_rwnd,
5172 		    stcb->asoc.peers_rwnd,
5173 		    stcb->asoc.total_flight,
5174 		    stcb->asoc.total_output_queue_size);
5175 	}
5176 }
5177 
5178 void
5179 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5180 {
5181 	/* Copy cum-ack */
5182 	uint32_t cum_ack, a_rwnd;
5183 
5184 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5185 	/* Arrange so a_rwnd does NOT change */
5186 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
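	/*
	 * Since the express handler recomputes peers_rwnd as (roughly)
	 * a_rwnd minus the data still in flight, passing
	 * peers_rwnd + total_flight here leaves the peer's window estimate
	 * unchanged.
	 */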
5187 
5188 	/* Now call the express sack handling */
5189 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5190 }
5191 
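/*
 * Aside on the TSN comparisons used throughout this file: TSNs are 32-bit
 * serial numbers, so "greater than" must be evaluated modulo 2^32. A
 * minimal sketch of the idea (an assumption about how the SCTP_TSN_GT()
 * macro behaves, not its literal definition):
 */
#if 0
static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* wraps around correctly: tsn_gt(2, 0xfffffffe) is true */
	return (((int32_t)(a - b)) > 0);
}
#endif
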
5192 static void
5193 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5194     struct sctp_stream_in *strmin)
5195 {
5196 	struct sctp_queued_to_read *ctl, *nctl;
5197 	struct sctp_association *asoc;
5198 	uint16_t tt;
5199 
5200 	asoc = &stcb->asoc;
5201 	tt = strmin->last_sequence_delivered;
5202 	/*
5203 	 * First deliver anything prior to and including the stream
5204 	 * sequence number that came in.
5205 	 */
5206 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5207 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5208 			/* this is deliverable now */
5209 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5210 			/* subtract pending on streams */
5211 			asoc->size_on_all_streams -= ctl->length;
5212 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5213 			/* deliver it to at least the delivery-q */
5214 			if (stcb->sctp_socket) {
5215 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5216 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5217 				    ctl,
5218 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5219 			}
5220 		} else {
5221 			/* no more delivery now. */
5222 			break;
5223 		}
5224 	}
5225 	/*
5226 	 * Now we must deliver, in the normal way, anything in the queue
5227 	 * that is now ready.
5228 	 */
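	/*
	 * Illustrative example: with last_sequence_delivered = 7 and SSNs
	 * 5, 6, 8 and 9 queued, the first pass above delivered 5 and 6
	 * (SSN <= 7); this pass then delivers 8 and 9 in order, advancing
	 * last_sequence_delivered to 9.
	 */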
5229 	tt = strmin->last_sequence_delivered + 1;
5230 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5231 		if (tt == ctl->sinfo_ssn) {
5232 			/* this is deliverable now */
5233 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5234 			/* subtract pending on streams */
5235 			asoc->size_on_all_streams -= ctl->length;
5236 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5237 			/* deliver it to at least the delivery-q */
5238 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5239 			if (stcb->sctp_socket) {
5240 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5241 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5242 				    ctl,
5243 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5244 
5245 			}
5246 			tt = strmin->last_sequence_delivered + 1;
5247 		} else {
5248 			break;
5249 		}
5250 	}
5251 }
5252 
5253 static void
5254 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5255     struct sctp_association *asoc,
5256     uint16_t stream, uint16_t seq)
5257 {
5258 	struct sctp_tmit_chunk *chk, *nchk;
5259 
5260 	/* For each one on here see if we need to toss it */
5261 	/*
5262 	 * For now, large messages held on the reasmqueue that are complete
5263 	 * will be tossed too. We could in theory do more work: spin
5264 	 * through, stop after dumping one msg (i.e. on seeing the start of
5265 	 * a new msg at the head), and call the delivery function to see if
5266 	 * it can be delivered. But for now we just dump everything on the
5267 	 * queue.
5268 	 */
5269 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5270 		/*
5271 		 * Do not toss it if on a different stream or marked for
5272 		 * unordered delivery in which case the stream sequence
5273 		 * number has no meaning.
5274 		 */
5275 		if ((chk->rec.data.stream_number != stream) ||
5276 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5277 			continue;
5278 		}
5279 		if (chk->rec.data.stream_seq == seq) {
5280 			/* It needs to be tossed */
5281 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5282 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5283 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5284 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5285 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5286 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5287 			}
5288 			asoc->size_on_reasm_queue -= chk->send_size;
5289 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5290 
5291 			/* Clear up any stream problem */
5292 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5293 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5294 				/*
5295 				 * We must move this stream's sequence
5296 				 * number forward if the chunk being
5297 				 * skipped is not unordered. There is a
5298 				 * chance that if the peer does not
5299 				 * include the last fragment in its
5300 				 * FWD-TSN, we WILL have a problem here,
5301 				 * since a partial chunk may be left in
5302 				 * the queue that is not deliverable.
5303 				 * Also, if a partial delivery API has
5304 				 * started, the user may get a partial
5305 				 * chunk; the next read would return a new
5306 				 * chunk. Really ugly; maybe a notify??
5307 				 */
5308 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5309 			}
5310 			if (chk->data) {
5311 				sctp_m_freem(chk->data);
5312 				chk->data = NULL;
5313 			}
5314 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5315 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5316 			/*
5317 			 * If the stream_seq is greater than the one being
5318 			 * purged, we are done.
5319 			 */
5320 			break;
5321 		}
5322 	}
5323 }
5324 
5325 
5326 void
5327 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5328     struct sctp_forward_tsn_chunk *fwd,
5329     int *abort_flag, struct mbuf *m, int offset)
5330 {
5331 	/* The pr-sctp fwd tsn */
5332 	/*
5333 	 * Here we will perform all the data receiver side steps for
5334 	 * processing a FwdTSN, as required by the pr-sctp draft.
5335 	 * Assume we get FwdTSN(x):
5336 	 *
5337 	 * 1) update local cumTSN to x
5338 	 * 2) try to further advance cumTSN to x + others we have
5339 	 * 3) examine and update the re-ordering queue on pr-in-streams
5340 	 * 4) clean up the re-assembly queue
5341 	 * 5) send a SACK to report where we are
5342 	 */
5343 	struct sctp_association *asoc;
5344 	uint32_t new_cum_tsn, gap;
5345 	unsigned int i, fwd_sz, m_size;
5346 	uint32_t str_seq;
5347 	struct sctp_stream_in *strm;
5348 	struct sctp_tmit_chunk *chk, *nchk;
5349 	struct sctp_queued_to_read *ctl, *sv;
5350 
5351 	asoc = &stcb->asoc;
5352 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5353 		SCTPDBG(SCTP_DEBUG_INDATA1,
5354 		    "Bad size, fwd-tsn chunk too small\n");
5355 		return;
5356 	}
5357 	m_size = (stcb->asoc.mapping_array_size << 3);
5358 	/*************************************************************/
5359 	/* 1. Here we update local cumTSN and shift the bitmap array */
5360 	/*************************************************************/
5361 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5362 
5363 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5364 		/* Already got there ... */
5365 		return;
5366 	}
5367 	/*
5368 	 * Now we know the new TSN is more advanced; let's find the actual
5369 	 * gap.
5370 	 */
5371 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5372 	asoc->cumulative_tsn = new_cum_tsn;
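	/*
	 * Illustrative example: with mapping_array_base_tsn = 100 and
	 * new_cum_tsn = 130, gap = 30. If the gap fits inside the m_size
	 * TSNs that the mapping arrays cover, the TSNs up to the gap are
	 * simply marked as received below; otherwise the maps are reset
	 * wholesale.
	 */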
5373 	if (gap >= m_size) {
5374 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5375 			struct mbuf *oper;
5376 
5377 			/*
5378 			 * Out of range (in terms of the single-byte chunks
5379 			 * in the rwnd I give out); this must be an attacker.
5380 			 */
5381 			*abort_flag = 1;
5382 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5383 			    0, M_DONTWAIT, 1, MT_DATA);
5384 			if (oper) {
5385 				struct sctp_paramhdr *ph;
5386 				uint32_t *ippp;
5387 
5388 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5389 				    (sizeof(uint32_t) * 3);
5390 				ph = mtod(oper, struct sctp_paramhdr *);
5391 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5392 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5393 				ippp = (uint32_t *) (ph + 1);
5394 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5395 				ippp++;
5396 				*ippp = asoc->highest_tsn_inside_map;
5397 				ippp++;
5398 				*ippp = new_cum_tsn;
5399 			}
5400 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5401 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5402 			return;
5403 		}
5404 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5405 
5406 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5407 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5408 		asoc->highest_tsn_inside_map = new_cum_tsn;
5409 
5410 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5411 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5412 
5413 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5414 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5415 		}
5416 	} else {
5417 		SCTP_TCB_LOCK_ASSERT(stcb);
5418 		for (i = 0; i <= gap; i++) {
5419 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5420 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5421 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5422 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5423 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5424 				}
5425 			}
5426 		}
5427 	}
5428 	/*************************************************************/
5429 	/* 2. Clear up re-assembly queue                             */
5430 	/*************************************************************/
5431 	/*
5432 	 * First service it, if the pd-api is up, just in case we can
5433 	 * progress it forward.
5434 	 */
5435 	if (asoc->fragmented_delivery_inprogress) {
5436 		sctp_service_reassembly(stcb, asoc);
5437 	}
5438 	/* For each one on here see if we need to toss it */
5439 	/*
5440 	 * For now large messages held on the reasmqueue that are complete
5441 	 * For now, large messages held on the reasmqueue that are complete
5442 	 * will be tossed too. We could in theory do more work: spin
5443 	 * through, stop after dumping one msg (i.e. on seeing the start of
5444 	 * a new msg at the head), and call the delivery function to see if
5445 	 * it can be delivered. But for now we just dump everything on the
5446 	 * queue.
5447 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5448 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5449 			/* It needs to be tossed */
5450 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5451 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5452 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5453 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5454 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5455 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5456 			}
5457 			asoc->size_on_reasm_queue -= chk->send_size;
5458 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5459 
5460 			/* Clear up any stream problem */
5461 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5462 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5463 				/*
5464 				 * We must move this stream's sequence
5465 				 * number forward if the chunk being
5466 				 * skipped is not unordered. There is a
5467 				 * chance that if the peer does not
5468 				 * include the last fragment in its
5469 				 * FWD-TSN, we WILL have a problem here,
5470 				 * since a partial chunk may be left in
5471 				 * the queue that is not deliverable.
5472 				 * Also, if a partial delivery API has
5473 				 * started, the user may get a partial
5474 				 * chunk; the next read would return a new
5475 				 * chunk. Really ugly; maybe a notify??
5476 				 */
5477 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5478 			}
5479 			if (chk->data) {
5480 				sctp_m_freem(chk->data);
5481 				chk->data = NULL;
5482 			}
5483 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5484 		} else {
5485 			/*
5486 			 * Ok we have gone beyond the end of the fwd-tsn's
5487 			 * mark.
5488 			 */
5489 			break;
5490 		}
5491 	}
5492 	/*******************************************************/
5493 	/* 3. Update the PR-stream re-ordering queues and fix  */
5494 	/*    delivery issues as needed.                        */
5495 	/*******************************************************/
5496 	fwd_sz -= sizeof(*fwd);
5497 	if (m && fwd_sz) {
5498 		/* New method. */
5499 		unsigned int num_str;
5500 		struct sctp_strseq *stseq, strseqbuf;
5501 
5502 		offset += sizeof(*fwd);
5503 
5504 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5505 		num_str = fwd_sz / sizeof(struct sctp_strseq);
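		/*
		 * Example (assuming sctp_strseq is a pair of 16-bit fields,
		 * as the ntohs() conversions below suggest): a FORWARD-TSN
		 * carrying 12 bytes of stream information after the fixed
		 * header describes 12 / 4 = 3 stream/sequence pairs.
		 */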
5506 		for (i = 0; i < num_str; i++) {
5507 			uint16_t st;
5508 
5509 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5510 			    sizeof(struct sctp_strseq),
5511 			    (uint8_t *) & strseqbuf);
5512 			offset += sizeof(struct sctp_strseq);
5513 			if (stseq == NULL) {
5514 				break;
5515 			}
5516 			/* Convert */
5517 			st = ntohs(stseq->stream);
5518 			stseq->stream = st;
5519 			st = ntohs(stseq->sequence);
5520 			stseq->sequence = st;
5521 
5522 			/* now process */
5523 
5524 			/*
5525 			 * Ok, we now look for the stream/seq on the read
5526 			 * queue where it's not all delivered. If we find
5527 			 * it, we transmute the read entry into a PDI_ABORTED.
5528 			 */
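			/*
			 * Note on the packing used below: the aborted
			 * stream/SSN pair is reported to the ULP as one
			 * 32-bit value, e.g. stream 3, SSN 7 becomes
			 * (3 << 16) | 7 = 0x00030007.
			 */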
5529 			if (stseq->stream >= asoc->streamincnt) {
5530 				/* screwed up streams, stop!  */
5531 				break;
5532 			}
5533 			if ((asoc->str_of_pdapi == stseq->stream) &&
5534 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5535 				/*
5536 				 * If this is the one we were partially
5537 				 * delivering, then we no longer are.
5538 				 * Note this will change with the reassembly
5539 				 * re-write.
5540 				 */
5541 				asoc->fragmented_delivery_inprogress = 0;
5542 			}
5543 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5544 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5545 				if ((ctl->sinfo_stream == stseq->stream) &&
5546 				    (ctl->sinfo_ssn == stseq->sequence)) {
5547 					str_seq = (stseq->stream << 16) | stseq->sequence;
5548 					ctl->end_added = 1;
5549 					ctl->pdapi_aborted = 1;
5550 					sv = stcb->asoc.control_pdapi;
5551 					stcb->asoc.control_pdapi = ctl;
5552 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5553 					    stcb,
5554 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5555 					    (void *)&str_seq,
5556 					    SCTP_SO_NOT_LOCKED);
5557 					stcb->asoc.control_pdapi = sv;
5558 					break;
5559 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5560 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5561 					/* We are past our victim SSN */
5562 					break;
5563 				}
5564 			}
5565 			strm = &asoc->strmin[stseq->stream];
5566 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5567 				/* Update the sequence number */
5568 				strm->last_sequence_delivered = stseq->sequence;
5569 			}
5570 			/* now kick the stream the new way */
5571 			/* sa_ignore NO_NULL_CHK */
5572 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5573 		}
5574 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5575 	}
5576 	/*
5577 	 * Now slide things forward.
5578 	 */
5579 	sctp_slide_mapping_arrays(stcb);
5580 
5581 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5582 		/* now let's kick out and check for more fragmented delivery */
5583 		/* sa_ignore NO_NULL_CHK */
5584 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5585 	}
5586 }
5587